id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
298,800 | no op | import sys
import logging
from c2corg_api.scripts.migration.analyze_all_tables import AnalyzeAllTables
from c2corg_api.scripts.migration.area_associations import \
MigrateAreaAssociations
from c2corg_api.scripts.migration.climbing_site_routes import \
CreateClimbingSiteRoutes
from c2corg_api.scripts.migration.documents.xreports import MigrateXreports
from c2corg_api.scripts.migration.documents.area import MigrateAreas
from c2corg_api.scripts.migration.documents.associations import \
MigrateAssociations
from c2corg_api.scripts.migration.documents.maps import MigrateMaps
from c2corg_api.scripts.migration.documents.route_title_prefix import \
SetRouteTitlePrefix
from c2corg_api.scripts.migration.documents.user_profiles import \
MigrateUserProfiles
from c2corg_api.scripts.migration.documents.outings import MigrateOutings
from c2corg_api.scripts.migration.documents.images import MigrateImages
from c2corg_api.scripts.migration.documents.articles import MigrateArticles
from c2corg_api.scripts.migration.documents.books import MigrateBooks
from c2corg_api.scripts.migration.map_associations import \
MigrateMapAssociations
from c2corg_api.scripts.migration.set_default_geometries import \
SetDefaultGeometries
from sqlalchemy import engine_from_config
import os
from sqlalchemy.orm import sessionmaker
from pyramid.paster import get_appsettings
from zope.sqlalchemy import register
from c2corg_api.scripts.initializedb import setup_db
from c2corg_api.scripts.migration.users import MigrateUsers
from c2corg_api.scripts.migration.documents.routes import MigrateRoutes
from c2corg_api.scripts.migration.documents.versions import MigrateVersions
from c2corg_api.scripts.migration.documents.waypoints.huts import MigrateHuts
from c2corg_api.scripts.migration.documents.waypoints.parking import \
MigrateParkings
from c2corg_api.scripts.migration.documents.waypoints.products import \
MigrateProducts
from c2corg_api.scripts.migration.documents.waypoints.sites import MigrateSites
from c2corg_api.scripts.migration.documents.waypoints.summit import \
MigrateSummits
from c2corg_api.scripts.migration.sequences import UpdateSequences
from c2corg_api.scripts.migration.init_feed import InitFeed
from c2corg_api.scripts.migration.mailinglists import MigrateMailinglists
from alembic.config import Config
# no-op function referenced from `migration.ini` (required for
# `get_appsettings` to work)
def METHOD_NAME(global_config, **settings):
    """No-op app factory: exists only so `migration.ini` can point at a
    callable, which `get_appsettings` requires."""
    return None
def main(argv=sys.argv):
    """Run the full c2corg migration: configure engines from `migration.ini`,
    set up the target database, then execute every migration step in order
    against the source database."""
    here = os.path.dirname(os.path.abspath(__file__))
    alembic_config = Config(
        os.path.realpath(os.path.join(here, '../../../alembic.ini')))
    settings = get_appsettings(os.path.join(here, 'migration.ini'))

    engine_target = engine_from_config(settings, 'sqlalchemy_target.')
    engine_source = engine_from_config(settings, 'sqlalchemy_source.')

    logging.basicConfig()
    logging.getLogger('sqlalchemy.engine').setLevel(logging.WARN)

    Session = sessionmaker()  # noqa
    register(Session)
    session = Session(bind=engine_target)

    # set up the target database
    setup_db(alembic_config, session)

    connection_source = engine_source.connect()
    batch_size = 1000

    # Migration steps, executed strictly in this order (documents before
    # versions/associations, sequences and feed initialization last).
    migration_steps = [
        MigrateAreas, MigrateUserProfiles, MigrateUsers, MigrateSummits,
        MigrateParkings, MigrateSites, MigrateProducts, MigrateHuts,
        MigrateRoutes, MigrateMaps, MigrateOutings, MigrateImages,
        MigrateXreports, MigrateArticles, MigrateBooks, MigrateVersions,
        MigrateAssociations, CreateClimbingSiteRoutes, SetRouteTitlePrefix,
        SetDefaultGeometries, MigrateAreaAssociations, MigrateMapAssociations,
        MigrateMailinglists, UpdateSequences, InitFeed, AnalyzeAllTables,
    ]
    for step in migration_steps:
        step(connection_source, session, batch_size).migrate()


if __name__ == "__main__":
    main()
298,801 | test diff | ##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
'''
running test case
we test following operations:
set_difference
set_union
set_intersection
concat
drop_duplicates
>> mpirun --mca opal_cuda_support 1 -n 4 -quiet python -m pytest --with-mpi -q python/pygcylon/test/test_setops.py
'''
import pytest
import cudf
import pycylon as cy
import pygcylon as gcy
@pytest.mark.mpi
def METHOD_NAME():
    """Distributed set_difference in both directions must match the saved
    per-rank output files."""
    env: cy.CylonEnv = cy.CylonEnv(config=cy.MPIConfig(), distributed=True)
    print("CylonEnv Initialized: My rank: ", env.rank)

    rank = env.rank
    df1 = gcy.DataFrame.from_cudf(cudf.read_csv(f"data/input/cities_a_{rank}.csv"))
    df2 = gcy.DataFrame.from_cudf(cudf.read_csv(f"data/input/cities_b_{rank}.csv"))

    sort_cols = ["city", "state_id"]

    def canonical(cdf):
        # canonical row order so frames can be compared with equals()
        return cdf.sort_values(by=sort_cols, ignore_index=True)

    diff1_sorted = canonical(df1.set_difference(other=df2, env=env).to_cudf())
    diff2_sorted = canonical(df2.set_difference(other=df1, env=env).to_cudf())
    saved_diff1 = canonical(cudf.read_csv(f"data/output/diff_df1-df2_{rank}.csv"))
    saved_diff2 = canonical(cudf.read_csv(f"data/output/diff_df2-df1_{rank}.csv"))

    assert diff1_sorted.equals(saved_diff1), \
        "First Difference DataFrame and the DataFrame from file are not equal"
    assert diff2_sorted.equals(saved_diff2), \
        "Second Difference DataFrame and the DataFrame from file are not equal"
    # env.finalize()
@pytest.mark.mpi
def test_union():
    """Distributed set_union must match the saved per-rank output file."""
    env: cy.CylonEnv = cy.CylonEnv(config=cy.MPIConfig(), distributed=True)
    print("CylonEnv Initialized: My rank: ", env.rank)

    rank = env.rank
    df1 = gcy.DataFrame.from_cudf(cudf.read_csv(f"data/input/cities_a_{rank}.csv"))
    df2 = gcy.DataFrame.from_cudf(cudf.read_csv(f"data/input/cities_b_{rank}.csv"))

    union_df = df1.set_union(other=df2, env=env)

    # canonical ordering so the frames can be compared with equals()
    sort_cols = ["city", "state_id"]
    union_sorted = union_df.to_cudf().sort_values(by=sort_cols, ignore_index=True)
    saved_union = cudf.read_csv(f"data/output/union_cities_{rank}.csv") \
        .sort_values(by=sort_cols, ignore_index=True)

    assert union_sorted.equals(saved_union), \
        "Union DataFrame and the DataFrame from file are not equal"
    # env.finalize()
@pytest.mark.mpi
def test_intersect():
    """Distributed set_intersect must match the saved per-rank output file."""
    env: cy.CylonEnv = cy.CylonEnv(config=cy.MPIConfig(), distributed=True)
    print("CylonEnv Initialized: My rank: ", env.rank)

    rank = env.rank
    df1 = gcy.DataFrame.from_cudf(cudf.read_csv(f"data/input/cities_a_{rank}.csv"))
    df2 = gcy.DataFrame.from_cudf(cudf.read_csv(f"data/input/cities_b_{rank}.csv"))

    # canonical ordering so the frames can be compared with equals()
    sort_cols = ["city", "state_id"]
    intersect_sorted = df1.set_intersect(other=df2, env=env) \
        .to_cudf().sort_values(by=sort_cols, ignore_index=True)
    saved_intersect = cudf.read_csv(f"data/output/intersect_cities_{rank}.csv") \
        .sort_values(by=sort_cols, ignore_index=True)

    assert intersect_sorted.equals(saved_intersect), \
        "Intersect DataFrame and the DataFrame from file are not equal"
    # env.finalize()
@pytest.mark.mpi
def test_concat():
    """Distributed concat of two dataframes must match the saved output."""
    env: cy.CylonEnv = cy.CylonEnv(config=cy.MPIConfig(), distributed=True)
    print("CylonEnv Initialized: My rank: ", env.rank)
    inputFile1 = "data/input/cities_a_" + str(env.rank) + ".csv"
    inputFile2 = "data/input/cities_b_" + str(env.rank) + ".csv"
    concatFile = "data/output/concat_cities_" + str(env.rank) + ".csv"
    df1 = gcy.DataFrame.from_cudf(cudf.read_csv(inputFile1))
    df2 = gcy.DataFrame.from_cudf(cudf.read_csv(inputFile2))
    concatedDf = gcy.concat([df1, df2], env=env)
    # sort both sides canonically so equals() is order-insensitive
    concated_sorted = concatedDf.to_cudf().sort_values(by=["city", "state_id"], ignore_index=True)
    saved_concated = cudf.read_csv(concatFile).sort_values(by=["city", "state_id"], ignore_index=True)
    # fixed typo in the failure message ("Concatanated" -> "Concatenated")
    assert concated_sorted.equals(saved_concated), \
        "Concatenated DataFrame and the DataFrame from file are not equal"
    # env.finalize()
@pytest.mark.mpi
def test_drop_duplicates():
    """
    We first perform concatenation of two dataframes,
    then drop duplicates.
    Resulting dataframe must be equal to the union of the two original dataframe
    """
    env: cy.CylonEnv = cy.CylonEnv(config=cy.MPIConfig(), distributed=True)
    print("CylonEnv Initialized: My rank: ", env.rank)

    rank = env.rank
    df1 = gcy.DataFrame.from_cudf(cudf.read_csv(f"data/input/cities_a_{rank}.csv"))
    df2 = gcy.DataFrame.from_cudf(cudf.read_csv(f"data/input/cities_b_{rank}.csv"))

    deduped = gcy.concat([df1, df2], env=env).drop_duplicates(ignore_index=True, env=env)

    # canonical ordering so the frames can be compared with equals()
    sort_cols = ["city", "state_id"]
    d_dropped_sorted = deduped.to_cudf().sort_values(by=sort_cols, ignore_index=True)
    saved_union = cudf.read_csv(f"data/output/union_cities_{rank}.csv") \
        .sort_values(by=sort_cols, ignore_index=True)

    assert d_dropped_sorted.equals(saved_union), \
        "Duplicates dropped DataFrame and the DataFrame from file are not equal"
    # env.finalize()
|
298,802 | gcc common flags | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2018 (ita)
# Ralf Habacker, 2006 (rh)
# Yinon Ehrlich, 2009
"""
gcc/llvm detection.
"""
from waflib.Tools import ccroot, ar
from waflib.Configure import conf
@conf
def find_gcc(conf):
    """Locate gcc (falling back to cc), store it in ``CC``, and probe its
    version; records 'gcc' as the compiler name."""
    cc_path = conf.find_program(['gcc', 'cc'], var='CC')
    conf.get_cc_version(cc_path, gcc=True)
    conf.env.CC_NAME = 'gcc'
@conf
def METHOD_NAME(conf):
    """
    Common flags for gcc on nearly all platforms
    """
    env = conf.env

    # --- compile step ---
    env.CC_SRC_F = []
    env.CC_TGT_F = ['-c', '-o']

    # --- link step ---
    if not env.LINK_CC:
        env.LINK_CC = env.CC
    env.CCLNK_SRC_F = []
    env.CCLNK_TGT_F = ['-o']
    env.CPPPATH_ST = '-I%s'
    env.DEFINES_ST = '-D%s'

    env.LIB_ST = '-l%s'  # template for adding libs
    env.LIBPATH_ST = '-L%s'  # template for adding libpaths
    env.STLIB_ST = '-l%s'
    env.STLIBPATH_ST = '-L%s'
    env.RPATH_ST = '-Wl,-rpath,%s'
    env.SONAME_ST = '-Wl,-h,%s'
    env.SHLIB_MARKER = '-Wl,-Bdynamic'
    env.STLIB_MARKER = '-Wl,-Bstatic'

    # --- program / shared lib / static lib patterns and flags ---
    env.cprogram_PATTERN = '%s'
    env.CFLAGS_cshlib = ['-fPIC']
    env.LINKFLAGS_cshlib = ['-shared']
    env.cshlib_PATTERN = 'lib%s.so'
    env.LINKFLAGS_cstlib = ['-Wl,-Bstatic']
    env.cstlib_PATTERN = 'lib%s.a'

    # --- OSX bundles ---
    env.LINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup']
    env.CFLAGS_MACBUNDLE = ['-fPIC']
    env.macbundle_PATTERN = '%s.bundle'
@conf
def gcc_modifier_win32(conf):
    """Configuration flags for executing gcc on Windows"""
    env = conf.env
    env.cprogram_PATTERN = '%s.exe'
    env.cshlib_PATTERN = '%s.dll'
    env.implib_PATTERN = '%s.dll.a'
    env.IMPLIB_ST = '-Wl,--out-implib,%s'
    env.CFLAGS_cshlib = []
    # Auto-import is enabled by default even without this option, but enabling
    # it explicitly suppresses the rather boring, debug-level messages the
    # linker emits otherwise.
    env.append_value('LINKFLAGS', ['-Wl,--enable-auto-import'])
@conf
def gcc_modifier_cygwin(conf):
    """Configuration flags for executing gcc on Cygwin"""
    # start from the Windows settings, then apply Cygwin specifics
    gcc_modifier_win32(conf)
    env = conf.env
    env.cshlib_PATTERN = 'cyg%s.dll'
    env.append_value('LINKFLAGS_cshlib', ['-Wl,--enable-auto-image-base'])
    env.CFLAGS_cshlib = []
@conf
def gcc_modifier_darwin(conf):
    """Configuration flags for executing gcc on MacOS"""
    env = conf.env
    env.CFLAGS_cshlib = ['-fPIC']
    env.LINKFLAGS_cshlib = ['-dynamiclib']
    env.cshlib_PATTERN = 'lib%s.dylib'
    env.FRAMEWORKPATH_ST = '-F%s'
    env.FRAMEWORK_ST = ['-framework']
    env.ARCH_ST = ['-arch']
    env.LINKFLAGS_cstlib = []
    # clear the GNU-ld-specific marker/soname templates on this platform
    env.SHLIB_MARKER = []
    env.STLIB_MARKER = []
    env.SONAME_ST = []
@conf
def gcc_modifier_aix(conf):
    """Configuration flags for executing gcc on AIX"""
    env = conf.env
    env.LINKFLAGS_cprogram = ['-Wl,-brtl']
    env.LINKFLAGS_cshlib = ['-shared', '-Wl,-brtl,-bexpfull']
    env.SHLIB_MARKER = []
@conf
def gcc_modifier_hpux(conf):
    """Configuration flags for executing gcc on HP-UX"""
    env = conf.env
    env.SHLIB_MARKER = []
    env.STLIB_MARKER = []
    env.CFLAGS_cshlib = ['-fPIC', '-DPIC']
    env.cshlib_PATTERN = 'lib%s.sl'
@conf
def gcc_modifier_openbsd(conf):
    """Configuration flags for executing gcc on OpenBSD"""
    # no soname template on this platform
    conf.env.SONAME_ST = []
@conf
def gcc_modifier_osf1V(conf):
    """Configuration flags for executing gcc on OSF1"""
    env = conf.env
    env.SHLIB_MARKER = []
    env.STLIB_MARKER = []
    env.SONAME_ST = []
@conf
def gcc_modifier_platform(conf):
    """Execute platform-specific functions based on *gcc_modifier_+NAME*"""
    # The destination platform is detected automatically by looking at the
    # macros the compiler predefines (falling back to sys.platform when not
    # recognised); dispatch to the matching modifier method, if any.
    modifier = getattr(conf, 'gcc_modifier_' + conf.env.DEST_OS, None)
    if modifier:
        modifier()
def configure(conf):
    """
    Configuration for gcc
    """
    # order matters: locate the compiler and archiver first, set the generic
    # flags, then let the platform-specific modifier adjust them
    conf.find_gcc()
    conf.find_ar()
    conf.METHOD_NAME()
    conf.gcc_modifier_platform()
    # load generic C support and propagate the collected flags
    conf.cc_load_tools()
    conf.cc_add_flags()
    conf.link_add_flags()
    conf.check_gcc_o_space()
|
298,803 | rbo res | from __future__ import division
from bisect import bisect_left
from collections import namedtuple
import numpy as np
from scipy.spatial import distance
import math
# Result record for a full Rank-Biased Overlap analysis:
# lower bound, residual, and extrapolated point estimate.
RBO = namedtuple("RBO", "min res ext")
RBO.__doc__ += ": Result of full RBO analysis"
RBO.min.__doc__ = "Lower bound estimate"
RBO.res.__doc__ = "Residual corresponding to min; min + res is an upper bound estimate"
RBO.ext.__doc__ = "Extrapolated point estimate"
def logistic(x, k):
    """Standard logistic function with steepness ``k``: 1 / (1 + e^(-k*x))."""
    return 1.0 / (1.0 + math.exp(-k * x))
def _round(obj):
    """Round a float — or each field of an RBO result — to 3 decimal places."""
    if not isinstance(obj, RBO):
        return round(obj, 3)
    return RBO(_round(obj.min), _round(obj.res), _round(obj.ext))
def set_at_depth(lst, depth):
    """Union of the first ``depth`` entries of ``lst``.

    Entries may be atomic values or sets of values tied for a rank; set
    entries are flattened into the result.
    """
    result = set()
    for entry in lst[:depth]:
        result.update(entry if isinstance(entry, set) else {entry})
    return result
def embeddings_overlap(list1, list2, depth, index2word, embedding_space, norm=True):
    """Embedding-based overlap of two rankings at ``depth``.

    Maps the indices in each list to words, averages the embeddings of the
    first ``depth`` words found in ``embedding_space``, and measures the
    cosine similarity of the two centroids.  With ``norm=True`` the
    similarity is mapped onto [0, 1] via the angle; otherwise the raw cosine
    is returned.

    Returns ``(overlap, |set1|, |set2|)`` where the set sizes count distinct
    items at the given depth (tie sets flattened).
    """
    set1 = set_at_depth(list1, depth)
    set2 = set_at_depth(list2, depth)

    words1 = [index2word[i] for i in list1]
    words2 = [index2word[i] for i in list2]
    # NOTE(review): assumes at least one word per list is present in the
    # embedding space; otherwise np.mean over an empty list yields nan.
    centroid_1 = np.mean([embedding_space[w] for w in words1[:depth] if w in embedding_space], axis=0)
    centroid_2 = np.mean([embedding_space[w] for w in words2[:depth] if w in embedding_space], axis=0)

    # clamp to the valid cosine range to guard against floating-point error
    cos_sim = min(1.0, max(-1.0, 1 - distance.cosine(centroid_1, centroid_2)))

    if norm:
        # angular overlap: identical direction -> 1, opposite -> 0
        e_ov = 1 - (math.acos(cos_sim) / math.pi)
    else:
        e_ov = cos_sim
    return e_ov, len(set1), len(set2)
def overlap(list1, list2, depth, index2word, embedding_space, norm):
    """Scalar embedding overlap of the two rankings at ``depth``."""
    # NOTE: the algorithm as defined by the paper would instead use
    # agreement(list1, list2, depth) * min(depth, len(list1), len(list2));
    # here the overlap is taken directly from the embedding centroids.
    return embeddings_overlap(list1, list2, depth, index2word, embedding_space, norm=norm)[0]
def agreement(list1, list2, depth, index2word, embedding_space, norm):
    """Proportion of shared values between two sorted lists at given depth."""
    ov, size1, size2 = embeddings_overlap(
        list1, list2, depth, index2word, embedding_space=embedding_space, norm=norm)
    return 2 * ov / (size1 + size2)
def cumulative_agreement(list1, list2, depth, index2word, embedding_space, norm):
    """Lazily yield the agreement at every depth from 1 to ``depth``."""
    for d in range(1, depth + 1):
        yield agreement(list1, list2, d, index2word, embedding_space, norm)
def rbo_min(list1, list2, p, index2word, embedding_space, norm=True, depth=None):
    """Tight lower bound on RBO.
    See equation (11) in paper.
    """
    if depth is None:
        depth = min(len(list1), len(list2))
    x_k = overlap(list1, list2, depth, index2word, embedding_space, norm)
    partial = 0.0
    for d in range(1, depth + 1):
        x_d = overlap(list1, list2, d, index2word,
                      embedding_space=embedding_space, norm=norm)
        partial += p ** d / d * (x_d - x_k)
    return (1 - p) / p * (partial - x_k * math.log(1 - p))
def METHOD_NAME(list1, list2, p, index2word, embedding_space, norm=True):
    """Upper bound on residual overlap beyond evaluated depth.
    See equation (30) in paper.
    NOTE: The doctests weren't verified against manual computations but seem
    plausible. In particular, for identical lists, ``rbo_min()`` and
    ``rbo_res()`` should add up to 1, which is the case.
    """
    S, L = sorted((list1, list2), key=len)
    s, l = len(S), len(L)
    x_l = overlap(list1, list2, l, index2word, embedding_space, norm=norm)
    # overlap(...) can be fractional in the general case of ties while f must
    # be an integer, hence math.ceil()
    f = int(math.ceil(l + s - x_l))

    def tail(lo, hi):
        # sum of p^d / d over the integer range (lo, hi], empty when lo >= hi
        return sum(p ** d / d for d in range(lo + 1, hi + 1))

    term1 = s * tail(s, f)
    term2 = l * tail(l, f)
    term3 = x_l * (math.log(1 / (1 - p)) - tail(0, f))
    return p ** s + p ** l - p ** f - (1 - p) / p * (term1 + term2 + term3)
def rbo_ext(list1, list2, p, index2word, embedding_space, norm=True):
    """RBO point estimate based on extrapolating observed overlap.
    See equation (32) in paper.
    NOTE: The doctests weren't verified against manual computations but seem
    plausible.
    """
    S, L = sorted((list1, list2), key=len)
    s, l = len(S), len(L)
    x_l = overlap(list1, list2, l, index2word, embedding_space=embedding_space, norm=norm)
    x_s = overlap(list1, list2, s, index2word, embedding_space=embedding_space, norm=norm)
    # The paper says overlap(..., d) / d, but agreement(..., d) as defined in
    # equation (28) is used instead so that ties are handled properly
    # (otherwise values > 1 will be returned).
    sum1 = 0.0
    for d in range(1, l + 1):
        sum1 += p ** d * agreement(list1, list2, d, index2word=index2word,
                                   embedding_space=embedding_space, norm=norm)
    sum2 = sum(p ** d * x_s * (d - s) / s / d for d in range(s + 1, l + 1))
    extrapolated_tail = p ** l * ((x_l - x_s) / l + x_s / s)
    return (1 - p) / p * (sum1 + sum2) + extrapolated_tail
def word_embeddings_rbo(list1, list2, p, index2word, embedding_space, norm):
    """Complete RBO analysis (lower bound, residual, point estimate).

    ``list`` arguments should be already correctly sorted iterables whose
    items are either atomic values or sets of values tied for that rank.
    ``p`` is the probability of looking for overlap at rank k + 1 after
    having examined rank k.  Returns an ``RBO`` named tuple.
    """
    if not 0 <= p <= 1:
        raise ValueError("The ``p`` parameter must be between 0 and 1.")
    args = (list1, list2, p, index2word, embedding_space, norm)
    return RBO(rbo_min(*args), METHOD_NAME(*args), rbo_ext(*args))
def sort_dict(dct, *, ascending=False):
    """Sort keys in ``dct`` according to their corresponding values.

    Descending by default, because the values are typically scores where
    higher is better; specify ``ascending=True`` if the values are ranks or
    some sort of score where lower values are better.  Keys tied on the same
    value are grouped into a set at their shared position in the result.

    >>> dct = dict(a=1, b=2, c=1, d=3)
    >>> list(sort_dict(dct)) == ['d', 'b', {'a', 'c'}]
    True
    >>> list(sort_dict(dct, ascending=True)) == [{'a', 'c'}, 'b', 'd']
    True
    """
    sort_keys = []  # kept sorted; one entry per distinct score
    items = []      # parallel to sort_keys; sets hold tied items
    for item, score in dct.items():
        key = score if ascending else -score
        pos = bisect_left(sort_keys, key)
        if pos == len(sort_keys):
            sort_keys.append(key)
            items.append(item)
        elif sort_keys[pos] == key:
            current = items[pos]
            if isinstance(current, set):
                current.add(item)
            else:
                items[pos] = {current, item}
        else:
            sort_keys.insert(pos, key)
            items.insert(pos, item)
    return items
def rbo_dict(dict1, dict2, p, index2word, embedding_space, norm, *, sort_ascending=False):
    """Wrapper around ``word_embeddings_rbo()`` for dict input.

    Each dict maps items to be sorted to the score according to which they
    should be sorted; the RBO analysis is performed on the resulting sorted
    lists.  The sort is descending by default (scores: higher is better);
    pass ``sort_ascending=True`` for rank-like values.
    """
    ranked1 = sort_dict(dict1, ascending=sort_ascending)
    ranked2 = sort_dict(dict2, ascending=sort_ascending)
    return word_embeddings_rbo(ranked1, ranked2, p, index2word, embedding_space, norm)
298,804 | sendto | # SPDX-FileCopyrightText: Christian Amsüss and the aiocoap contributors
#
# SPDX-License-Identifier: MIT
"""Confront an aiocoap server with a client that speaks so bad protocol it is
easier to mock with sending byte sequences than with aiocoap"""
import sys
import socket
import asyncio
from asyncio import wait_for, TimeoutError
import signal
import contextlib
import os
import unittest
import aiocoap
from .test_server import WithTestServer, precise_warnings, no_warnings, asynctest, WithAsyncLoop
# For some reasons site-local requests do not work on my test setup, resorting
# to link-local; that means a link needs to be given, and while we never need
# to find the default multicast interface to join MC groups, we need to know it
# to address them. This needs support from outside the test suite right now.
#
# Decorator: skips multicast tests unless AIOCOAP_TEST_MCIF names the
# interface to use.
_skip_unless_defaultmcif = unittest.skipIf(
    "AIOCOAP_TEST_MCIF" not in os.environ,
    "Multicast tests require AIOCOAP_TEST_MCIF environment variable to tell"
    " the default multicast interface")
class MockSockProtocol:
def __init__(self, remote_addr):
# It should be pointed out here that this whole mocksock thing is not
# terribly well thought out, and just hacked together to replace the
# blocking sockets that used to be there (which were equally hacked
# together)
self.incoming_queue = asyncio.Queue()
self.remote_addr = remote_addr
def connection_made(self, transport):
self.transport = transport
def datagram_received(self, data, addr):
self.incoming_queue.put_nowait((data, addr))
async def close(self):
pass
# emulating the possibly connected socket.socket this once was
def send(self, data):
self.transport.METHOD_NAME(data, self.remote_addr)
def METHOD_NAME(self, data, addr):
self.transport.METHOD_NAME(data, addr)
async def recv(self):
return (await self.incoming_queue.get())[0]
class WithMockSock(WithAsyncLoop):
    # Test mixin: opens a raw IPv6 datagram endpoint speaking to
    # ``self.mocksock_remote_addr`` (subclasses must set that before setUp).
    def setUp(self):
        super().setUp()
        # create_datagram_endpoint returns (transport, protocol); only the
        # MockSockProtocol instance is kept
        _, self.mocksock = self.loop.run_until_complete(
            self.loop.create_datagram_endpoint(
                lambda: MockSockProtocol(self.mocksock_remote_addr),
                family=socket.AF_INET6,
            ))
    def tearDown(self):
        self.loop.run_until_complete(self.mocksock.close())
        super().tearDown()
class TestNoncoapClient(WithTestServer, WithMockSock):
    """Send raw (mostly malformed) byte strings to the test server over the
    unicast mock socket and check how it reacts."""
    def setUp(self):
        # address the server's unicast endpoint on the default CoAP port
        self.mocksock_remote_addr = (self.serveraddress, aiocoap.COAP_PORT)
        super().setUp()
    @precise_warnings(["Ignoring unparsable message from ..."])
    @asynctest
    async def test_veryshort(self):
        # a single byte is too short to be parsed as a CoAP message
        self.mocksock.send(b'\x40')
        await asyncio.sleep(0.1)
    @precise_warnings(["Ignoring unparsable message from ..."])
    @asynctest
    async def test_short_mid(self):
        # message truncated in the middle of the header
        self.mocksock.send(b'\x40\x01\x97')
        await asyncio.sleep(0.1)
    @precise_warnings(["Ignoring unparsable message from ..."])
    @asynctest
    async def test_version2(self):
        # leading byte 0x80: version bits not set to 1 -- must be rejected
        self.mocksock.send(b'\x80\x01\x99\x98')
        await asyncio.sleep(0.1)
    @no_warnings
    @asynctest
    async def test_duplicate(self):
        # the same CON GET sent twice must be deduplicated: the second
        # response has to be byte-identical to the first
        self.mocksock.send(b'\x40\x01\x99\x99') # that's a GET /
        await asyncio.sleep(0.1)
        self.mocksock.send(b'\x40\x01\x99\x99') # that's a GET /
        await asyncio.sleep(0.1)
        r1 = r2 = None
        try:
            r1 = await wait_for(self.mocksock.recv(), timeout=1)
            r2 = await wait_for(self.mocksock.recv(), timeout=1)
        except TimeoutError:
            pass
        self.assertEqual(r1, r2, "Duplicate GETs gave different responses")
        self.assertTrue(r1 is not None, "No responses received to duplicate GET")
    @no_warnings
    @asynctest
    async def test_ping(self):
        self.mocksock.send(b'\x40\x00\x99\x9a') # CoAP ping -- should this test be doable in aiocoap?
        # expected reply carries the same message ID (0x999a)
        response = await asyncio.wait_for(self.mocksock.recv(), timeout=1)
        assert response == b'\x70\x00\x99\x9a'
    @no_warnings
    @asynctest
    async def test_noresponse(self):
        self.mocksock.send(b'\x50\x01\x99\x9b\xd1\xf5\x02') # CoAP NON GET / with no-response on 2.xx
        try:
            response = await wait_for(self.mocksock.recv(), timeout=1)
            self.assertTrue(False, "Response was sent when No-Response should have suppressed it")
        except TimeoutError:
            # silence is the expected outcome here
            pass
    @no_warnings
    @asynctest
    async def test_unknownresponse_reset(self):
        self.mocksock.send(bytes.fromhex("4040ffff")) # CoAP CON 2.00 that the server has not sent a request for
        response = await wait_for(self.mocksock.recv(), timeout=1)
        self.assertEqual(response, bytes.fromhex("7000ffff"), "Unknown CON Response did not trigger RST")
    @no_warnings
    @asynctest
    async def test_unknownresponse_noreset(self):
        self.mocksock.send(bytes.fromhex("6040ffff")) # CoAP ACK 2.00 that the server has not sent a request for
        try:
            response = await wait_for(self.mocksock.recv(), timeout=1)
            self.assertTrue(False, "Unknown ACK Response triggered something")
        except TimeoutError:
            # unknown ACKs must be ignored silently
            pass
# Skipping the whole class when no multicast address was given (as otherwise
# it'd try binding :: which is bound to fail with a simplesocketserver setting)
@_skip_unless_defaultmcif
class TestNoncoapMulticastClient(WithTestServer, WithMockSock):
    """Exercise the server's multicast handling with raw CoAP byte strings.

    This exposes the test server to traffic from the environment system for
    some time; it's only run if a default multicast interface is given
    explicitly, though.
    """
    serveraddress = '::'
    def setUp(self):
        # always used with sendto, so no fixed remote address
        self.mocksock_remote_addr = None
        super().setUp()
    def _mcast_addr(self):
        """Link-local all-CoAP-nodes address on the configured interface."""
        return (aiocoap.numbers.constants.MCAST_IPV6_LINKLOCAL_ALLCOAPNODES,
                aiocoap.COAP_PORT, 0,
                socket.if_nametoindex(os.environ['AIOCOAP_TEST_MCIF']))
    @no_warnings
    @asynctest
    async def test_multicast_ping(self):
        # exactly like the unicast case -- just to verify we're actually
        # reaching our server (method name typo "mutlicast" fixed)
        self.mocksock.METHOD_NAME(b'\x40\x00\x99\x9a', self._mcast_addr())
        response = await wait_for(self.mocksock.recv(), timeout=1)
        assert response == b'\x70\x00\x99\x9a'
    @no_warnings
    @asynctest
    async def test_multicast_unknownresponse_noreset(self):
        # a CON response sent to a multicast address must be ignored
        # (silence, not an RST)
        self.mocksock.METHOD_NAME(bytes.fromhex("4040ffff"), self._mcast_addr())
        try:
            await wait_for(self.mocksock.recv(), timeout=1)
        except TimeoutError:
            pass
        else:
            # was assertEqual(False, "..."): always fails when reached but
            # reads as a comparison; use the same always-fail idiom as the
            # unicast tests
            self.assertTrue(False, "Message was sent back responding to CON response to multicast address")
298,805 | test doc examples | import doctest
import pytest
from insights.combiners import smt
from insights.combiners.smt import CpuTopology
from insights.parsers.smt import CpuCoreOnline, CpuSiblings
from insights.tests import context_wrap
# Path template for per-core online files; {0} is the core index
ONLINE_PATH = "/sys/devices/system/cpu/cpu{0}/online"
# Path template for per-core thread-sibling list files; {0} is the core index
SIBLINGS_PATH = "/sys/devices/system/cpu/cpu{0}/topology/thread_siblings_list"
@pytest.fixture
def cpu_all_online():
    """Fixture: four CPU cores, all reported online."""
    return [CpuCoreOnline(context_wrap("1", path=ONLINE_PATH.format(cpu)))
            for cpu in range(4)]
def test_hyperthreading_all_online(cpu_all_online):
    """All cores online, hyperthread pairs (0,2) and (1,3)."""
    siblings = [
        CpuSiblings(context_wrap(pair, path=SIBLINGS_PATH.format(cpu)))
        for cpu, pair in enumerate(["0,2", "1,3", "0,2", "1,3"])
    ]
    cpu_topology = CpuTopology(cpu_all_online, siblings)
    for cpu, expected in enumerate([[0, 2], [1, 3], [0, 2], [1, 3]]):
        assert cpu_topology.online(cpu)
        assert cpu_topology.siblings(cpu) == expected
    assert not cpu_topology.all_solitary
def test_hyperthreading_some_online():
    """Cores 1 and 3 offline; only the online pair (0,2) reports siblings."""
    online = [
        CpuCoreOnline(context_wrap(state, path=ONLINE_PATH.format(cpu)))
        for cpu, state in enumerate(["1", "0", "1", "0"])
    ]
    siblings = [
        CpuSiblings(context_wrap("0,2", path=SIBLINGS_PATH.format(cpu)))
        for cpu in (0, 2)
    ]
    cpu_topology = CpuTopology(online, siblings)
    for cpu in (0, 2):
        assert cpu_topology.online(cpu)
        assert cpu_topology.siblings(cpu) == [0, 2]
    for cpu in (1, 3):
        assert not cpu_topology.online(cpu)
        assert cpu_topology.siblings(cpu) == []
    assert not cpu_topology.all_solitary
def test_without_hyperthreading_all_online(cpu_all_online):
    """Each core is its own only sibling, so the topology is all solitary."""
    siblings = [
        CpuSiblings(context_wrap(str(cpu), path=SIBLINGS_PATH.format(cpu)))
        for cpu in range(4)
    ]
    cpu_topology = CpuTopology(cpu_all_online, siblings)
    for cpu in range(4):
        assert cpu_topology.online(cpu)
        assert cpu_topology.siblings(cpu) == [cpu]
    assert cpu_topology.all_solitary
def test_without_hyperthreading_some_online():
    """Cores 2 and 3 offline; the online ones have no hyperthread siblings."""
    online = [
        CpuCoreOnline(context_wrap(state, path=ONLINE_PATH.format(cpu)))
        for cpu, state in enumerate(["1", "1", "0", "0"])
    ]
    siblings = [
        CpuSiblings(context_wrap(str(cpu), path=SIBLINGS_PATH.format(cpu)))
        for cpu in (0, 1)
    ]
    cpu_topology = CpuTopology(online, siblings)
    for cpu in (0, 1):
        assert cpu_topology.online(cpu)
        assert cpu_topology.siblings(cpu) == [cpu]
    for cpu in (2, 3):
        assert not cpu_topology.online(cpu)
        assert cpu_topology.siblings(cpu) == []
    assert cpu_topology.all_solitary
def test_wrong_index():
    """Out-of-range core indexes yield None from both accessors."""
    online = [
        CpuCoreOnline(context_wrap(state, path=ONLINE_PATH.format(cpu)))
        for cpu, state in enumerate(["1", "0", "0", "0"])
    ]
    siblings = [CpuSiblings(context_wrap("0", path=SIBLINGS_PATH.format(0)))]
    c = CpuTopology(online, siblings)
    for bad_index in (-1, 4):
        assert c.online(bad_index) is None
        assert c.siblings(bad_index) is None
def METHOD_NAME(cpu_all_online):
    """Run the ``smt`` module's doctests under a hyperthreaded topology.

    Cores 0/2 and 1/3 form sibling pairs; the topology is injected into
    the doctest namespace as the ``cpu_topology`` global.
    """
    siblings = [
        CpuSiblings(context_wrap("0,2", path=SIBLINGS_PATH.format(0))),
        CpuSiblings(context_wrap("1,3", path=SIBLINGS_PATH.format(1))),
        CpuSiblings(context_wrap("0,2", path=SIBLINGS_PATH.format(2))),
        CpuSiblings(context_wrap("1,3", path=SIBLINGS_PATH.format(3)))
    ]
    env = {
        "cpu_topology": CpuTopology(cpu_all_online, siblings),
    }
    # NOTE(review): ``doctest`` and ``smt`` are presumably imported at module
    # top (not visible in this chunk) — confirm.
    failed, total = doctest.testmod(smt, globs=env)
    assert failed == 0
def test_without_hyperthreading_all_online_missing_cpu0_online_file():
    """A core without an online file still resolves as online.

    cpu0 frequently has no ``online`` sysfs file (it is not hot-pluggable);
    this asserts the topology treats it as online anyway.
    """
    online = [
        CpuCoreOnline(context_wrap("1", path=ONLINE_PATH.format(1))),
    ]
    siblings = [
        CpuSiblings(context_wrap("0", path=SIBLINGS_PATH.format(0))),
        CpuSiblings(context_wrap("1", path=SIBLINGS_PATH.format(1)))
    ]
    cpu_topology = CpuTopology(online, siblings)
    # No online parser was supplied for core 0, yet it reports online —
    # presumably CpuTopology defaults core 0 to online; confirm in the parser.
    assert cpu_topology.online(0)
    assert cpu_topology.siblings(0) == [0]
    assert cpu_topology.online(1)
    assert cpu_topology.siblings(1) == [1]
assert cpu_topology.all_solitary |
298,806 | test idct | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
import numpy as np
import pytest
import scipy.fftpack as fftpack
import torch
import pyro
from pyro.ops.tensor_utils import (
block_diag_embed,
block_diagonal,
convolve,
dct,
idct,
next_fast_len,
periodic_cumsum,
periodic_features,
periodic_repeat,
precision_to_scale_tril,
repeated_matmul,
)
from tests.common import assert_close, assert_equal
pytestmark = pytest.mark.stage("unit")
@pytest.mark.parametrize("batch_size", [1, 2, 3])
@pytest.mark.parametrize(
    "block_size", [torch.Size([2, 2]), torch.Size([3, 1]), torch.Size([4, 2])]
)
def test_block_diag_embed(batch_size, block_size):
    """block_diag_embed places each batch entry on the block diagonal."""
    m = torch.randn(block_size).unsqueeze(0).expand((batch_size,) + block_size)
    b = block_diag_embed(m)
    assert b.shape == (batch_size * block_size[0], batch_size * block_size[1])
    # Off-diagonal blocks are zero, so the totals must match.
    assert_equal(b.sum(), m.sum())
    # Each diagonal block must equal the corresponding batch entry.
    for k in range(batch_size):
        bottom, top = k * block_size[0], (k + 1) * block_size[0]
        left, right = k * block_size[1], (k + 1) * block_size[1]
        assert_equal(b[bottom:top, left:right], m[k])
@pytest.mark.parametrize("batch_shape", [torch.Size([]), torch.Size([7])])
@pytest.mark.parametrize(
    "mat_size,block_size",
    [(torch.Size([2, 2]), 2), (torch.Size([3, 1]), 1), (torch.Size([6, 3]), 3)],
)
def test_block_diag(batch_shape, mat_size, block_size):
    """block_diagonal inverts block_diag_embed (round-trip identity)."""
    mat = torch.randn(batch_shape + (block_size,) + mat_size)
    mat_embed = block_diag_embed(mat)
    mat_embed_diag = block_diagonal(mat_embed, block_size)
    assert_equal(mat_embed_diag, mat)
@pytest.mark.parametrize("size", [5, 6, 7, 8])
@pytest.mark.parametrize("period", [2, 3, 4])
@pytest.mark.parametrize("left_shape", [(), (6,), (3, 2)], ids=str)
@pytest.mark.parametrize("right_shape", [(), (7,), (5, 4)], ids=str)
def test_periodic_repeat(period, size, left_shape, right_shape):
    """periodic_repeat tiles a length-``period`` axis out to ``size``."""
    dim = -1 - len(right_shape)
    tensor = torch.randn(left_shape + (period,) + right_shape)
    actual = periodic_repeat(tensor, size, dim)
    assert actual.shape == left_shape + (size,) + right_shape
    # Position t along the repeated axis must equal source entry t % period.
    dots = (slice(None),) * len(left_shape)
    for t in range(size):
        assert_equal(actual[dots + (t,)], tensor[dots + (t % period,)])
@pytest.mark.parametrize("duration", range(3, 100))
def test_periodic_features(duration):
    """periodic_features emits bounded features of the expected width."""
    pyro.set_rng_seed(duration)  # deterministic per-duration sampling
    max_period = torch.distributions.Uniform(2, duration).sample().item()
    for max_period in [max_period, duration]:
        min_period = torch.distributions.Uniform(2, max_period).sample().item()
        for min_period in [min_period, 2]:
            actual = periodic_features(duration, max_period, min_period)
            assert actual.shape == (
                duration,
                2 * math.ceil(max_period / min_period) - 2,
            )
            # Features must lie in [-1, 1].
            assert (-1 <= actual).all()
            assert (actual <= 1).all()
@pytest.mark.parametrize("size", [5, 6, 7, 8])
@pytest.mark.parametrize("period", [2, 3, 4])
@pytest.mark.parametrize("left_shape", [(), (6,), (3, 2)], ids=str)
@pytest.mark.parametrize("right_shape", [(), (7,), (5, 4)], ids=str)
def test_periodic_cumsum(period, size, left_shape, right_shape):
    """periodic_cumsum accumulates along ``dim`` with stride ``period``."""
    dim = -1 - len(right_shape)
    tensor = torch.randn(left_shape + (size,) + right_shape)
    actual = periodic_cumsum(tensor, period, dim)
    assert actual.shape == tensor.shape
    dots = (slice(None),) * len(left_shape)
    # The first ``period`` entries pass through unchanged...
    for t in range(period):
        assert_equal(actual[dots + (t,)], tensor[dots + (t,)])
    # ...and each later entry adds the accumulated value one period earlier.
    for t in range(period, size):
        assert_close(
            actual[dots + (t,)], tensor[dots + (t,)] + actual[dots + (t - period,)]
        )
@pytest.mark.parametrize("m", [2, 3, 4, 5, 6, 10])
@pytest.mark.parametrize("n", [2, 3, 4, 5, 6, 10])
@pytest.mark.parametrize("mode", ["full", "valid", "same"])
def test_convolve_shape(m, n, mode):
    """convolve's output shape matches numpy.convolve for every mode."""
    signal = torch.randn(m)
    kernel = torch.randn(n)
    actual = convolve(signal, kernel, mode)
    expected = np.convolve(signal, kernel, mode=mode)
    assert actual.shape == expected.shape
@pytest.mark.parametrize("m", [2, 3, 4, 5, 6, 10])
@pytest.mark.parametrize("n", [2, 3, 4, 5, 6, 10])
@pytest.mark.parametrize("batch_shape", [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize("mode", ["full", "valid", "same"])
def test_convolve(batch_shape, m, n, mode):
    """Batched convolve agrees with per-row numpy.convolve."""
    signal = torch.randn(*batch_shape, m)
    kernel = torch.randn(*batch_shape, n)
    actual = convolve(signal, kernel, mode)
    # Reference: flatten the batch, convolve row by row in numpy, restack.
    expected = torch.stack(
        [
            torch.tensor(np.convolve(s, k, mode=mode))
            for s, k in zip(signal.reshape(-1, m), kernel.reshape(-1, n))
        ]
    ).reshape(*batch_shape, -1)
    assert_close(actual, expected)
@pytest.mark.parametrize(
    "size", [torch.Size([2, 2]), torch.Size([4, 3, 3]), torch.Size([4, 1, 2, 2])]
)
@pytest.mark.parametrize("n", [1, 2, 3, 7, 8])
def test_repeated_matmul(size, n):
    """repeated_matmul(M, n) stacks the matrix powers M^1 .. M^n."""
    M = torch.randn(size)
    result = repeated_matmul(M, n)
    assert result.shape == ((n,) + size)
    # Compare against serially accumulated matrix powers.
    serial_result = M
    for i in range(n):
        assert_equal(result[i, ...], serial_result)
        serial_result = torch.matmul(serial_result, M)
@pytest.mark.parametrize("shape", [(3, 4), (5,), (2, 1, 6)])
def test_dct(shape):
    """dct matches scipy.fftpack.dct with orthonormal scaling."""
    x = torch.randn(shape)
    actual = dct(x)
    expected = torch.from_numpy(fftpack.dct(x.numpy(), norm="ortho"))
    assert_close(actual, expected)
@pytest.mark.parametrize("shape", [(3, 4), (5,), (2, 1, 6)])
def METHOD_NAME(shape):
    """idct matches scipy.fftpack.idct with orthonormal scaling."""
    x = torch.randn(shape)
    actual = idct(x)
    expected = torch.from_numpy(fftpack.idct(x.numpy(), norm="ortho"))
    assert_close(actual, expected)
@pytest.mark.parametrize("dim", [-4, -3, -2, -1, 0, 1, 2, 3])
@pytest.mark.parametrize("fn", [dct, idct])
def test_dct_dim(fn, dim):
    """dct/idct along any dim equals transposing that dim to the end."""
    x = torch.randn(4, 5, 6, 7)
    actual = fn(x, dim=dim)
    if dim == -1 or dim == 3:
        # Last dim is the function's default; no transpose needed.
        expected = fn(x)
    else:
        expected = fn(x.transpose(-1, dim)).transpose(-1, dim)
    assert_close(actual, expected)
def test_next_fast_len():
    """next_fast_len agrees with scipy.fftpack.next_fast_len for 1..999."""
    for size in range(1, 1000):
        assert next_fast_len(size) == fftpack.next_fast_len(size)
@pytest.mark.parametrize(
    "batch_shape,event_shape",
    [
        ((), (5,)),
        ((3,), (4,)),
    ],
)
def test_precision_to_scale_tril(batch_shape, event_shape):
    """precision_to_scale_tril equals cholesky of the inverted precision."""
    x = torch.randn(batch_shape + event_shape + event_shape)
    precision = x.matmul(x.transpose(-2, -1))  # random SPD matrix
    actual = precision_to_scale_tril(precision)
    expected = torch.linalg.cholesky(precision.inverse())
assert_close(actual, expected) |
298,807 | get build args | # SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
'''
Build a package from source.
'''
from __future__ import print_function
import os
# Cache the developer package loaded from cwd. This is so the package is only
# loaded once, even though it's required once at arg parsing time (to determine
# valid build system types), and once at command run time.
#
_package = None
def get_current_developer_package():
    """Return the developer package found in the current working directory.

    The package is loaded at most once per process and memoized in the
    module-level ``_package`` (it is needed both at arg-parse time and at
    command run time).
    """
    from rez.packages import get_developer_package
    global _package
    if _package is None:
        _package = get_developer_package(os.getcwd())
    return _package
def setup_parser_common(parser):
    """Parser setup common to both rez-build and rez-release.

    Adds process/build-system/variant/args options, and — when exactly one
    build system is valid for the package in cwd — that system's own CLI
    arguments as a dedicated group.
    """
    from rez.build_process import get_build_process_types
    from rez.build_system import get_valid_build_systems
    from rez.exceptions import PackageMetadataError
    process_types = get_build_process_types()
    parser.add_argument(
        "--process", type=str, choices=process_types, default="local",
        help="the build process to use (default: %(default)s).")
    # add build system choices valid for this package
    try:
        package = get_current_developer_package()
    except PackageMetadataError:
        package = None  # no package, or bad package
    clss = get_valid_build_systems(os.getcwd(), package=package)
    if clss:
        # If there is exactly one valid build system, expose its own CLI
        # options directly on this parser.
        if len(clss) == 1:
            cls_ = clss[0]
            title = "%s build system arguments" % cls_.name()
            group = parser.add_argument_group(title)
            cls_.bind_cli(parser, group)
        types = [x.name() for x in clss]
    else:
        types = None
    parser.add_argument(
        "-b", "--build-system", dest="buildsys", choices=types,
        help="the build system to use. If not specified, it is detected. Set "
        "'build_system' or 'build_command' to specify the build system in the "
        "package itself.")
    parser.add_argument(
        "--variants", nargs='+', type=int, metavar="INDEX",
        help="select variants to build (zero-indexed).")
    parser.add_argument(
        "--ba", "--build-args", dest="build_args", metavar="ARGS",
        help="arguments to pass to the build system. Alternatively, list these "
        "after a '--'.")
    parser.add_argument(
        "--cba", "--child-build-args", dest="child_build_args", metavar="ARGS",
        help="arguments to pass to the child build system, if any. "
        "Alternatively, list these after a second '--'.")
def setup_parser(parser, completions=False):
    """Add rez-build specific options, then the options shared with release."""
    parser.add_argument(
        "-c", "--clean", action="store_true",
        help="clear the current build before rebuilding.")
    parser.add_argument(
        "-i", "--install", action="store_true",
        help="install the build to the local packages path. Use --prefix to "
        "choose a custom install path.")
    parser.add_argument(
        "-p", "--prefix", type=str, metavar='PATH',
        help="install to a custom package repository path.")
    parser.add_argument(
        "--fail-graph", action="store_true",
        help="if the build environment fails to resolve due to a conflict, "
        "display the resolve graph as an image.")
    parser.add_argument(
        "-s", "--scripts", action="store_true",
        help="create build scripts rather than performing the full build. "
        "Running these scripts will place you into a build environment, where "
        "you can invoke the build system directly.")
    parser.add_argument(
        "--view-pre", action="store_true",
        help="just view the preprocessed package definition, and exit.")
    setup_parser_common(parser)
def METHOD_NAME(opts, parser, extra_arg_groups):
    """Resolve build-system and child-build-system argument lists.

    Arguments may come from the --ba/--cba options or from trailing '--'
    groups on the command line, but not both for the same target; mixing
    them is a parser error.

    Returns:
        2-tuple of (build_args, child_build_args), each a list of strings.
    """
    trailing = list(extra_arg_groups or [])
    resolved = []
    for position, attr in enumerate(["build_args", "child_build_args"]):
        cli_attr = "--%s" % attr.replace("_", "-")
        group = trailing[position] if position < len(trailing) else []
        option = getattr(opts, attr, None)
        if option:
            if group:
                parser.error("argument %s: not allowed with arguments after '--'"
                             % cli_attr)
            group = option.strip().split()
        resolved.append(group)
    return resolved[0], resolved[1]
def command(opts, parser, extra_arg_groups=None):
    """Entry point for rez-build: create the build system and run the build.

    Exits early for --view-pre; exits with status 1 on resolve failure,
    optionally displaying the failed resolve graph.
    """
    from rez.exceptions import BuildContextResolveError
    from rez.build_process import create_build_process
    from rez.build_system import create_build_system
    from rez.serialise import FileFormat
    import sys
    # load package
    working_dir = os.getcwd()
    package = get_current_developer_package()
    if opts.view_pre:
        # Show the package definition post-preprocess, then stop.
        package.print_info(format_=FileFormat.py, skip_attributes=["preprocess"])
        sys.exit(0)
    # create build system
    build_args, child_build_args = METHOD_NAME(opts, parser, extra_arg_groups)
    buildsys = create_build_system(working_dir,
                                   package=package,
                                   buildsys_type=opts.buildsys,
                                   opts=opts,
                                   write_build_scripts=opts.scripts,
                                   verbose=True,
                                   build_args=build_args,
                                   child_build_args=child_build_args)
    # create and execute build process
    builder = create_build_process(opts.process,
                                   working_dir,
                                   build_system=buildsys,
                                   verbose=True)
    try:
        builder.build(install_path=opts.prefix,
                      clean=opts.clean,
                      install=opts.install,
                      variants=opts.variants)
    except BuildContextResolveError as e:
        print(str(e), file=sys.stderr)
        if opts.fail_graph:
            if e.context.graph:
                from rez.utils.graph_utils import view_graph
                g = e.context.graph(as_dot=True)
                view_graph(g)
            else:
                print("the failed resolve context did not generate a graph.",
                      file=sys.stderr)
        sys.exit(1)
298,808 | write | # License: Apache 2.0. See LICENSE file in root directory.
# Copyright(c) 2021 Intel Corporation. All Rights Reserved.
import sys, os
# We're usually the first to be imported, and so the first see the original arguments as passed
# into sys.argv... remember them before we change:
# (NOTE: sys.orig_argv is available as of 3.10)
original_args = sys.argv[1:]
def METHOD_NAME( s ):
    """
    Write ``s`` to stdout in fixed 8K chunks.

    A single write() of a very long string has been observed to emit only
    part of it, so the string is chunked instead.
    """
    chunk = 8192
    for start in range( 0, len(s), chunk ):
        sys.stdout.write( s[start:start + chunk] )
# Set up the default output system; if not a terminal, disable colors!
def _stream_has_color( stream ):
if not hasattr(stream, "isatty"):
return False
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
curses.setupterm()
return curses.tigetnum( "colors" ) > 2
except:
# guess false in case of error
return False
_have_no_color = False
if '--color' in sys.argv:
sys.argv.remove( '--color' )
_have_color = True
elif '--no-color' in sys.argv:
sys.argv.remove( '--no-color' )
_have_color = False
_have_no_color = True
else:
_have_color = _stream_has_color( sys.stdout )
if _have_color:
red = '\033[91m'
yellow = '\033[93m'
gray = '\033[90m'
reset = '\033[0m'
cr = '\033[G'
clear_eos = '\033[J'
clear_eol = '\033[K'
_progress = ''
def out( *args, sep = ' ', end = '\n', line_prefix = None, color = None ):
global _progress, reset
s = indent( sep.join( [str(s) for s in args] ), line_prefix )
if color:
s = color + s + reset
METHOD_NAME( s )
clear_to_eol = len(_progress) > 0 and end and end[-1] == '\n'
if clear_to_eol:
sys.stdout.write( clear_eol + end )
progress( *_progress )
else:
if end:
sys.stdout.write( end )
def progress(*args):
global _progress
sys.stdout.flush()
sys.stdout.write( '\0337' ) # save cursor
print( *args, end = clear_eol )
sys.stdout.write( '\0338' ) # restore cursor
_progress = args
else:
red = yellow = gray = reset = cr = clear_eos = ''
def out( *args, sep = ' ', end = '\n', line_prefix = None, color = None ):
s = indent( sep.join( [str(s) for s in args] ), line_prefix )
METHOD_NAME( s )
if end:
sys.stdout.write( end )
def progress(*args):
if args:
print( *args )
sys.stdout.flush()
def is_color_on():
global _have_color
return _have_color
def is_color_disabled():
global _have_no_color
return _have_no_color
def quiet_on():
global out
def out(*args):
pass
def indent( str, line_prefix = '    ' ):
    """
    Prefix every line of ``str`` with ``line_prefix``.

    A falsy prefix (None or '') returns the string unchanged.
    NOTE: the first parameter shadows the builtin ``str``; the name is kept
    for interface compatibility with existing keyword callers.
    """
    if not line_prefix:
        return str
    return line_prefix + ( '\n' + line_prefix ).join( str.split( '\n' ) )
_verbose_on = False
def v(*args):
pass
def verbose_on():
global v, _verbose_on
def v(*args):
global gray, reset
out( gray + '-V-', *args, reset )
_verbose_on = True
def is_verbose_on():
global _verbose_on
return _verbose_on
_debug_on = False
_debug_indent = ''
def d(*args):
# Return whether info was output
return False
def debug_on():
global d, _debug_on, _debug_indent
def d( *args ):
global gray
out( *args, line_prefix = "-D- " + _debug_indent, color = gray )
return True
_debug_on = True
def is_debug_on():
global _debug_on
return _debug_on
if '--debug' in sys.argv:
sys.argv.remove( '--debug' )
debug_on()
def debug_indent( n = 1, indentation = ' ' ):
global _debug_indent
_debug_indent += n * indentation
def debug_unindent( n = 1, indentation = ' ' ):
global _debug_indent
_debug_indent = _debug_indent[:-n * len(indentation)]
def i( *args ):
out( '-I-', *args)
def f( *args ):
out( '-F-', *args )
sys.exit(1)
# We track the number of errors
_n_errors = 0
def e( *args ):
global red, reset
out( *args, line_prefix = red + '-E-' + reset + ' ' )
global _n_errors
_n_errors = _n_errors + 1
def n_errors():
global _n_errors
return _n_errors
def reset_errors():
global _n_errors
_n_errors = 0
# We track the number of warnings
_n_warnings = 0
def w(*args):
    """Output a warning (yellow '-W-' prefix) and bump the warning count."""
    # The original declared ``global red, reset`` — a copy/paste leftover
    # from e(); this function only READS module globals (yellow/reset), and
    # reads need no global declaration.
    out( *args, line_prefix = yellow + '-W-' + reset + ' ' )
    global _n_warnings
    _n_warnings = _n_warnings + 1
def n_warnings():
global _n_warnings
return _n_warnings
def reset_warnings():
global _n_warnings
_n_warnings = 0
def split():
    """
    Output an easy-to-distinguish line separating text above from below.
    Currently a full-terminal-width line of "_____".
    """
    try:
        screen_width = os.get_terminal_size().columns
    except Exception:
        # Narrowed from a bare ``except:``. This happens under github
        # actions, for example, or when a terminal does not exist.
        screen_width = 60
    out( '\n' + '_' * screen_width )
|
298,809 | retrieve | from dateutil.relativedelta import relativedelta
from django.http import StreamingHttpResponse
from django.utils import timezone
from .models import ListModel
from rest_framework import viewsets
from . import serializers
from utils.page import MyPageNumberPagination
from rest_framework.filters import OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
from .filter import Filter
from rest_framework.generics import RetrieveAPIView,GenericAPIView
from rest_framework.viewsets import ViewSetMixin
from rest_framework.response import Response
from rest_framework.exceptions import APIException
from stock.models import StockBinModel
from dn.models import DnDetailModel
from dn.filter import DnDetailFilter
class SannerDnDetailPickingListView(viewsets.ModelViewSet):
    """
    list:
    Response a data list(all)

    DN picking details, scoped to the authenticated openid; soft-deleted
    rows are never exposed.
    """
    pagination_class = MyPageNumberPagination
    filter_backends = [DjangoFilterBackend, OrderingFilter, ]
    ordering_fields = ['id', "create_time", "update_time", ]
    filter_class = DnDetailFilter
    def get_project(self):
        # Return the pk from the URL kwargs, or None for list requests.
        try:
            id = self.kwargs.get('pk')
            return id
        except:
            return None
    def get_queryset(self):
        id = self.get_project()
        if self.request.user:
            # All queries are scoped to the caller's openid.
            if id is None:
                return DnDetailModel.objects.filter(openid=self.request.auth.openid, is_delete=False)
            else:
                return DnDetailModel.objects.filter(openid=self.request.auth.openid, id=id, is_delete=False)
        else:
            return DnDetailModel.objects.none()
    def get_serializer_class(self):
        if self.action in ['list', 'retrieve', 'destroy']:
            return serializers.SannerDnDetailPickingListGetSerializer
        else:
            # NOTE(review): calling http_method_not_allowed from
            # get_serializer_class is unusual for DRF — confirm intended.
            return self.http_method_not_allowed(request=self.request)
class ListViewSet(viewsets.ModelViewSet):
    """
    list:
    Response a data list(all)

    List entries scoped to the authenticated openid.
    """
    pagination_class = MyPageNumberPagination
    filter_backends = [DjangoFilterBackend, OrderingFilter, ]
    ordering_fields = ['id', "create_time", "update_time", ]
    filter_class = Filter
    def get_project(self):
        # Return the pk from the URL kwargs, or None for list requests.
        try:
            id = self.kwargs.get('pk')
            return id
        except:
            return None
    def get_queryset(self):
        id = self.get_project()
        if self.request.user:
            # All queries are scoped to the caller's openid.
            if id is None:
                return ListModel.objects.filter(openid=self.request.auth.openid)
            else:
                return ListModel.objects.filter(openid=self.request.auth.openid, id=id)
        else:
            return ListModel.objects.none()
    def get_serializer_class(self):
        # Only 'list' is supported; other actions are rejected.
        if self.action in ['list']:
            return serializers.ListGetSerializer
        else:
            return self.http_method_not_allowed(request=self.request)
class SannerView(viewsets.ModelViewSet):
"""
Retrieve:
Response a data retrieve
"""
pagination_class = MyPageNumberPagination
filter_backends = [DjangoFilterBackend, OrderingFilter, ]
ordering_fields = ['id', "create_time", "update_time", ]
filter_class = Filter
lookup_field = 'bar_code'
    def get_project(self):
        # Return the scanned bar code from the URL kwargs (None if absent).
        try:
            bar_code = self.kwargs['bar_code']
            return bar_code
        except:
            return None
def get_queryset(self):
bar_code = self.get_project()
if self.request.user:
if id is None:
return ListModel.objects.filter(openid=self.request.auth.openid)
else:
return ListModel.objects.filter(openid=self.request.auth.openid, bar_code=bar_code)
else:
return ListModel.objects.none()
    def get_serializer_class(self):
        # Only 'retrieve' (bar-code lookup) is supported on this view.
        if self.action in ['retrieve']:
            return serializers.ListGetSerializer
        else:
            return self.http_method_not_allowed(request=self.request)
def METHOD_NAME(self, request, *args, **kwargs):
instance = self.get_object()
serializer = self.get_serializer(instance)
return Response(serializer.data |
298,810 | access zipped assets | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import contextlib
import hashlib
import importlib
import os
import shutil
import tempfile
from hashlib import sha1
from site import makepath # type: ignore[attr-defined]
from pex import hashing
from pex.common import filter_pyc_dirs, filter_pyc_files, safe_mkdir, safe_mkdtemp
from pex.compatibility import ( # type: ignore[attr-defined] # `exec_function` is defined dynamically
PY2,
exec_function,
)
from pex.orderedset import OrderedSet
from pex.typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import IO, Any, Callable, Iterator, Optional
from pex.hashing import Hasher
class DistributionHelper(object):
    """Helper for extracting static assets bundled inside a distribution."""
    # TODO(#584: This appears unused, but clients might still use it. We cannot remove until we
    # have a deprecation policy.
    @classmethod
    def METHOD_NAME(cls, static_module_name, static_path, dir_location=None):
        # type: (str, str, Optional[str]) -> str
        """Create a copy of static resource files as we can't serve them from within the pex file.

        :param static_module_name: Module name containing module to cache in a tempdir
        :param static_path: Module name, for example 'serverset'
        :param dir_location: create a new temporary directory inside, or None to have one created
        :returns temp_dir: Temporary directory with the zipped assets inside
        """
        if dir_location is None:
            temp_dir = safe_mkdtemp()
        else:
            temp_dir = dir_location
        module = importlib.import_module(static_module_name)
        # N.B.: This handles namespace packages new and old.
        paths = OrderedSet(os.path.realpath(d) for d in getattr(module, "__path__", []))
        if module.__file__:
            # And this handles old-style __init__.py packages.
            paths.add(os.path.realpath(module.__file__))
        safe_mkdir(temp_dir)
        for path in paths:
            resource_dir = os.path.realpath(os.path.join(path, static_path))
            if os.path.isdir(resource_dir):
                # Mirror the resource tree into temp_dir, preserving layout.
                for root, dirs, files in os.walk(resource_dir):
                    for d in dirs:
                        safe_mkdir(
                            os.path.join(
                                temp_dir, os.path.relpath(os.path.join(root, d), resource_dir)
                            )
                        )
                    for f in files:
                        src = os.path.join(root, f)
                        shutil.copy(src, os.path.join(temp_dir, os.path.relpath(src, resource_dir)))
        return temp_dir
class CacheHelper(object):
    """Reproducible content hashing for files, directories and zips."""
    @classmethod
    def hash(cls, path, digest=None, hasher=sha1):
        # type: (str, Optional[Hasher], Callable[[], Hasher]) -> str
        """Return the digest of a single file in a memory-efficient manner."""
        if digest is None:
            digest = hasher()
        hashing.file_hash(path, digest)
        return digest.hexdigest()
    @classmethod
    def pex_code_hash(cls, directory):
        # type: (str) -> str
        """Return a reproducible hash of the contents of a loose PEX; excluding all `.pyc` files."""
        digest = hashlib.sha1()
        hashing.dir_hash(
            directory=directory,
            digest=digest,
            dir_filter=filter_pyc_dirs,
            # Dotfiles are also excluded, on top of the .pyc filtering.
            file_filter=lambda files: (f for f in filter_pyc_files(files) if not f.startswith(".")),
        )
        return digest.hexdigest()
    @classmethod
    def dir_hash(cls, directory, digest=None, hasher=sha1):
        # type: (str, Optional[Hasher], Callable[[], Hasher]) -> str
        """Return a reproducible hash of the contents of a directory; excluding all `.pyc` files."""
        if digest is None:
            digest = hasher()
        hashing.dir_hash(
            directory=directory,
            digest=digest,
            dir_filter=filter_pyc_dirs,
            file_filter=filter_pyc_files,
        )
        return digest.hexdigest()
    @classmethod
    def zip_hash(
        cls,
        zip_path,  # type: str
        relpath=None,  # type: Optional[str]
    ):
        # type: (...) -> str
        """Return a reproducible hash of the contents of a zip; excluding all `.pyc` files."""
        digest = hashlib.sha1()
        hashing.zip_hash(
            zip_path=zip_path,
            digest=digest,
            relpath=relpath,
            dir_filter=filter_pyc_dirs,
            file_filter=filter_pyc_files,
        )
        return digest.hexdigest()
@contextlib.contextmanager
def named_temporary_file(**kwargs):
    # type: (**Any) -> Iterator[IO]
    """Yield a NamedTemporaryFile that can be reopened by name, then delete it.

    Works around https://bugs.python.org/issue14243: the file is created
    with delete=False (so it can be used by name while open) and removed
    explicitly once the caller's ``with`` block exits.
    """
    assert "delete" not in kwargs
    fp = tempfile.NamedTemporaryFile(delete=False, **kwargs)
    try:
        with fp:
            yield fp
    finally:
        os.remove(fp.name)
298,811 | test erfcinv behavior | import unittest
import numpy
import cupy
from cupy import testing
import cupyx.scipy.special # NOQA
def _boundary_inputs(boundary, rtol, atol):
left = boundary * (1 - numpy.copysign(rtol, boundary)) - atol
right = boundary * (1 + numpy.copysign(rtol, boundary)) + atol
return [left, boundary, right]
class _TestBase(object):
    """Shared erf-family checks; subclasses supply the check_unary* helpers."""
    def test_erf(self):
        self.check_unary('erf')
    def test_erfc(self):
        self.check_unary('erfc')
    def test_erfcx(self):
        self.check_unary('erfcx')
    @testing.with_requires('scipy>=1.4.0')
    def test_erfinv(self):
        # erfinv's domain is [-1, 1]: interior samples plus both edges.
        self.check_unary('erfinv')
        self.check_unary_random('erfinv', scale=2, offset=-1)
        self.check_unary_boundary('erfinv', boundary=-1)
        self.check_unary_boundary('erfinv', boundary=1)
    @testing.with_requires('scipy>=1.4.0')
    def test_erfcinv(self):
        # erfcinv's domain is [0, 2]: interior samples plus both edges.
        self.check_unary('erfcinv')
        self.check_unary_random('erfcinv', scale=2, offset=0)
        self.check_unary_boundary('erfcinv', boundary=0)
        self.check_unary_boundary('erfcinv', boundary=2)
@testing.with_requires('scipy')
class TestSpecial(unittest.TestCase, _TestBase):
    """Compare cupyx.scipy.special directly against scipy.special."""
    @testing.for_dtypes(['e', 'f', 'd'])
    @testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
    def check_unary(self, name, xp, scp, dtype):
        """Fixed ascending inputs for function ``name``."""
        import scipy.special  # NOQA
        a = testing.shaped_arange((2, 3), xp, dtype)
        return getattr(scp.special, name)(a)
    @testing.for_dtypes(['f', 'd'])
    @testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
    def check_unary_random(self, name, xp, scp, dtype, scale, offset):
        """Random inputs in [offset, offset + scale) for function ``name``."""
        import scipy.special  # NOQA
        a = testing.shaped_random((2, 3), xp, dtype, scale=scale) + offset
        return getattr(scp.special, name)(a)
    @testing.for_dtypes(['f', 'd'])
    @testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
    def check_unary_boundary(self, name, xp, scp, dtype, boundary):
        """Inputs just below / at / just above a domain boundary."""
        import scipy.special  # NOQA
        a = _boundary_inputs(boundary, 1.0 / 1024, 1.0 / 1024)
        a = xp.array(a, dtype=dtype)
        return getattr(scp.special, name)(a)
    @testing.with_requires('scipy>=1.4.0')
    @testing.for_dtypes(['f', 'd'])
    def test_erfinv_behavior(self, dtype):
        # Outside the domain [-1, 1] -> nan; at the edges -> +/-inf.
        a = cupy.empty((1,), dtype=dtype)
        a[:] = 1.0 + 1E-6
        a = cupyx.scipy.special.erfinv(a)
        assert cupy.isnan(a)
        a[:] = -1.0 - 1E-6
        a = cupyx.scipy.special.erfinv(a)
        assert cupy.isnan(a)
        a[:] = 1.0
        a = cupyx.scipy.special.erfinv(a)
        assert numpy.isposinf(cupy.asnumpy(a))
        a[:] = -1.0
        a = cupyx.scipy.special.erfinv(a)
        assert numpy.isneginf(cupy.asnumpy(a))
    @testing.with_requires('scipy>=1.4.0')
    @testing.for_dtypes(['f', 'd'])
    def METHOD_NAME(self, dtype):
        # erfcinv: outside the domain [0, 2] -> nan; at the edges -> +/-inf.
        a = cupy.empty((1,), dtype=dtype)
        a[:] = 2.0 + 1E-6
        a = cupyx.scipy.special.erfcinv(a)
        assert cupy.isnan(a)
        a[:] = 0.0 - 1E-6
        a = cupyx.scipy.special.erfcinv(a)
        assert cupy.isnan(a)
        a[:] = 0.0
        a = cupyx.scipy.special.erfcinv(a)
        assert numpy.isposinf(cupy.asnumpy(a))
        a[:] = 2.0
        a = cupyx.scipy.special.erfcinv(a)
        assert numpy.isneginf(cupy.asnumpy(a))
@testing.with_requires('scipy')
class TestFusionSpecial(unittest.TestCase, _TestBase):
    """Same checks as TestSpecial, but with the call routed through cupy.fuse."""
    @testing.for_dtypes(['e', 'f', 'd'])
    @testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
    def check_unary(self, name, xp, scp, dtype):
        """Fixed ascending inputs, evaluated inside a fused kernel."""
        import scipy.special  # NOQA
        a = testing.shaped_arange((2, 3), xp, dtype)
        @cupy.fuse()
        def f(x):
            return getattr(scp.special, name)(x)
        return f(a)
    @testing.for_dtypes(['f', 'd'])
    @testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
    def check_unary_random(self, name, xp, scp, dtype, scale, offset):
        """Random inputs in [offset, offset + scale), fused."""
        import scipy.special  # NOQA
        a = testing.shaped_random((2, 3), xp, dtype, scale=scale) + offset
        @cupy.fuse()
        def f(x):
            return getattr(scp.special, name)(x)
        return f(a)
    @testing.for_dtypes(['f', 'd'])
    @testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
    def check_unary_boundary(self, name, xp, scp, dtype, boundary):
        """Inputs around a domain boundary, fused."""
        import scipy.special  # NOQA
        a = _boundary_inputs(boundary, 1.0 / 1024, 1.0 / 1024)
        a = xp.array(a, dtype=dtype)
        @cupy.fuse()
        def f(x):
            return getattr(scp.special, name)(x)
        return f(a)
298,812 | test basic access | # This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
import threading
from functools import partial
from random import Random
import pytest
from hypothesis import (
HealthCheck,
assume,
example,
given,
note,
settings,
strategies as st,
)
from hypothesis.internal.cache import GenericCache, LRUReusedCache
class LRUCache(GenericCache):
    """GenericCache scored by a monotonically increasing access counter,
    yielding least-recently-used eviction order."""
    __slots__ = ("__tick",)
    def __init__(self, max_size):
        super().__init__(max_size)
        self.__tick = 0
    def tick(self):
        # Each call returns a strictly larger value than the previous one.
        self.__tick += 1
        return self.__tick
    def new_entry(self, key, value):
        return self.tick()
    def on_access(self, key, value, score):
        return self.tick()
class LFUCache(GenericCache):
    """Least-frequently-used eviction: the score is the access count."""
    def new_entry(self, key, value):
        # The initial insertion counts as one use.
        return 1
    def on_access(self, key, value, score):
        return score + 1
@st.composite
def write_pattern(draw, min_size=0):
    """Strategy: a list of (key, value) writes drawn from small shared pools,
    so keys repeat often enough to exercise updates and re-accesses."""
    keys = draw(st.lists(st.integers(0, 1000), unique=True, min_size=1))
    values = draw(st.lists(st.integers(), unique=True, min_size=1))
    return draw(
        st.lists(
            st.tuples(st.sampled_from(keys), st.sampled_from(values)), min_size=min_size
        )
    )
class ValueScored(GenericCache):
    """Cache whose score is the stored value itself."""
    def new_entry(self, key, value):
        return value
class RandomCache(GenericCache):
    """Cache with random (but seeded, hence reproducible) scores."""
    def __init__(self, max_size):
        super().__init__(max_size)
        self.random = Random(0)
    def new_entry(self, key, value):
        return self.random.random()
    def on_access(self, key, value, score):
        return self.random.random()
@pytest.mark.parametrize(
    "implementation", [LRUCache, LFUCache, LRUReusedCache, ValueScored, RandomCache]
)
@example(writes=[(0, 0), (3, 0), (1, 0), (2, 0), (2, 0), (1, 0)], size=4)
@example(writes=[(0, 0)], size=1)
@example(writes=[(1, 0), (2, 0), (0, -1), (1, 0)], size=3)
@given(write_pattern(), st.integers(1, 10))
def test_behaves_like_a_dict_with_losses(implementation, writes, size):
    """Every cache behaves like a dict, except entries may be evicted."""
    model = {}
    target = implementation(max_size=size)
    for k, v in writes:
        # Whenever both sides hold a key they must agree on its value.
        try:
            assert model[k] == target[k]
        except KeyError:
            pass
        model[k] = v
        target[k] = v
        target.check_valid()
        assert target[k] == v
    for r, s in model.items():
        try:
            assert s == target[r]
        except KeyError:
            pass
    # The cache may lose entries, but never exceed its capacity.
    assert len(target) <= min(len(model), size)
@settings(suppress_health_check=[HealthCheck.too_slow], deadline=None)
@given(write_pattern(min_size=2), st.data())
def test_always_evicts_the_lowest_scoring_value(writes, data):
    """Whenever the cache evicts, the victim must hold the minimum score.

    Scores are drawn interactively from the ``data`` strategy, so every
    eviction decision is checked against freshly chosen scores.
    """
    scores = {}
    n_keys = len({k for k, _ in writes})
    assume(n_keys > 1)
    # Capacity strictly below the key count guarantees evictions happen.
    size = data.draw(st.integers(1, n_keys - 1))
    evicted = set()
    def new_score(key):
        scores[key] = data.draw(st.integers(0, 1000), label=f"scores[{key!r}]")
        return scores[key]
    last_entry = [None]
    class Cache(GenericCache):
        def new_entry(self, key, value):
            last_entry[0] = key
            evicted.discard(key)
            assert key not in scores
            return new_score(key)
        def on_access(self, key, value, score):
            assert key in scores
            return new_score(key)
        def on_evict(self, key, value, score):
            note(f"Evicted {key!r}")
            assert score == scores[key]
            del scores[key]
            # The evicted score must not beat any surviving entry (the one
            # currently being inserted is exempt).
            if len(scores) > 1:
                assert score <= min(v for k, v in scores.items() if k != last_entry[0])
            evicted.add(key)
    target = Cache(max_size=size)
    model = {}
    for k, v in writes:
        target[k] = v
        model[k] = v
    assert evicted
    assert len(evicted) + len(target) == len(model)
    assert len(scores) == len(target)
    for k, v in model.items():
        try:
            assert target[k] == v
            assert k not in evicted
        except KeyError:
            assert k in evicted
def METHOD_NAME():
    """Basic get/set round-trip with one eviction in a size-2 cache."""
    cache = ValueScored(max_size=2)
    cache[1] = 0
    cache[1] = 0
    cache[0] = 1
    cache[2] = 0
    # The asserted survivors are keys 2 and 0, so key 1 was the eviction
    # victim when key 2 was inserted.
    assert cache[2] == 0
    assert cache[0] == 1
    assert len(cache) == 2
def test_can_clear_a_cache():
    """clear() empties the cache."""
    x = ValueScored(1)
    x[0] = 1
    assert len(x) == 1
    x.clear()
    assert len(x) == 0
def test_max_size_cache_ignores():
    """A zero-capacity cache silently drops every write."""
    x = ValueScored(0)
    x[0] = 1
    with pytest.raises(KeyError):
        x[0]
def test_pinning_prevents_eviction():
    """A pinned key survives arbitrarily many subsequent insertions."""
    cache = LRUReusedCache(max_size=10)
    cache[20] = 1
    cache.pin(20)
    for i in range(20):
        cache[i] = 0
    assert cache[20] == 1
def test_unpinning_allows_eviction():
    """Once unpinned, a key becomes evictable again."""
    cache = LRUReusedCache(max_size=10)
    cache[20] = True
    cache.pin(20)
    for i in range(20):
        cache[i] = False
    assert 20 in cache
    cache.unpin(20)
    # The next insertion may now evict the previously pinned key.
    cache[21] = False
    assert 20 not in cache
def test_unpins_must_match_pins():
    """Pins are counted: an entry stays pinned until every pin is matched by
    an unpin."""
    lru = LRUReusedCache(max_size=2)
    lru[1] = 1
    lru.pin(1)
    assert lru.is_pinned(1)
    lru.pin(1)  # second pin on the same key
    assert lru.is_pinned(1)
    lru.unpin(1)  # one unpin is not enough
    assert lru.is_pinned(1)
    lru.unpin(1)  # balanced now
    assert not lru.is_pinned(1)
def test_will_error_instead_of_evicting_pin():
    """If every slot is pinned, inserting a new key raises rather than evicts."""
    lru = LRUReusedCache(max_size=1)
    lru[1] = 1
    lru.pin(1)
    with pytest.raises(ValueError):
        lru[2] = 2
def test_will_error_for_bad_unpin():
    """Unpinning a key that was never pinned is an error."""
    lru = LRUReusedCache(max_size=1)
    lru[1] = 1
    with pytest.raises(ValueError):
        lru.unpin(1)
def test_still_inserts_if_score_is_worse():
    """A new entry is inserted even when its score is worse than the one it
    displaces (scores here are simply the keys)."""
    class KeyScored(GenericCache):
        def new_entry(self, key, value):
            return key

    tiny = KeyScored(1)
    tiny[0] = 1
    tiny[1] = 1
    assert 0 not in tiny
    assert 1 in tiny
    assert len(tiny) == 1
def test_does_insert_if_score_is_better():
    """A new entry with a better (lower) score also replaces the old one
    (scores here are simply the values)."""
    class ValueAsScore(GenericCache):
        def new_entry(self, key, value):
            return value

    tiny = ValueAsScore(1)
    tiny[0] = 1
    tiny[1] = 0
    assert 0 not in tiny
    assert 1 in tiny
    assert len(tiny) == 1
def test_double_pinning_does_not_increase_pin_count():
    """Pinning the same key twice still occupies only one slot."""
    lru = LRUReusedCache(2)
    lru[0] = 0
    lru.pin(0)
    lru.pin(0)
    lru[1] = 1
    assert len(lru) == 2
def test_can_add_new_keys_after_unpinning():
    """After a pin/unpin round-trip the slot is reusable for new keys."""
    lru = LRUReusedCache(1)
    lru[0] = 0
    lru.pin(0)
    lru.unpin(0)
    lru[1] = 1
    assert len(lru) == 1
    assert 1 in lru
def test_iterates_over_remaining_keys():
    """Iterating the cache yields only keys that survived eviction."""
    lru = LRUReusedCache(2)
    for key in range(3):
        lru[key] = "hi"
    assert sorted(lru) == [1, 2]
def test_cache_is_threadsafe_issue_2433_regression():
    """Regression for issue 2433: concurrent st.builds() calls hammer the
    internal cache from several threads; none of them may raise."""
    errors = []

    def hammer():
        for _ in range(1000):
            try:
                st.builds(partial(str))
            except Exception as exc:
                errors.append(exc)

    threads = [threading.Thread(target=hammer) for _ in range(4)]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    assert not errors
def test_pin_and_unpin_are_noops_if_dropped():
    """Pinning or unpinning a key that was already evicted does nothing.

    See https://github.com/HypothesisWorks/hypothesis/issues/3169
    """
    lru = LRUReusedCache(max_size=10)
    lru[30] = True
    assert 30 in lru
    for key in range(20):
        lru[key] = False
    assert 30 not in lru
    lru.pin(30)
    assert 30 not in lru
    lru.unpin(30)
    assert 30 not in lru
"""The ELL1k model for approximately handling near-circular orbits."""
import astropy.constants as c
import astropy.units as u
import numpy as np
from .ELL1_model import ELL1model
class ELL1kmodel(ELL1model):
    """This is a class for base ELL1k pulsar binary model.

    ELL1k model is a generalization of ELL1 model to handle systems with
    large advance of periastron.

    References
    ----------
    - Susobhanan et al. (2018), MNRAS, 480 (4), 5260-5271 [1]_

    .. [1] https://ui.adsabs.harvard.edu/abs/2018MNRAS.480.5260S/abstract
    """

    def __init__(self):
        super().__init__()
        self.binary_name = "ELL1k"
        self.binary_delay_funcs = [self.ELL1kdelay]
        self.d_binarydelay_d_par_funcs = [self.d_ELL1kdelay_d_par]

        # ELL1k replaces the ELL1 eccentricity-evolution parameters
        # (EPS1DOT/EPS2DOT/EDOT) with an advance of periastron (OMDOT) and a
        # logarithmic eccentricity derivative (LNEDOT).
        self.param_default_value.pop("EPS1DOT")
        self.param_default_value.pop("EPS2DOT")
        self.param_default_value.pop("EDOT")
        self.param_default_value.update(
            {"OMDOT": u.Quantity(0, "deg/year"), "LNEDOT": u.Quantity(0, "1/year")}
        )
        self.binary_params = list(self.param_default_value.keys())
        self.set_param_values()  # Set parameters to default values.
        # self.orbits_func = self.orbits_ELL1

    @property
    def tt0(self):
        # ELL1-family models reference times to the epoch of the ascending
        # node, so the generic tt0 is the time since TASC.
        return self.ttasc()

    ###############################

    def eps1(self):
        """EPS1 as a function of time
        Susobhanan+ 2018 Eq. 15"""
        eps10 = self.EPS1
        eps20 = self.EPS2
        omdot = self.OMDOT
        lnedot = self.LNEDOT
        dt = self.ttasc()
        return (1 + lnedot * dt) * (
            eps10 * np.cos(omdot * dt) + eps20 * np.sin(omdot * dt)
        )

    def d_eps1_d_EPS1(self):
        """d[eps1]/d[EPS1] = (1 + LNEDOT*dt) * cos(OMDOT*dt)"""
        omdot = self.OMDOT
        lnedot = self.LNEDOT
        dt = self.ttasc()
        return (1 + lnedot * dt) * np.cos(omdot * dt)

    def d_eps1_d_EPS2(self):
        """d[eps1]/d[EPS2] = (1 + LNEDOT*dt) * sin(OMDOT*dt)"""
        omdot = self.OMDOT
        lnedot = self.LNEDOT
        dt = self.ttasc()
        return (1 + lnedot * dt) * np.sin(omdot * dt)

    def d_eps1_d_OMDOT(self):
        """d[eps1]/d[OMDOT] = eps2(t) * dt (differentiate Eq. 15)."""
        dt = self.ttasc()
        return self.eps2() * dt

    def d_eps1_d_LNEDOT(self):
        """d[eps1]/d[LNEDOT] = eps1(t) * dt / (1 + LNEDOT*dt)."""
        lnedot = self.LNEDOT
        dt = self.ttasc()
        return self.eps1() * dt / (1 + lnedot * dt)

    def d_eps1_d_TASC(self):
        """d[eps1]/d[TASC]; note d(dt)/d(TASC) = -1."""
        omdot = self.OMDOT
        lnedot = self.LNEDOT
        dt = self.ttasc()
        return -self.eps1() * lnedot / (1 + lnedot * dt) - self.eps2() * omdot

    def eps2(self):
        """EPS2 as a function of time
        Susobhanan+ 2018 Eq. 15"""
        eps10 = self.EPS1
        eps20 = self.EPS2
        omdot = self.OMDOT
        lnedot = self.LNEDOT
        dt = self.ttasc()
        return (1 + lnedot * dt) * (
            eps20 * np.cos(omdot * dt) - eps10 * np.sin(omdot * dt)
        )

    def d_eps2_d_EPS1(self):
        """d[eps2]/d[EPS1] = -(1 + LNEDOT*dt)*sin(OMDOT*dt) = -d[eps1]/d[EPS2]."""
        return -self.d_eps1_d_EPS2()

    def d_eps2_d_EPS2(self):
        # d[eps2]/d[EPS2] = (1 + LNEDOT*dt)*cos(OMDOT*dt), which is exactly
        # d[eps1]/d[EPS1].  BUGFIX: the previous implementation negated this
        # derivative, giving the wrong sign for the EPS2 partial.
        return self.d_eps1_d_EPS1()

    def d_eps2_d_OMDOT(self):
        """d[eps2]/d[OMDOT] = -eps1(t) * dt.

        Named ``d_eps2_d_OMDOT`` (was a METHOD_NAME placeholder) so the
        generic ``prtl_der`` machinery can locate it by name, mirroring
        ``d_eps1_d_OMDOT`` above.
        """
        dt = self.ttasc()
        return -self.eps1() * dt

    def d_eps2_d_LNEDOT(self):
        """d[eps2]/d[LNEDOT] = eps2(t) * dt / (1 + LNEDOT*dt)."""
        lnedot = self.LNEDOT
        dt = self.ttasc()
        return self.eps2() * dt / (1 + lnedot * dt)

    def d_eps2_d_TASC(self):
        """d[eps2]/d[TASC]; note d(dt)/d(TASC) = -1."""
        omdot = self.OMDOT
        lnedot = self.LNEDOT
        dt = self.ttasc()
        return -self.eps2() * lnedot / (1 + lnedot * dt) + self.eps1() * omdot

    def delayR(self):
        """ELL1k Roemer delay in proper time.
        Susobhanan et al 2018 Eq. 6
        There is an extra term (-3*a1*eps1)/(2*c) as compared to the ELL1 model."""
        Phi = self.Phi()
        return (
            self.a1()
            / c.c
            * (
                np.sin(Phi)
                + 0.5
                * (self.eps2() * np.sin(2 * Phi) - self.eps1() * (np.cos(2 * Phi) + 3))
            )
        ).decompose()

    def d_Dre_d_par(self, par):
        """Derivative computation.

        Computes::

            Dre = delayR = a1/c.c*(sin(phi) - 0.5* eps1*(cos(2*phi) + 3) + 0.5* eps2*sin(2*phi))
            d_Dre_d_par = d_a1_d_par/c.c * (sin(phi) - 0.5* eps1*(cos(2*phi) + 3) + 0.5* eps2*sin(2*phi))
                          + d_Dre_d_Phi * d_Phi_d_par
                          + d_Dre_d_eps1 * d_eps1_d_par
                          + d_Dre_d_eps2 * d_eps2_d_par
        """
        a1 = self.a1()
        Phi = self.Phi()
        eps1 = self.eps1()
        eps2 = self.eps2()
        d_a1_d_par = self.prtl_der("a1", par)
        d_Dre_d_Phi = self.Drep()
        d_Phi_d_par = self.prtl_der("Phi", par)
        # Partials of the Roemer delay w.r.t. the time-dependent eps1/eps2.
        d_Dre_d_eps1 = a1 / c.c * (-0.5 * (np.cos(2 * Phi) + 3))
        d_Dre_d_eps2 = a1 / c.c * (0.5 * np.sin(2 * Phi))
        with u.set_enabled_equivalencies(u.dimensionless_angles()):
            d_Dre_d_par = (
                d_a1_d_par
                / c.c
                * (
                    np.sin(Phi)
                    - 0.5 * eps1 * (np.cos(2 * Phi) + 3)
                    + 0.5 * eps2 * np.sin(2 * Phi)
                )
                + d_Dre_d_Phi * d_Phi_d_par
                + d_Dre_d_eps1 * self.prtl_der("eps1", par)
                + d_Dre_d_eps2 * self.prtl_der("eps2", par)
            )
        return d_Dre_d_par

    def ELL1kdelay(self):
        """Total ELL1k binary delay (inverse + Shapiro terms)."""
        # TODO add aberration delay
        return self.delayI() + self.delayS()

    def d_ELL1kdelay_d_par(self, par):
        """Partial derivative of the total ELL1k delay w.r.t. parameter `par`."""
        return self.d_delayI_d_par(par) + self.d_delayS_d_par(par)
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Novell, Inc.
# This library is free software; you can redistribute it and/or modify
# it only under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
try:
import unittest2 as unittest
except ImportError:
import unittest
import os.path
import sys
try:
from unittest.mock import MagicMock, call, patch
except ImportError:
from mock import MagicMock, call, patch
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from helper import ConsoleRecorder, read_data_from_fixture
from spacewalk.susemanager.mgr_sync.cli import get_options
from spacewalk.susemanager.mgr_sync.mgr_sync import MgrSync
from spacewalk.susemanager.mgr_sync import logger
class CredentialOperationsTest(unittest.TestCase):
    """Tests for the mgr-sync 'list/add/delete credentials' commands.

    All XML-RPC traffic, authentication and config writes are replaced with
    MagicMock stubs, so no server or filesystem state is touched.
    """

    def setUp(self):
        # Stub out the connection, logger, auth token and config persistence
        # of a real MgrSync instance.
        self.mgr_sync = MgrSync()
        self.mgr_sync.conn = MagicMock()
        self.mgr_sync.log = self.mgr_sync.__init__logger = MagicMock(
            return_value=logger.Logger(3, "tmp.log"))
        self.fake_auth_token = "fake_token"
        self.mgr_sync.auth.token = MagicMock(
            return_value=self.fake_auth_token)
        self.mgr_sync.config.write = MagicMock()
        self.mgr_sync.conn.sync.master.hasMaster = MagicMock(return_value=False)

    def tearDown(self):
        # Remove the log file a test run may have left behind.
        if os.path.exists("tmp.log"):
            os.unlink("tmp.log")

    def test_list_credentials_no_credentials(self):
        """ Test listing credentials with none present """
        # Renamed from the METHOD_NAME placeholder: without the 'test_'
        # prefix this case would never be collected by the test runner.
        options = get_options("list credentials".split())
        stubbed_xmlrpm_call = MagicMock(return_value=[])
        self.mgr_sync._execute_xmlrpc_method = stubbed_xmlrpm_call
        with ConsoleRecorder() as recorder:
            self.mgr_sync.run(options)
        self.assertEqual(recorder.stdout, ["No credentials found"])
        stubbed_xmlrpm_call.assert_called_once_with(
            self.mgr_sync.conn.sync.content,
            "listCredentials",
            self.fake_auth_token)

    def test_list_credentials(self):
        """ Test listing credentials """
        options = get_options("list credentials".split())
        stubbed_xmlrpm_call = MagicMock(
            return_value=read_data_from_fixture(
                'list_credentials.data'))
        self.mgr_sync._execute_xmlrpc_method = stubbed_xmlrpm_call
        with ConsoleRecorder() as recorder:
            self.mgr_sync.run(options)
        expected_output = """Credentials:
foo (primary)
bar"""
        self.assertEqual(expected_output.split("\n"), recorder.stdout)
        stubbed_xmlrpm_call.assert_called_once_with(
            self.mgr_sync.conn.sync.content,
            "listCredentials",
            self.fake_auth_token)

    def test_list_credentials_interactive(self):
        """ Test listing credentials when interactive mode is set """
        stubbed_xmlrpm_call = MagicMock(
            return_value=read_data_from_fixture("list_credentials.data"))
        self.mgr_sync._execute_xmlrpc_method = stubbed_xmlrpm_call
        credentials = []
        with ConsoleRecorder() as recorder:
            credentials = self.mgr_sync._list_credentials(show_interactive_numbers=True)
        expected_output = """Credentials:
01) foo (primary)
02) bar"""
        self.assertEqual(expected_output.split("\n"), recorder.stdout)
        stubbed_xmlrpm_call.assert_called_once_with(
            self.mgr_sync.conn.sync.content,
            "listCredentials",
            self.fake_auth_token)

    def test_add_credentials_interactive(self):
        """ Test adding credentials interactively """
        options = get_options("add credentials".split())
        self.mgr_sync._fetch_credentials = MagicMock(
            return_value=read_data_from_fixture("list_credentials.data"))
        stubbed_xmlrpm_call = MagicMock()
        self.mgr_sync._execute_xmlrpc_method = stubbed_xmlrpm_call
        # cli_ask answers: username, password, password confirmation.
        with patch('spacewalk.susemanager.mgr_sync.mgr_sync.cli_ask') as mock:
            mock.side_effect = ["foobar", "foo", "foo"]
            with ConsoleRecorder() as recorder:
                self.assertEqual(0, self.mgr_sync.run(options))
        stubbed_xmlrpm_call.assert_called_once_with(
            self.mgr_sync.conn.sync.content,
            "addCredentials",
            self.fake_auth_token,
            "foobar",
            "foo",
            False)
        self.assertEqual(recorder.stdout, ["Successfully added credentials."])

    def test_add_credentials_non_interactive(self):
        """ Test adding credentials non-interactively """
        options = get_options("add credentials foobar foo".split())
        self.mgr_sync._fetch_credentials = MagicMock(
            return_value=read_data_from_fixture("list_credentials.data"))
        stubbed_xmlrpm_call = MagicMock()
        self.mgr_sync._execute_xmlrpc_method = stubbed_xmlrpm_call
        with ConsoleRecorder() as recorder:
            self.assertEqual(0, self.mgr_sync.run(options))
        stubbed_xmlrpm_call.assert_called_once_with(
            self.mgr_sync.conn.sync.content,
            "addCredentials",
            self.fake_auth_token,
            "foobar",
            "foo",
            False)

    def test_delete_credentials_interactive(self):
        """ Test deleting credentials interactively """
        options = get_options("delete credentials".split())
        self.mgr_sync._fetch_credentials = MagicMock(
            return_value=read_data_from_fixture("list_credentials.data"))
        stubbed_xmlrpm_call = MagicMock()
        self.mgr_sync._execute_xmlrpc_method = stubbed_xmlrpm_call
        # cli_ask answers: credential number to delete, confirmation.
        with patch('spacewalk.susemanager.mgr_sync.mgr_sync.cli_ask') as mock:
            mock.side_effect = ["1", "y"]
            with ConsoleRecorder() as recorder:
                self.assertEqual(0, self.mgr_sync.run(options))
        stubbed_xmlrpm_call.assert_called_once_with(
            self.mgr_sync.conn.sync.content,
            "deleteCredentials",
            self.fake_auth_token,
            "foo")
        self.assertEqual([recorder.stdout[-1]], ["Successfully deleted credentials: foo"])

    def test_delete_credentials_non_interactive(self):
        """ Test deleting credentials non-interactively """
        options = get_options("delete credentials foo".split())
        self.mgr_sync._fetch_credentials = MagicMock(
            return_value=read_data_from_fixture("list_credentials.data"))
        stubbed_xmlrpm_call = MagicMock()
        self.mgr_sync._execute_xmlrpc_method = stubbed_xmlrpm_call
        with ConsoleRecorder() as recorder:
            self.assertEqual(0, self.mgr_sync.run(options))
        stubbed_xmlrpm_call.assert_called_once_with(
            self.mgr_sync.conn.sync.content,
            "deleteCredentials",
            self.fake_auth_token,
            "foo")
        self.assertEqual(recorder.stdout, ["Successfully deleted credentials: foo"])
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._peering_service_providers_operations import build_list_request
from .._vendor import PeeringManagementClientMixinABC
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PeeringServiceProvidersOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.peering.aio.PeeringManagementClient`'s
        :attr:`peering_service_providers` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # The client injects its pipeline, config and (de)serializers either
        # positionally or by keyword.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(self, **kwargs: Any) -> AsyncIterable["_models.PeeringServiceProvider"]:
        """Lists all of the available peering service locations for the specified kind of peering.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PeeringServiceProvider or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.peering.models.PeeringServiceProvider]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )  # type: Literal["2022-10-01"]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.PeeringServiceProviderListResult]

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        # Renamed from the METHOD_NAME placeholder; the name is purely local
        # to list(), so callers are unaffected.
        def prepare_request(next_link=None):
            if not next_link:
                # First page: build the canonical list request.
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next page link, items).
            deserialized = self._deserialize("PeeringServiceProviderListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Peering/peeringServiceProviders"}  # type: ignore
from pathlib import Path
import pytest
import responses
from reactpy.testing import assert_reactpy_did_log
from reactpy.web.utils import (
module_name_suffix,
resolve_module_exports_from_file,
resolve_module_exports_from_source,
resolve_module_exports_from_url,
)
JS_FIXTURES_DIR = Path(__file__).parent / "js_fixtures"
@pytest.mark.parametrize(
    "name, suffix",
    [
        ("module", ".js"),
        ("module.ext", ".ext"),
        ("module@x.y.z", ".js"),
        ("module.ext@x.y.z", ".ext"),
        ("@namespace/module", ".js"),
        ("@namespace/module.ext", ".ext"),
        ("@namespace/module@x.y.z", ".js"),
        ("@namespace/module.ext@x.y.z", ".ext"),
    ],
)
def test_module_name_suffix(name, suffix):
    """module_name_suffix extracts the file suffix (default .js) from plain,
    versioned and namespaced JS module specifiers.

    Renamed from the METHOD_NAME placeholder: without a 'test_' prefix the
    parametrized cases are never collected by pytest.
    """
    assert module_name_suffix(name) == suffix
@responses.activate
def test_resolve_module_exports_from_file(caplog):
    """Exports are resolved recursively through local and remote re-exports."""
    responses.add(
        responses.GET,
        "https://some.external.url",
        body="export {something as ExternalUrl}",
    )
    entry_point = JS_FIXTURES_DIR / "export-resolution" / "index.js"
    expected = {"Index", "One", "Two", "ExternalUrl"}
    assert resolve_module_exports_from_file(entry_point, 4) == expected
def test_resolve_module_exports_from_file_log_on_max_depth(caplog):
    """Hitting the recursion limit truncates resolution and logs a warning."""
    entry_point = JS_FIXTURES_DIR / "export-resolution" / "index.js"

    # Depth 0: nothing resolved at all, one "max depth" record logged.
    assert resolve_module_exports_from_file(entry_point, 0) == set()
    assert len(caplog.records) == 1
    assert caplog.records[0].message.endswith("max depth reached")

    caplog.records.clear()

    # Depth 2: only the first two levels are resolved, again with a warning.
    assert resolve_module_exports_from_file(entry_point, 2) == {"Index", "One"}
    assert len(caplog.records) == 1
    assert caplog.records[0].message.endswith("max depth reached")
def test_resolve_module_exports_from_file_log_on_unknown_file_location(
    caplog, tmp_path
):
    """A re-export pointing at a missing file is logged, not raised."""
    broken_module = tmp_path / "some.js"
    broken_module.write_text("export * from './does-not-exist.js';")
    resolve_module_exports_from_file(broken_module, 2)
    assert len(caplog.records) == 1
    assert caplog.records[0].message.startswith(
        "Did not resolve exports for unknown file"
    )
@responses.activate
def test_resolve_module_exports_from_url():
    """Exports are followed across absolute and relative remote re-exports."""
    mocked_modules = [
        (
            "https://some.url/first.js",
            "export const First = 1; export * from 'https://another.url/path/second.js';",
        ),
        (
            "https://another.url/path/second.js",
            "export const Second = 2; export * from '../third.js';",
        ),
        (
            "https://another.url/third.js",
            "export const Third = 3; export * from './fourth.js';",
        ),
        (
            "https://another.url/fourth.js",
            "export const Fourth = 4;",
        ),
    ]
    for url, body in mocked_modules:
        responses.add(responses.GET, url, body=body)

    expected = {"First", "Second", "Third", "Fourth"}
    assert resolve_module_exports_from_url("https://some.url/first.js", 4) == expected
def test_resolve_module_exports_from_url_log_on_max_depth(caplog):
    """Zero depth resolves nothing from a URL and logs a warning."""
    assert resolve_module_exports_from_url("https://some.url", 0) == set()
    assert len(caplog.records) == 1
    assert caplog.records[0].message.endswith("max depth reached")
def test_resolve_module_exports_from_url_log_on_bad_response(caplog):
    """An unreachable URL yields an empty export set and a log record."""
    assert resolve_module_exports_from_url("https://some.url", 1) == set()
    assert len(caplog.records) == 1
    assert caplog.records[0].message.startswith("Did not resolve exports for url")
@pytest.mark.parametrize(
    "text",
    [
        "export default expression;",
        "export default function (…) { … } // also class, function*",
        "export default function name1(…) { … } // also class, function*",
        "export { something as default };",
        "export { default } from 'some-source';",
        "export { something as default } from 'some-source';",
    ],
)
def test_resolve_module_default_exports_from_source(text):
    """Every form of default export resolves to just {'default'}."""
    names, references = resolve_module_exports_from_source(text, exclude_default=False)
    assert names == {"default"}
    assert not references
def test_resolve_module_exports_from_source():
    """The syntax fixture covers named, function, class and re-exported names."""
    fixture_file = JS_FIXTURES_DIR / "exports-syntax.js"
    names, references = resolve_module_exports_from_source(
        fixture_file.read_text(), exclude_default=False
    )
    expected_names = {f"name{i}" for i in range(1, 21)} | {"functionName", "ClassName"}
    assert names == expected_names
    assert references == {"https://source1.com", "https://source2.com"}
def test_log_on_unknown_export_type():
    """Unparseable export syntax logs a warning and resolves to nothing."""
    with assert_reactpy_did_log(match_message="Unknown export type "):
        resolved = resolve_module_exports_from_source(
            "export something unknown;", exclude_default=False
        )
        assert resolved == (set(), set())
import logging
from datetime import datetime, timezone
from typing import Any, Mapping, Optional, Sequence, TypedDict, Union, cast
from arroyo import Topic
from arroyo.backends.kafka import KafkaConsumer, KafkaPayload
from arroyo.backends.kafka.configuration import build_kafka_consumer_configuration
from arroyo.commit import ONCE_PER_SECOND
from arroyo.processing import StreamProcessor
from arroyo.processing.strategies import ProcessingStrategy, ProcessingStrategyFactory
from arroyo.types import Commit, Message, Partition
from django.conf import settings
from typing_extensions import NotRequired
from sentry.constants import DataCategory
from sentry.sentry_metrics.indexer.strings import SHARED_TAG_STRINGS, TRANSACTION_METRICS_NAMES
from sentry.sentry_metrics.use_case_id_registry import UseCaseID
from sentry.sentry_metrics.utils import reverse_resolve_tag_value
from sentry.utils import json
from sentry.utils.kafka_config import get_kafka_consumer_cluster_options, get_topic_definition
from sentry.utils.outcomes import Outcome, track_outcome
logger = logging.getLogger(__name__)
def get_metrics_billing_consumer(
    group_id: str,
    auto_offset_reset: str,
    strict_offset_reset: bool,
    force_topic: Union[str, None],
    force_cluster: Union[str, None],
) -> StreamProcessor[KafkaPayload]:
    """Build the Kafka stream processor that turns generic-metrics buckets
    into billing outcomes.

    :param group_id: Kafka consumer group id.
    :param auto_offset_reset: where to start when no committed offset exists.
    :param strict_offset_reset: passed through to the consumer configuration.
    :param force_topic: overrides the default generic-metrics topic if set.
    :param force_cluster: overrides the cluster derived from the topic if set.
    """
    topic = force_topic or settings.KAFKA_SNUBA_GENERIC_METRICS
    bootstrap_servers = _get_bootstrap_servers(topic, force_cluster)

    return StreamProcessor(
        consumer=KafkaConsumer(
            build_kafka_consumer_configuration(
                default_config={},
                group_id=group_id,
                strict_offset_reset=strict_offset_reset,
                auto_offset_reset=auto_offset_reset,
                bootstrap_servers=bootstrap_servers,
            ),
        ),
        topic=Topic(topic),
        processor_factory=BillingMetricsConsumerStrategyFactory(),
        # Commit at most once per second regardless of message throughput.
        commit_policy=ONCE_PER_SECOND,
    )
def _get_bootstrap_servers(topic: str, force_cluster: Union[str, None]) -> Sequence[str]:
    """Resolve the bootstrap servers for `topic`, normalized to a sequence.

    `force_cluster`, when given, takes precedence over the cluster configured
    for the topic.
    """
    cluster_name = force_cluster if force_cluster else get_topic_definition(topic)["cluster"]
    raw_servers = get_kafka_consumer_cluster_options(cluster_name)["bootstrap.servers"]
    # Config may hold either a single server string or a list/tuple of them.
    return raw_servers if isinstance(raw_servers, (list, tuple)) else [raw_servers]
class BillingMetricsConsumerStrategyFactory(ProcessingStrategyFactory[KafkaPayload]):
    """Factory producing a billing strategy for each partition assignment."""

    def create_with_partitions(
        self,
        commit: Commit,
        partitions: Mapping[Partition, int],
    ) -> ProcessingStrategy[KafkaPayload]:
        # `partitions` is intentionally unused: the billing strategy does not
        # keep per-partition state.
        return BillingTxCountMetricConsumerStrategy(commit)
class MetricsBucket(TypedDict):
    """
    Metrics bucket as decoded from kafka.

    Only defines the fields that are relevant for this consumer."""

    org_id: int
    project_id: int
    metric_id: int
    timestamp: int
    # Bucket payload; concrete shape depends on the metric type.
    value: Any
    # Tag keys are indexed strings; values may be raw strings or indexed ints.
    tags: Union[Mapping[str, str], Mapping[str, int]]

    # not used here but allows us to use the TypedDict for assignments
    type: NotRequired[str]
class BillingTxCountMetricConsumerStrategy(ProcessingStrategy[KafkaPayload]):
    """A metrics consumer that generates a billing outcome for each processed
    transaction, processing a bucket at a time. The transaction count is
    computed from the amount of values from `d:transactions/duration@millisecond`
    buckets.
    """

    #: The ID of the metric used to count transactions
    metric_id = TRANSACTION_METRICS_NAMES["d:transactions/duration@millisecond"]
    #: Indexed key of the "has_profile" tag (used to also bill profiles).
    profile_tag_key = str(SHARED_TAG_STRINGS["has_profile"])

    def __init__(
        self,
        commit: Commit,
    ) -> None:
        self.__commit = commit
        self.__closed = False

    def poll(self) -> None:
        pass

    def terminate(self) -> None:
        # Part of the arroyo ProcessingStrategy interface; restored from the
        # METHOD_NAME placeholder (without it, abrupt shutdown paths that
        # call strategy.terminate() would fail).
        self.close()

    def close(self) -> None:
        self.__closed = True

    def submit(self, message: Message[KafkaPayload]) -> None:
        assert not self.__closed

        payload = self._get_payload(message)
        self._produce_billing_outcomes(payload)
        self.__commit(message.committable)

    def _get_payload(self, message: Message[KafkaPayload]) -> MetricsBucket:
        """Decode the raw Kafka payload into a MetricsBucket dict."""
        payload = json.loads(message.payload.value.decode("utf-8"), use_rapid_json=True)
        return cast(MetricsBucket, payload)

    def _count_processed_items(self, bucket_payload: MetricsBucket) -> Mapping[DataCategory, int]:
        """Map a bucket to billable {category: quantity}; empty if not billable."""
        if bucket_payload["metric_id"] != self.metric_id:
            return {}
        value = bucket_payload["value"]
        try:
            # Distribution buckets carry one entry per recorded transaction.
            quantity = len(value)
        except TypeError:
            # Unexpected value type for this metric ID, skip.
            return {}

        items = {DataCategory.TRANSACTION: quantity}

        if self._has_profile(bucket_payload):
            # The bucket is tagged with the "has_profile" tag,
            # so we also count the quantity of this bucket towards profiles.
            # This assumes a "1 to 0..1" relationship between transactions and profiles.
            items[DataCategory.PROFILE] = quantity

        return items

    def _has_profile(self, bucket: MetricsBucket) -> bool:
        """True when the bucket's "has_profile" tag resolves to "true"."""
        return bool(
            (tag_value := bucket["tags"].get(self.profile_tag_key))
            and "true"
            == reverse_resolve_tag_value(UseCaseID.TRANSACTIONS, bucket["org_id"], tag_value)
        )

    def _produce_billing_outcomes(self, payload: MetricsBucket) -> None:
        for category, quantity in self._count_processed_items(payload).items():
            self._produce_billing_outcome(
                org_id=payload["org_id"],
                project_id=payload["project_id"],
                category=category,
                quantity=quantity,
            )

    def _produce_billing_outcome(
        self, *, org_id: int, project_id: int, category: DataCategory, quantity: int
    ) -> None:
        if quantity < 1:
            return

        # track_outcome does not guarantee to deliver the outcome, making this
        # an at-most-once delivery.
        #
        # If it turns out that we drop too many outcomes on shutdown,
        # we may have to revisit this part to achieve a
        # better approximation of exactly-once delivery.
        track_outcome(
            org_id=org_id,
            project_id=project_id,
            key_id=None,
            outcome=Outcome.ACCEPTED,
            reason=None,
            timestamp=datetime.now(timezone.utc),
            event_id=None,
            category=category,
            quantity=quantity,
        )

    def join(self, timeout: Optional[float] = None) -> None:
        # Flush any pending commit positions before shutdown.
        self.__commit({}, force=True)
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: proto
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class SubscriberFeatures(object):
    """Read-only accessor for a flatbuffers `SubscriberFeatures` table.

    Each accessor probes the vtable via `Offset(n)`; a zero offset means the
    field is absent and the schema default (False) is returned.
    """
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        # Read the root-table indirection and wrap the buffer.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SubscriberFeatures()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsSubscriberFeatures(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    # SubscriberFeatures
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # SubscriberFeatures
    def PublisherIdentification(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # SubscriberFeatures
    def PatternBasedSubscription(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # SubscriberFeatures
    def PublicationTrustlevels(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # SubscriberFeatures
    def SubscriptionRevocation(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # SubscriberFeatures
    def EventHistory(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # SubscriberFeatures
    def AcknowledgeSubscriberReceived(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # SubscriberFeatures
    def PayloadTransparency(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # SubscriberFeatures
    def PayloadEncryptionCryptobox(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False
# Builder helpers: begin a SubscriberFeatures table with 8 boolean slots.
# `Start` is the short alias for the newer flatbuffers naming convention.
def SubscriberFeaturesStart(builder): builder.StartObject(8)
def Start(builder):
    return SubscriberFeaturesStart(builder)
# Generated field setters, one per table slot (0-7).  Each
# `SubscriberFeaturesAddX` prepends a bool into its vtable slot with a
# default of 0; the short `AddX` functions are aliases for the newer
# flatbuffers naming convention.
def SubscriberFeaturesAddPublisherIdentification(builder, publisherIdentification): builder.PrependBoolSlot(0, publisherIdentification, 0)
def AddPublisherIdentification(builder, publisherIdentification):
    return SubscriberFeaturesAddPublisherIdentification(builder, publisherIdentification)
def SubscriberFeaturesAddPatternBasedSubscription(builder, patternBasedSubscription): builder.PrependBoolSlot(1, patternBasedSubscription, 0)
def AddPatternBasedSubscription(builder, patternBasedSubscription):
    return SubscriberFeaturesAddPatternBasedSubscription(builder, patternBasedSubscription)
def SubscriberFeaturesAddPublicationTrustlevels(builder, publicationTrustlevels): builder.PrependBoolSlot(2, publicationTrustlevels, 0)
def AddPublicationTrustlevels(builder, publicationTrustlevels):
    return SubscriberFeaturesAddPublicationTrustlevels(builder, publicationTrustlevels)
def SubscriberFeaturesAddSubscriptionRevocation(builder, subscriptionRevocation): builder.PrependBoolSlot(3, subscriptionRevocation, 0)
def AddSubscriptionRevocation(builder, subscriptionRevocation):
    return SubscriberFeaturesAddSubscriptionRevocation(builder, subscriptionRevocation)
def SubscriberFeaturesAddEventHistory(builder, eventHistory): builder.PrependBoolSlot(4, eventHistory, 0)
def AddEventHistory(builder, eventHistory):
    return SubscriberFeaturesAddEventHistory(builder, eventHistory)
def SubscriberFeaturesAddAcknowledgeSubscriberReceived(builder, acknowledgeSubscriberReceived): builder.PrependBoolSlot(5, acknowledgeSubscriberReceived, 0)
def AddAcknowledgeSubscriberReceived(builder, acknowledgeSubscriberReceived):
    return SubscriberFeaturesAddAcknowledgeSubscriberReceived(builder, acknowledgeSubscriberReceived)
def SubscriberFeaturesAddPayloadTransparency(builder, payloadTransparency): builder.PrependBoolSlot(6, payloadTransparency, 0)
def AddPayloadTransparency(builder, payloadTransparency):
    return SubscriberFeaturesAddPayloadTransparency(builder, payloadTransparency)
def SubscriberFeaturesAddPayloadEncryptionCryptobox(builder, payloadEncryptionCryptobox): builder.PrependBoolSlot(7, payloadEncryptionCryptobox, 0)
def AddPayloadEncryptionCryptobox(builder, payloadEncryptionCryptobox):
    return SubscriberFeaturesAddPayloadEncryptionCryptobox(builder, payloadEncryptionCryptobox)
def SubscriberFeaturesEnd(builder): return builder.EndObject()
def METHOD_NAME(builder):
return SubscriberFeaturesEnd(builder |
298,819 | print error | #############################################################################
# Copyright (C) 2020-2021 German Aerospace Center (DLR-SC)
#
# Authors: Kathrin Rack, Wadim Koslow
#
# Contact: Martin J. Kuehn <Martin.Kuehn@DLR.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
"""
@file getSimulationData.py
@brief Executes all data downloads which belong to the epidata package and downloads external data
The functions which are called are:
- getCaseData.get_case_data
- getPopulationData.get_population_data
- getVacccinationData.get_vaccination_data
- getDIVIData.get_divi_data
"""
from memilio.epidata import defaultDict as dd
from memilio.epidata import getCaseData
from memilio.epidata import getDataIntoPandasDataFrame as gd
from memilio.epidata import getDIVIData, getPopulationData, getVaccinationData
def METHOD_NAME(text):
    """Print the standard error notice for a failed *text* data download."""
    print(f'Error: Something went wrong while getting {text}'
          ' data. This was likely caused by a changed file format'
          ' of the source material. Please report this as an issue. '
          f'{text} data could not be stored correctly.')
def get_simulation_data(read_data=dd.defaultDict['read_data'],
                        file_format=dd.defaultDict['file_format'],
                        out_folder=dd.defaultDict['out_folder'],
                        no_raw=dd.defaultDict['no_raw'],
                        start_date=dd.defaultDict['start_date'],
                        end_date=dd.defaultDict['end_date'],
                        impute_dates=dd.defaultDict['impute_dates'],
                        moving_average=dd.defaultDict['moving_average'],
                        make_plot=dd.defaultDict['make_plot'],
                        split_berlin=dd.defaultDict['split_berlin'],
                        rep_date=dd.defaultDict['rep_date'],
                        sanitize_data=dd.defaultDict['sanitize_data']
                        ):
    """! Downloads all data from external sources.

    Runs, in order: getCaseData.get_case_data,
    getPopulationData.get_population_data, getDIVIData.get_divi_data and
    getVaccinationData.get_vaccination_data. A failure in one download is
    reported (exception plus METHOD_NAME notice) without stopping the rest.

    Keyword arguments:
    @param read_data True or False. Defines if data is read from file or downloaded. Default defined in defaultDict.
    @param file_format File format which is used for writing the data. Default defined in defaultDict.
    @param out_folder Folder where data is written to. Default defined in defaultDict.
    @param no_raw True or False. Defines if unchanged raw data is saved or not. Default defined in defaultDict.
    @param start_date Date of first date in dataframe. Default 2020-01-01.
    @param end_date Date of last date in dataframe. Default defined in defaultDict.
    @param impute_dates True or False. Defines if values for dates without new information are imputed. Default defined in defaultDict.
    @param moving_average Integers >=0. Applies an 'moving_average'-days moving average on all time series. Default defined in defaultDict.
    @param make_plot True or False. Defines if plots are generated with matplotlib. Default defined in defaultDict.
    @param split_berlin True or False. Defines if Berlin's districts are kept separated or get merged. Default defined in defaultDict.
    @param rep_date True or False. Defines if reporting date or reference date is taken into dataframe. Default defined in defaultDict.
    @param sanitize_data Value in {0,1,2,3}. Redistributes cases of every county either based on regions' ratios or on thresholds and population.
    """
    # Arguments shared by every getter; the date/plot options only apply to
    # the actual time-series downloads (population data takes the base set).
    base_args = {"read_data": read_data, "file_format": file_format,
                 "out_folder": out_folder, "no_raw": no_raw}
    download_args = dict(base_args, start_date=start_date, end_date=end_date,
                         impute_dates=impute_dates,
                         moving_average=moving_average, make_plot=make_plot)
    # Lambdas keep the attribute lookup and call inside the try block,
    # exactly as the original sequential calls behaved.
    downloads = [
        (lambda: getCaseData.get_case_data(
            **download_args, split_berlin=split_berlin, rep_date=rep_date),
         'case'),
        (lambda: getPopulationData.get_population_data(**base_args),
         'population'),
        (lambda: getDIVIData.get_divi_data(**download_args),
         'DIVI'),
        (lambda: getVaccinationData.get_vaccination_data(
            **download_args, sanitize_data=sanitize_data),
         'vaccination'),
    ]
    for run_download, topic in downloads:
        try:
            run_download()
        except Exception as exp:
            print(f'{type(exp).__name__}: {exp}')
            METHOD_NAME(topic)
def main():
    """! Main program entry: parse CLI arguments and run all downloads."""
    get_simulation_data(**gd.cli("sim"))
if __name__ == "__main__":
    main()
298,820 | test register | from asynctest import TestCase as AsyncTestCase
from asynctest import mock as async_mock
from .....admin.request_context import AdminRequestContext
from .....storage.error import StorageError
from .. import routes as test_module
from ..manager import V20DiscoveryMgr
from ..messages.queries import Queries, QueryItem
from ..models.discovery_record import V20DiscoveryExchangeRecord
class TestDiscoveryRoutes(AsyncTestCase):
    """Unit tests for the v2.0 discover-features admin routes."""
    async def setUp(self):
        """Build a mock aiohttp request backed by a test admin context."""
        self.session_inject = {}
        self.context = AdminRequestContext.test_context(self.session_inject)
        self.profile = self.context.profile
        self.request_dict = {
            "context": self.context,
            "outbound_message_router": async_mock.CoroutineMock(),
        }
        # request["context"] / request["outbound_message_router"] resolve via
        # the __getitem__ lambda into request_dict above
        self.request = async_mock.MagicMock(
            app={},
            match_info={},
            query={},
            __getitem__=lambda _, k: self.request_dict[k],
        )
    async def test_query_features(self):
        """query_features returns the serialized exchange record (protocol query only)."""
        self.request.json = async_mock.CoroutineMock()
        self.request.query = {"query_protocol": "*"}
        test_rec = V20DiscoveryExchangeRecord(
            discovery_exchange_id="3fa85f64-5717-4562-b3fc-2c963f66afa6",
            queries_msg=Queries(
                queries=[
                    QueryItem(feature_type="protocol", match="*"),
                    QueryItem(feature_type="goal-code", match="test"),
                ]
            ),
        )
        with async_mock.patch.object(
            test_module.web, "json_response"
        ) as mock_response, async_mock.patch.object(
            V20DiscoveryMgr, "create_and_send_query", autospec=True
        ) as mock_create_query:
            mock_create_query.return_value = test_rec
            # return value unused; the json_response assertion is the check
            res = await test_module.query_features(self.request)
            mock_response.assert_called_once_with(test_rec.serialize())
    async def test_query_features_with_connection(self):
        """query_features forwards connection id and goal-code query parameters."""
        self.request.json = async_mock.CoroutineMock()
        self.request.query = {
            "query_protocol": "*",
            "connection_id": "test",
            "query_goal_code": "test",
        }
        test_rec = V20DiscoveryExchangeRecord(
            discovery_exchange_id="3fa85f64-5717-4562-b3fc-2c963f66afa6",
            queries_msg=Queries(
                queries=[
                    QueryItem(feature_type="protocol", match="*"),
                    QueryItem(feature_type="goal-code", match="test"),
                ]
            ),
        )
        with async_mock.patch.object(
            test_module.web, "json_response"
        ) as mock_response, async_mock.patch.object(
            V20DiscoveryMgr, "create_and_send_query", autospec=True
        ) as mock_create_query:
            mock_create_query.return_value = test_rec
            res = await test_module.query_features(self.request)
            mock_response.assert_called_once_with(test_rec.serialize())
    async def test_query_records(self):
        """query_records with a connection_id looks up by connection and wraps in results."""
        self.request.json = async_mock.CoroutineMock()
        self.request.query = {"connection_id": "test"}
        test_rec = V20DiscoveryExchangeRecord(
            discovery_exchange_id="3fa85f64-5717-4562-b3fc-2c963f66afa6",
            queries_msg=Queries(
                queries=[
                    QueryItem(feature_type="protocol", match="*"),
                    QueryItem(feature_type="goal-code", match="test"),
                ]
            ),
        )
        with async_mock.patch.object(
            test_module.web, "json_response"
        ) as mock_response, async_mock.patch.object(
            test_module, "V20DiscoveryExchangeRecord", autospec=True
        ) as mock_ex_rec:
            mock_ex_rec.retrieve_by_connection_id.return_value = test_rec
            res = await test_module.query_records(self.request)
            mock_response.assert_called_once_with({"results": [test_rec.serialize()]})
    async def test_query_records_x(self):
        """A storage failure during per-connection lookup maps to HTTP 400."""
        self.request.json = async_mock.CoroutineMock()
        self.request.query = {"connection_id": "test"}
        with async_mock.patch.object(
            test_module.web, "json_response"
        ) as mock_response, async_mock.patch.object(
            test_module, "V20DiscoveryExchangeRecord", autospec=True
        ) as mock_ex_rec:
            mock_ex_rec.retrieve_by_connection_id.side_effect = StorageError
            with self.assertRaises(test_module.web.HTTPBadRequest):
                await test_module.query_records(self.request)
    async def test_query_records_all(self):
        """query_records without a connection_id returns all exchange records."""
        self.request.json = async_mock.CoroutineMock()
        test_recs = [
            V20DiscoveryExchangeRecord(
                discovery_exchange_id="3fa85f64-5717-4562-b3fc-2c963f66afa6",
                queries_msg=Queries(
                    queries=[
                        QueryItem(feature_type="protocol", match="*"),
                        QueryItem(feature_type="goal-code", match="test"),
                    ]
                ),
            ),
            V20DiscoveryExchangeRecord(
                discovery_exchange_id="3fa85f64-5717-4562-b3fc-2c963f66afa7",
                queries_msg=Queries(
                    queries=[
                        QueryItem(feature_type="protocol", match="test"),
                        QueryItem(feature_type="goal-code", match="*"),
                    ]
                ),
            ),
        ]
        with async_mock.patch.object(
            test_module.web, "json_response"
        ) as mock_response, async_mock.patch.object(
            test_module, "V20DiscoveryExchangeRecord", autospec=True
        ) as mock_ex_rec:
            mock_ex_rec.query.return_value = test_recs
            res = await test_module.query_records(self.request)
            mock_response.assert_called_once_with(
                {"results": [k.serialize() for k in test_recs]}
            )
    async def test_query_records_connection_x(self):
        """A storage failure during the query-all path maps to HTTP 400."""
        self.request.json = async_mock.CoroutineMock()
        with async_mock.patch.object(
            test_module.web, "json_response"
        ) as mock_response, async_mock.patch.object(
            test_module, "V20DiscoveryExchangeRecord", autospec=True
        ) as mock_ex_rec:
            mock_ex_rec.query.side_effect = StorageError
            with self.assertRaises(test_module.web.HTTPBadRequest):
                await test_module.query_records(self.request)
    async def METHOD_NAME(self):
        """register() adds the module's routes to the app exactly once."""
        mock_app = async_mock.MagicMock()
        mock_app.add_routes = async_mock.MagicMock()
        await test_module.register(mock_app)
        mock_app.add_routes.assert_called_once()
    async def test_post_process_routes(self):
        """post_process_routes populates the swagger tag metadata."""
        mock_app = async_mock.MagicMock(_state={"swagger_dict": {}})
        test_module.post_process_routes(mock_app)
        assert "tags" in mock_app._state["swagger_dict"]
298,821 | is valid | # Copyright 2019 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from typing import Dict, List, Optional
from google.protobuf.json_format import MessageToJson
from typeguard import typechecked
from feast.protos.feast.core.Entity_pb2 import Entity as EntityProto
from feast.protos.feast.core.Entity_pb2 import EntityMeta as EntityMetaProto
from feast.protos.feast.core.Entity_pb2 import EntitySpecV2 as EntitySpecProto
from feast.usage import log_exceptions
from feast.value_type import ValueType
@typechecked
class Entity:
    """
    An entity defines a collection of entities for which features can be defined. An
    entity can also contain associated metadata.
    Attributes:
        name: The unique name of the entity.
        value_type: The type of the entity, such as string or float.
        join_key: A property that uniquely identifies different entities within the
            collection. The join_key property is typically used for joining entities
            with their associated features. If not specified, defaults to the name.
        description: A human-readable description.
        tags: A dictionary of key-value pairs to store arbitrary metadata.
        owner: The owner of the entity, typically the email of the primary maintainer.
        created_timestamp: The time when the entity was created.
        last_updated_timestamp: The time when the entity was last updated.
    """
    # Attribute declarations; all are assigned in __init__ (timestamps are
    # populated later, e.g. by from_proto or the registry).
    name: str
    value_type: ValueType
    join_key: str
    description: str
    tags: Dict[str, str]
    owner: str
    created_timestamp: Optional[datetime]
    last_updated_timestamp: Optional[datetime]
    @log_exceptions
    def __init__(
        self,
        *,
        name: str,
        join_keys: Optional[List[str]] = None,
        value_type: Optional[ValueType] = None,
        description: str = "",
        tags: Optional[Dict[str, str]] = None,
        owner: str = "",
    ):
        """
        Creates an Entity object.
        Args:
            name: The unique name of the entity.
            join_keys (optional): A list of properties that uniquely identifies different entities
                within the collection. This currently only supports a list of size one, but is
                intended to eventually support multiple join keys.
            value_type (optional): The type of the entity, such as string or float. If not specified,
                it will be inferred from the schema of the underlying data source.
            description (optional): A human-readable description.
            tags (optional): A dictionary of key-value pairs to store arbitrary metadata.
            owner (optional): The owner of the entity, typically the email of the primary maintainer.
        Raises:
            ValueError: Parameters are specified incorrectly.
        """
        self.name = name
        # UNKNOWN is a sentinel meaning "infer from the data source later"
        self.value_type = value_type or ValueType.UNKNOWN
        if join_keys and len(join_keys) > 1:
            # TODO(felixwang9817): When multiple join keys are supported, add a `join_keys` attribute
            # and deprecate the `join_key` attribute.
            raise ValueError(
                "An entity may only have a single join key. "
                "Multiple join keys will be supported in the future."
            )
        elif join_keys and len(join_keys) == 1:
            self.join_key = join_keys[0]
        else:
            # no join key given: the entity name doubles as the join key
            self.join_key = self.name
        self.description = description
        self.tags = tags if tags is not None else {}
        self.owner = owner
        self.created_timestamp = None
        self.last_updated_timestamp = None
    def __hash__(self) -> int:
        # Identity for sets/dicts is name + join key only (not full equality).
        return hash((self.name, self.join_key))
    def __eq__(self, other):
        # NOTE: deliberately raises on cross-type comparison instead of
        # returning NotImplemented; callers rely on this strictness.
        if not isinstance(other, Entity):
            raise TypeError("Comparisons should only involve Entity class objects.")
        # Timestamps are excluded from equality on purpose.
        if (
            self.name != other.name
            or self.value_type != other.value_type
            or self.join_key != other.join_key
            or self.description != other.description
            or self.tags != other.tags
            or self.owner != other.owner
        ):
            return False
        return True
    def __str__(self):
        return str(MessageToJson(self.to_proto()))
    def __lt__(self, other):
        return self.name < other.name
    def METHOD_NAME(self):
        """
        Validates the state of this entity locally.
        Raises:
            ValueError: The entity does not have a name or does not have a type.
        """
        if not self.name:
            raise ValueError("The entity does not have a name.")
        if not self.value_type:
            raise ValueError(f"The entity {self.name} does not have a type.")
    @classmethod
    def from_proto(cls, entity_proto: EntityProto):
        """
        Creates an entity from a protobuf representation of an entity.
        Args:
            entity_proto: A protobuf representation of an entity.
        Returns:
            An Entity object based on the entity protobuf.
        """
        entity = cls(
            name=entity_proto.spec.name,
            join_keys=[entity_proto.spec.join_key],
            description=entity_proto.spec.description,
            tags=dict(entity_proto.spec.tags),
            owner=entity_proto.spec.owner,
        )
        # set after construction so UNKNOWN defaulting in __init__ is bypassed
        entity.value_type = ValueType(entity_proto.spec.value_type)
        if entity_proto.meta.HasField("created_timestamp"):
            entity.created_timestamp = entity_proto.meta.created_timestamp.ToDatetime()
        if entity_proto.meta.HasField("last_updated_timestamp"):
            entity.last_updated_timestamp = (
                entity_proto.meta.last_updated_timestamp.ToDatetime()
            )
        return entity
    def to_proto(self) -> EntityProto:
        """
        Converts an entity object to its protobuf representation.
        Returns:
            An EntityProto protobuf.
        """
        meta = EntityMetaProto()
        if self.created_timestamp:
            meta.created_timestamp.FromDatetime(self.created_timestamp)
        if self.last_updated_timestamp:
            meta.last_updated_timestamp.FromDatetime(self.last_updated_timestamp)
        spec = EntitySpecProto(
            name=self.name,
            value_type=self.value_type.value,
            join_key=self.join_key,
            description=self.description,
            tags=self.tags,
            owner=self.owner,
        )
        return EntityProto(spec=spec, meta=meta)
298,822 | init read only transaction |
#
# Client code for Update Agent
# Copyright (c) 1999--2016 Red Hat, Inc. Distributed under GPLv2.
#
# Adrian Likins <alikins@redhat.com
#
#
# a couple of classes wrapping up transactions so that we
# can share transactions instead of creating new ones all over
#
import rpm
# Module-level transaction singletons shared across the client (created lazily).
read_ts = None  # cached read-only transaction; built on first use
ts = None  # cached read/write transaction placeholder
# ************* NOTE: ************#
# for the sake of clarity, the names "added/removed" as used here
# are indicative of what happened when the original transaction was
# ran. Aka, if you "up2date foobar" and it updates foobar-1-0 with
# foobar-2-0, you added foobar-2-0 and removed foobar-1-0
#
# The reason I mention this explicitly is the trouble of describing
# what happens when you rollback the transaction, which is basically
# the opposite, and leads to plenty of confusion
#
class TransactionData:
    """Simple container for shipping rpm transaction info around.

    ``data['packages']`` holds ``(pkg_info, mode)`` tuples where mode is
    'e' (erase), 'i' (install) or 'u' (update) and pkg_info is
    [name, version, release, epoch, arch]; arch is optional (use what the
    server specifies), and removed packages only need [n, v, r, e, arch].
    ``size`` is never used directly here; it exists as a placeholder.
    """
    def __init__(self):
        self.data = {}
        self.data['packages'] = []
        # flag lists applied when the transaction is set up
        self.data['flags'] = []
        self.data['vsflags'] = []
        self.data['probFilterFlags'] = []
    def display(self):
        """Render the package list as an indented, human-readable string."""
        removed, installed, updated, misc = [], [], [], []
        for pkg_info, mode in self.data['packages']:
            if mode == 'u':
                updated.append(pkg_info)
            elif mode == 'i':
                installed.append(pkg_info)
            elif mode == 'e':
                removed.append(pkg_info)
            else:
                misc.append(pkg_info)
        # erases first, then installs, then updates, then anything else
        lines = [f"\t\t[e] {p[0]}-{p[1]}-{p[2]}:{p[3]}\n" for p in removed]
        lines += [f"\t\t[i] {p[0]}-{p[1]}-{p[2]}:{p[3]}\n" for p in installed]
        lines += [f"\t\t[u] {p[0]}-{p[1]}-{p[2]}:{p[3]}\n" for p in updated]
        lines += [f"\t\t[{p[5]}] {p[0]}-{p[1]}-{p[2]}:{p[3]}\n" for p in misc]
        return "".join(lines)
# wrapper/proxy class for rpm.Transaction so we can
# instrument it, etc easily
class Up2dateTransaction:
    """Thin proxy around ``rpm.TransactionSet``.

    Exposes a whitelisted subset of the transaction set's methods via
    ``__getattr__`` so the underlying object can be instrumented or swapped,
    and adds push/pop handling for VS flags so the previously set value is
    never lost.
    """
    def __init__(self):
        self.ts = rpm.TransactionSet()
        # methods forwarded to the wrapped transaction set
        self._methods = ['dbMatch',
                         'check',
                         'order',
                         'addErase',
                         'addInstall',
                         'run',
                         'IDTXload',
                         'IDTXglob',
                         'rollback',
                         'pgpImportPubkey',
                         'pgpPrtPkts',
                         'Debug',
                         'setFlags',
                         'setVSFlags',
                         'setProbFilter',
                         'hdrFromFdno']
        self.tsflags = []
    def __getattr__(self, attr):
        """Forward whitelisted method lookups to the wrapped transaction set."""
        if attr not in self._methods:
            raise AttributeError(attr)
        return self.getMethod(attr)
    def getMethod(self, method):
        # single indirection point: could be overridden with profiling etc.
        return getattr(self.ts, method)
    def pushVSFlags(self, flags):
        """Set *flags* as the active VS flags, remembering the previous value."""
        self.tsflags.append(flags)
        self.ts.setVSFlags(self.tsflags[-1])
    def popVSFlags(self):
        """Drop the current VS flags and restore the previous ones."""
        self.tsflags.pop()
        self.ts.setVSFlags(self.tsflags[-1])
def METHOD_NAME():
    """Return the shared read-only transaction, creating it on first use.

    The instance is cached in the module-level ``read_ts`` global so all
    callers share one transaction instead of creating new ones.
    """
    global read_ts
    # PEP 8: identity test for None, not equality ('== None' invokes __eq__)
    if read_ts is None:
        read_ts = Up2dateTransaction()
        # FIXME: replace with macro definition
        read_ts.pushVSFlags(-1)
    return read_ts
|
298,823 | hosts | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetClusterResult',
'AwaitableGetClusterResult',
'get_cluster',
'get_cluster_output',
]
@pulumi.output_type
class GetClusterResult:
    """
    A cluster resource
    """
    # Generated output type: __init__ validates each argument's runtime type
    # and stashes it via pulumi.set so the @pulumi.getter properties below can
    # retrieve it. `METHOD_NAME` is the constructor parameter / accessor for
    # the 'hosts' field.
    def __init__(__self__, cluster_id=None, cluster_size=None, METHOD_NAME=None, id=None, name=None, provisioning_state=None, sku=None, type=None):
        if cluster_id and not isinstance(cluster_id, int):
            raise TypeError("Expected argument 'cluster_id' to be a int")
        pulumi.set(__self__, "cluster_id", cluster_id)
        if cluster_size and not isinstance(cluster_size, int):
            raise TypeError("Expected argument 'cluster_size' to be a int")
        pulumi.set(__self__, "cluster_size", cluster_size)
        if METHOD_NAME and not isinstance(METHOD_NAME, list):
            raise TypeError("Expected argument 'hosts' to be a list")
        pulumi.set(__self__, "hosts", METHOD_NAME)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if sku and not isinstance(sku, dict):
            raise TypeError("Expected argument 'sku' to be a dict")
        pulumi.set(__self__, "sku", sku)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="clusterId")
    def cluster_id(self) -> int:
        """
        The identity
        """
        return pulumi.get(self, "cluster_id")
    @property
    @pulumi.getter(name="clusterSize")
    def cluster_size(self) -> Optional[int]:
        """
        The cluster size
        """
        return pulumi.get(self, "cluster_size")
    @property
    @pulumi.getter
    def METHOD_NAME(self) -> Optional[Sequence[str]]:
        """
        The hosts
        """
        return pulumi.get(self, "hosts")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The state of the cluster provisioning
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def sku(self) -> 'outputs.SkuResponse':
        """
        The cluster SKU
        """
        return pulumi.get(self, "sku")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetClusterResult(GetClusterResult):
    """Awaitable variant of GetClusterResult returned by get_cluster().

    ``__await__`` uses the unreachable ``yield`` to make this a generator
    that immediately returns a plain GetClusterResult copy, so the result
    works both as a direct value and behind ``await``.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetClusterResult(
            cluster_id=self.cluster_id,
            cluster_size=self.cluster_size,
            METHOD_NAME=self.METHOD_NAME,
            id=self.id,
            name=self.name,
            provisioning_state=self.provisioning_state,
            sku=self.sku,
            type=self.type)
def get_cluster(cluster_name: Optional[str] = None,
                private_cloud_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClusterResult:
    """
    A cluster resource
    :param str cluster_name: Name of the cluster in the private cloud
    :param str private_cloud_name: Name of the private cloud
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    __args__ = dict()
    __args__['clusterName'] = cluster_name
    __args__['privateCloudName'] = private_cloud_name
    __args__['resourceGroupName'] = resource_group_name
    # merge caller options with provider defaults, then invoke the typed
    # azure-native data-source function
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:avs/v20230301:getCluster', __args__, opts=opts, typ=GetClusterResult).value
    # re-wrap the raw invoke result in the awaitable subclass field by field
    return AwaitableGetClusterResult(
        cluster_id=pulumi.get(__ret__, 'cluster_id'),
        cluster_size=pulumi.get(__ret__, 'cluster_size'),
        METHOD_NAME=pulumi.get(__ret__, 'hosts'),
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        sku=pulumi.get(__ret__, 'sku'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_cluster)
def get_cluster_output(cluster_name: Optional[pulumi.Input[str]] = None,
                       private_cloud_name: Optional[pulumi.Input[str]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetClusterResult]:
    """
    A cluster resource
    :param str cluster_name: Name of the cluster in the private cloud
    :param str private_cloud_name: Name of the private cloud
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Body intentionally empty: lift_output_func generates the implementation
    # by forwarding Output-aware arguments to get_cluster.
    ...
298,824 | b check geom obj | """Helper functions to create and test Blender scene geometry data"""
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright © 2005, NIF File Format Library and Tools contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NIF File Format Library and Tools
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
import bpy
import math
import mathutils
import nose
EPSILON = 0.005  # absolute tolerance used by the b_check_* helpers below
# NOTE(review): the string literal below is a bare expression statement, so it
# is NOT attached to b_verts as documentation; kept as-is to avoid code changes.
"""Vertex coordinates for testing."""
# Expected vertex coordinates of the test polyhedron (cube scaled to
# (7.5, 7.5, 3.5) with one face's Y/Z halved — see b_scale_single_face).
b_verts = {
    (-7.5, 7.5, 3.5),
    (7.5, 3.75, 1.75),
    (7.5, -3.75, -1.75),
    (7.5, 3.75, -1.75),
    (-7.5, 7.5, -3.5),
    (-7.5, -7.5, 3.5),
    (7.5, -3.75, 1.75),
    (-7.5, -7.5, -3.5),
}
def b_create_base_geometry(b_name):
    """Create the standard test polyhedron (a scaled, tweaked, transformed
    cube) named *b_name* and return the Blender object."""
    polyhedron = b_create_cube(b_name)
    b_transform_cube(polyhedron)
    return polyhedron
def b_create_cube(b_name):
    """Add a primitive cube named *b_name*: smooth shaded, single sided."""
    bpy.ops.mesh.primitive_cube_add()
    cube = bpy.data.objects[bpy.context.active_object.name]
    cube.name = b_name
    bpy.ops.object.shade_smooth()
    # meshes default to double sided; the tests expect single sided
    cube.data.show_double_sided = False
    return cube
def b_transform_cube(b_obj):
    """Alter the cube: bake the axis scaling, shrink one face, then assign
    the non-trivial local transform. Order matters — the scale operators act
    on the current selection before the matrix is set."""
    b_apply_object_scale()
    b_scale_single_face(b_obj)
    b_obj.matrix_local = b_get_transform_matrix()
def b_apply_object_scale():
    """Resize the selected object to (7.5, 7.5, 3.5) one axis at a time,
    then bake the scale into the mesh data."""
    for factors, axes in (((7.5, 1, 1), (True, False, False)),
                          ((1, 7.5, 1), (False, True, False)),
                          ((1, 1, 3.5), (False, False, True))):
        bpy.ops.transform.resize(value=factors, constraint_axis=axes)
    bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)
def b_scale_single_face(b_obj):
    """Select only polygon 2 of *b_obj* and halve its vertices' Y and Z."""
    mesh = b_obj.data
    for poly in mesh.polygons:
        poly.select = False
    target = mesh.polygons[2]
    target.select = True
    for vert_index in target.vertices:
        vert = mesh.vertices[vert_index]
        vert.co[1] = vert.co[1] * 0.5
        vert.co[2] = vert.co[2] * 0.5
def b_get_transform_matrix():
    """Return a non-trivial TRS matrix: translate (20, 20, 20), rotate
    30/60/90 degrees about X/Y/Z, uniform scale 0.75."""
    translation = mathutils.Matrix.Translation((20.0, 20.0, 20.0))
    rotation = (mathutils.Matrix.Rotation(math.radians(30.0), 4, 'X') *
                mathutils.Matrix.Rotation(math.radians(60.0), 4, 'Y') *
                mathutils.Matrix.Rotation(math.radians(90.0), 4, 'Z'))
    scaling = mathutils.Matrix.Scale(0.75, 4)
    return translation * rotation * scaling
def METHOD_NAME(b_obj):
    """Run the geometry assertions (face and vertex checks) on *b_obj*."""
    mesh = b_obj.data
    # transform verification currently disabled: b_check_transform(b_obj)
    b_check_geom(mesh)
    b_check_vertex_count(mesh)
def b_check_transform(b_obj):
    """Assert *b_obj* carries the transform built by b_get_transform_matrix().

    Checks the location, the per-axis Euler rotation and the uniform scale
    against the expected values within EPSILON tolerance.
    """
    b_loc_vec, b_rot_quat, b_scale_vec = b_obj.matrix_local.decompose()  # transforms
    nose.tools.assert_equal(b_obj.location, mathutils.Vector((20.0, 20.0, 20.0)))  # location
    b_rot_eul = b_rot_quat.to_euler()
    # print_rotation(b_rot_eul)
    # Bug fix: the original compared the *signed* difference to EPSILON, so
    # any negative difference passed regardless of magnitude; use abs().
    nose.tools.assert_true(abs(b_rot_eul.x - math.radians(30.0)) < EPSILON)  # x rotation
    nose.tools.assert_true(abs(b_rot_eul.y - math.radians(60.0)) < EPSILON)  # y rotation
    nose.tools.assert_true(abs(b_rot_eul.z - math.radians(90.0)) < EPSILON)  # z rotation
    # Compare the scale component-wise instead of relying on Vector ordering.
    for component in (b_scale_vec - mathutils.Vector((0.75, 0.75, 0.75))):
        nose.tools.assert_true(abs(component) < EPSILON)  # uniform scale
def print_rotation(b_rot_eul):
    """Debug helper: dump an Euler rotation and the expected radian values."""
    print("HEERE")
    for value in (b_rot_eul, b_rot_eul.x, b_rot_eul.y, b_rot_eul.z):
        print(value)
    print("RADS")
    for degrees in (30.0, 60.0, 90.0):
        print(math.radians(degrees))
def b_check_geom(b_mesh):
    """Assert the mesh triangulates to 12 triangles (each quad counts as 2)."""
    num_triangles = sum(
        1 if len(face.vertices) == 3 else 2 if len(face.vertices) == 4 else 0
        for face in b_mesh.polygons)
    nose.tools.assert_equal(num_triangles, 12)
def b_check_vertex_count(b_mesh):
    """Assert the mesh has exactly the 8 expected vertices (see b_verts)."""
    nose.tools.assert_equal(len(b_mesh.vertices), 8)
    actual = set()
    for vert in b_mesh.vertices:
        # round to 4 decimals so float noise does not break set equality
        actual.add(tuple(round(component, 4) for component in vert.co))
    nose.tools.assert_set_equal(actual, b_verts)
298,825 | yield val | from collections import defaultdict
import pytest
from dagster import (
AssetMaterialization,
DagsterInvalidDefinitionError,
DagsterTypeCheckDidNotPass,
In,
Int,
List,
Nothing,
Optional,
Out,
Output,
asset,
job,
materialize_to_memory,
op,
)
from dagster._core.execution.api import create_execution_plan
def _define_nothing_dep_job():
    """Build a job whose ops are wired together through Nothing dependencies.

    Exercises Nothing-typed outputs/inputs used purely for ordering: a start
    op fans out to a value adder and a multi-output op, and both completion
    signals converge on a terminal op.
    """
    # NOTE: inner ops deliberately use '#' comments, not docstrings — an op's
    # docstring becomes its description metadata and would change the job.
    @op(out={"complete": Out(Nothing)})
    def start_nothing():
        pass
    @op(
        ins={
            "add_complete": In(Nothing),
            "yield_complete": In(Nothing),
        }
    )
    def end_nothing():
        pass
    @op
    def emit_value() -> int:
        return 1
    @op(ins={"on_complete": In(Nothing), "num": In(Int)})
    def add_value(num) -> int:
        # only the typed input appears as a parameter; on_complete is
        # an ordering-only dependency
        return 1 + num
    @op(
        name="yield_values",
        ins={"on_complete": In(Nothing)},
        out={
            "num_1": Out(Int),
            "num_2": Out(Int),
            "complete": Out(Nothing),
        },
    )
    def yield_values():
        yield Output(1, "num_1")
        yield Output(2, "num_2")
        yield Output(None, "complete")  # Nothing outputs carry None
    @job
    def simple_exc():
        start_complete = start_nothing()
        _, _, yield_complete = yield_values(start_complete)
        end_nothing(
            add_complete=add_value(on_complete=start_complete, num=emit_value()),
            yield_complete=yield_complete,
        )
    return simple_exc
def test_valid_nothing_dependencies():
    """A job wired purely through Nothing dependencies executes successfully."""
    assert _define_nothing_dep_job().execute_in_process().success
def test_nothing_output_something_input():
    """Feeding a Nothing output into a typed (Int) input fails the type check."""
    @op(out=Out(Nothing))
    def do_nothing():
        pass
    @op(ins={"num": In(Int)})
    def add_one(num) -> int:
        return num + 1
    @job
    def bad_dep():
        add_one(do_nothing())
    # the mismatch surfaces at execution time as a type-check failure
    with pytest.raises(DagsterTypeCheckDidNotPass):
        bad_dep.execute_in_process()
def test_result_type_check():
    """Yielding a non-None value from a Nothing output fails the type check."""
    @op(out=Out(Nothing))
    def bad():
        yield Output("oops")

    @job
    def fail():
        bad()

    with pytest.raises(DagsterTypeCheckDidNotPass):
        fail.execute_in_process()
def test_nothing_inputs():
    """Nothing ins declared via In(Nothing) never become function parameters,
    even interleaved with real Int inputs."""
    @op(ins={"never_defined": In(Nothing)})
    def emit_one():
        return 1

    @op
    def emit_two():
        return 2

    @op
    def emit_three():
        return 3

    @op(out=Out(Nothing))
    def emit_nothing():
        pass

    @op(
        ins={
            "_one": In(Nothing),
            "one": In(Int),
            "_two": In(Nothing),
            "two": In(Int),
            "_three": In(Nothing),
            "three": In(Int),
        }
    )
    def adder(one, two, three):
        # Only the Int inputs arrive as parameters; Nothing ins are wiring-only.
        assert one == 1
        assert two == 2
        assert three == 3

        return one + two + three

    @job
    def input_test():
        _one = emit_nothing.alias("_one")()
        _two = emit_nothing.alias("_two")()
        _three = emit_nothing.alias("_three")()

        adder(
            _one=_one,
            _two=_two,
            _three=_three,
            one=emit_one(),
            two=emit_two(),
            three=emit_three(),
        )

    result = input_test.execute_in_process()
    assert result.success
def test_fanin_deps():
    """A list of Nothing outputs fans in to a single Nothing input, and all
    upstream ops run before the consumer."""
    called = defaultdict(int)

    @op
    def emit_two():
        return 2

    @op(out=Out(Nothing))
    def emit_nothing():
        called["emit_nothing"] += 1

    @op(
        ins={
            "ready": In(Nothing),
            "num_1": In(Int),
            "num_2": In(Int),
        }
    )
    def adder(num_1, num_2):
        # All three emit_nothing aliases must have completed before this runs.
        assert called["emit_nothing"] == 3
        called["adder"] += 1
        return num_1 + num_2

    @job
    def input_test():
        adder(
            ready=[
                emit_nothing.alias("_one")(),
                emit_nothing.alias("_two")(),
                emit_nothing.alias("_three")(),
            ],
            num_1=emit_two.alias("emit_1")(),
            num_2=emit_two.alias("emit_2")(),
        )

    result = input_test.execute_in_process()
    assert result.success
    assert called["adder"] == 1
    assert called["emit_nothing"] == 3
def test_valid_nothing_fns():
    """All valid ways to 'return' Nothing: pass, return None, yield
    Output(None), or yield only events — both in a job and invoked directly."""
    @op(out=Out(Nothing))
    def just_pass():
        pass

    @op(out=Out(Nothing))
    def just_pass2():
        pass

    @op(out=Out(Nothing))
    def ret_none():
        return None

    @op(out=Out(Nothing))
    def yield_none():
        yield Output(None)

    @op(out=Out(Nothing))
    def yield_stuff():
        yield AssetMaterialization.file("/path/to/nowhere")

    @job
    def fn_test():
        just_pass()
        just_pass2()
        ret_none()
        yield_none()
        yield_stuff()

    result = fn_test.execute_in_process()
    assert result.success

    # test direct invocations
    just_pass()
    just_pass2()
    ret_none()
    # Generators must be drained for their bodies to run.
    [_ for _ in yield_none()]
    [_ for _ in yield_stuff()]
def test_invalid_nothing_fns():
    """Returning or yielding a real value from a Nothing output fails the
    type check when the job executes."""
    @op(out=Out(Nothing))
    def ret_val():
        return "val"

    @op(out=Out(Nothing))
    def METHOD_NAME():
        yield Output("val")

    with pytest.raises(DagsterTypeCheckDidNotPass):

        @job
        def fn_test():
            ret_val()

        fn_test.execute_in_process()

    with pytest.raises(DagsterTypeCheckDidNotPass):

        @job
        def fn_test2():
            METHOD_NAME()

        fn_test2.execute_in_process()
def test_wrapping_nothing():
    """Nothing cannot be nested inside List or Optional, for outs or ins —
    definition time raises DagsterInvalidDefinitionError."""
    with pytest.raises(DagsterInvalidDefinitionError):

        @op(out=Out(List[Nothing]))
        def _():
            pass

    with pytest.raises(DagsterInvalidDefinitionError):

        @op(ins={"in": In(List[Nothing])})
        def _(_in):
            pass

    with pytest.raises(DagsterInvalidDefinitionError):

        @op(out=Out(Optional[Nothing]))
        def _():
            pass

    with pytest.raises(DagsterInvalidDefinitionError):

        @op(ins={"in": In(Optional[Nothing])})
        def _(_in):
            pass
def test_execution_plan():
    """Nothing dependencies still order the execution plan: producer is in
    level 0, consumer in level 1."""
    @op(out=Out(Nothing))
    def emit_nothing():
        yield AssetMaterialization.file(path="/path/")

    @op(ins={"ready": In(Nothing)})
    def consume_nothing():
        pass

    @job
    def pipe():
        consume_nothing(emit_nothing())

    plan = create_execution_plan(pipe)
    levels = plan.get_steps_to_execute_by_level()

    assert "emit_nothing" in levels[0][0].key
    assert "consume_nothing" in levels[1][0].key

    assert pipe.execute_in_process().success
def test_nothing_infer():
    """Declaring a function parameter for a Nothing input is an error, both
    with an explicit In(Nothing) and with a Nothing type annotation."""
    with pytest.raises(
        DagsterInvalidDefinitionError,
        match="which should not be included since no data will be passed for it",
    ):

        @op(ins={"_previous_steps_complete": In(Nothing)})
        def _bad(_previous_steps_complete):
            pass

    with pytest.raises(
        DagsterInvalidDefinitionError,
        match=(
            r"must be used via In\(\) and no parameter should be included in the @op decorated"
            r" function"
        ),
    ):

        @op
        def _bad(_previous_steps_complete: Nothing):  # type: ignore
            pass
def test_none_output_non_none_input():
    """An op annotated `-> None` passes None (not Nothing) to a downstream input."""
    @op
    def op1() -> None:
        pass

    @op
    def op2(input1):
        assert input1 is None

    @job
    def job1():
        op2(op1())

    assert job1.execute_in_process().success
def test_asset_none_output_non_none_input():
    """Same None-propagation behavior for software-defined assets."""
    @asset
    def asset1() -> None:
        pass

    @asset
    def asset2(asset1):
        assert asset1 is None

    assert materialize_to_memory([asset1, asset2]).success
def test_asset_nothing_output_non_none_input():
    """An asset with dagster_type=Nothing still yields None to its downstream asset."""
    @asset(dagster_type=Nothing)
    def asset1():
        pass

    @asset
    def asset2(asset1):
        assert asset1 is None

    assert materialize_to_memory([asset1, asset2]).success
from __future__ import annotations
import logging
from typing import Any, Dict, List, Optional
import yaml
from checkov.common.bridgecrew.platform_integration import bc_integration
from checkov.common.util.file_utils import decompress_file_gzip_base64
def load_detectors() -> list[dict[str, Any]]:
    """Load secrets detectors from the platform run-config response.

    Returns an empty list when the config is unavailable, fails to load,
    or contains no secrets policies.
    """
    try:
        run_config = bc_integration.customer_run_config_response
        policies: List[dict[str, Any]] = (
            run_config.get('secretsPolicies', []) if run_config else []
        )
    except Exception as e:
        logging.error(f"Failed to get detectors from customer_run_config_response, error: {e}")
        return []

    detectors: list[dict[str, Any]] = (
        modify_secrets_policy_to_detectors(policies) if policies else []
    )
    if detectors:
        logging.info(f"Successfully loaded {len(detectors)} detectors from bc_integration")
    return detectors
def modify_secrets_policy_to_detectors(policies_list: List[dict[str, Any]]) -> List[dict[str, Any]]:
    """Convert raw secrets policies into the flat detector-dict format."""
    secrets_list = transforms_policies_to_detectors_list(policies_list)
    logging.debug(f"(modify_secrets_policy_to_detectors) len secrets_list = {len(secrets_list)}")
    return secrets_list
def METHOD_NAME(custom_detectors: List[Dict[str, Any]], name: str, check_id: str, regex: str,
                is_custom: str, is_multiline: bool = False, supported_files: Optional[List[str]] = None) -> None:
    """Append one detector entry to ``custom_detectors`` (mutates in place).

    Note: ``is_custom`` is a string flag, matching the upstream policy payload.
    """
    entry: Dict[str, Any] = {
        'Name': name,
        'Check_ID': check_id,
        'Regex': regex,
        'isCustom': is_custom,
        'isMultiline': is_multiline,
        'supportedFiles': supported_files or [],
    }
    custom_detectors.append(entry)
def add_detectors_from_condition_query(custom_detectors: List[Dict[str, Any]], condition_query: Dict[str, Any],
                                       secret_policy: Dict[str, Any], check_id: str) -> bool:
    """Create detectors from a 'secrets' condition query.

    Returns True when at least one regex was added. A missing 'cond_type'
    key raises KeyError, matching the original contract.
    """
    if condition_query['cond_type'] != 'secrets':
        return False

    value = condition_query['value']
    # A single regex string is normalized to a one-element list.
    regexes = [value] if type(value) is str else value

    added = False
    for regex in regexes:
        added = True
        METHOD_NAME(custom_detectors, secret_policy['title'], check_id, regex,
                    secret_policy['isCustom'])
    return added
def add_detectors_from_code(custom_detectors: List[Dict[str, Any]], code: str, secret_policy: Dict[str, Any],
                            check_id: str) -> bool:
    """Create detectors from a policy's YAML ``code`` definition.

    Only definitions with a 'value' and without 'is_runnable' produce
    detectors (runnable definitions are handled by get_runnable_plugins).
    Returns True when the code was parsed into at least the normalized form.
    """
    parsed = False
    code_dict = yaml.safe_load(code)
    if 'definition' in code_dict:
        if 'value' in code_dict['definition'] and 'is_runnable' not in code_dict['definition']:
            parsed = True
            # Normalize a single regex string to a list before iterating.
            if type(code_dict['definition']['value']) is str:
                code_dict['definition']['value'] = [code_dict['definition']['value']]
            for regex in code_dict['definition']['value']:
                METHOD_NAME(
                    custom_detectors,
                    secret_policy['title'],
                    check_id,
                    regex,
                    secret_policy['isCustom'],
                    code_dict['definition'].get("multiline", False),
                    code_dict['definition'].get("supported_files", [])
                )
    return parsed
def transforms_policies_to_detectors_list(custom_secrets: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Transform secrets policies into detector dicts.

    Each policy contributes detectors either from its 'conditionQuery' or,
    failing that, from its YAML 'code'. Policies that yield nothing are
    logged and skipped.
    """
    custom_detectors: List[Dict[str, Any]] = []
    for secret_policy in custom_secrets:
        parsed = False
        check_id = secret_policy['checkovCheckId'] if secret_policy['checkovCheckId'] else \
            secret_policy['incidentId']
        code = secret_policy['code']
        # BUG FIX: condition_query was previously assigned once outside the
        # loop and only overwritten when 'conditionQuery' was present, so a
        # policy WITHOUT a conditionQuery silently reused the previous
        # policy's query. Reset it per iteration instead.
        condition_query = secret_policy.get('conditionQuery')
        if condition_query:
            parsed = add_detectors_from_condition_query(custom_detectors, condition_query, secret_policy, check_id)
        elif code:
            parsed = add_detectors_from_code(custom_detectors, code, secret_policy, check_id)
        if not parsed:
            logging.info(f"policy : {secret_policy} could not be parsed")
    return custom_detectors
def get_runnable_plugins(policies: List[Dict[str, Any]]) -> Dict[str, str]:
    """Extract runnable plugin payloads from policies.

    A runnable plugin is a policy whose YAML code definition has both
    'is_runnable' and 'value'; the value is a gzip+base64 payload which is
    decompressed to UTF-8 text. Returns {policy title: decoded payload}.
    Unparseable policies are logged and skipped (best-effort by design).
    """
    runnables: dict[str, str] = {}
    for policy in policies:
        code = policy['code']
        if code:
            try:
                code_dict = yaml.safe_load(code)
                if 'definition' in code_dict:
                    if 'is_runnable' in code_dict['definition'] and 'value' in code_dict['definition']:
                        encoded_payload = code_dict['definition']['value']
                        # The payload may arrive wrapped in a list; take the first element.
                        if isinstance(encoded_payload, list):
                            encoded_payload = encoded_payload[0]
                        decoded_payload = decompress_file_gzip_base64(encoded_payload)
                        name: str = policy['title']
                        runnables[name] = decoded_payload.decode('utf8')
            except Exception as e:
                logging.warning(f"Could not parse runnable policy {policy['title']} due to: {e}")
    return runnables
from abc import ABCMeta, abstractmethod
import asyncio
from .AbstractConnector import AbstractConnector
from iroha import Iroha, IrohaCrypto, IrohaGrpc
import schedule
#import binascii ### for sendAsyncRequest
class IrohaConnector(AbstractConnector):
    """Validator connector for a Hyperledger Iroha ledger.

    Wraps an IrohaGrpc client and polls for new blocks on a schedule,
    publishing extracted events to the socket.io validator.
    """

    def __init__(self, socketio, sessionid, iroha_dic, socketIoValidator):
        self.moduleName = "IrohaConnector"
        self.iroha_dic = iroha_dic
        self.socketIoValidator = socketIoValidator
        self.net = IrohaGrpc('localhost:50051')
        self.iroha = Iroha('admin@test')
        self.admin_priv_key = 'f101537e319568c765b2cc89698325604991dca57b9716b58016b253506cab70'  # Private Key of user decided at previous line
        self.latestNumOfBlocks = 0
        self.isMonitoring = False
        print(f"##{self.moduleName}.__init__")

    def getValidatorInformation(self, validatorURL):
        """Get the validator information including version, name, ID, and other information"""
        print(f"##{self.moduleName}.getValidatorInformation()")

    def sendAsyncRequest(self, requestData):
        """Request a verifier to execute a ledger operation"""
        print(f"##{self.moduleName}.sendAsyncRequest()")
        command = requestData['method']['command']
        if command == 'sendTx':
            # BUG FIX: send_tx is an instance method; the unqualified call
            # raised NameError at runtime.
            return self.send_tx(requestData['args']['tx'])

    def send_tx(self, tx):
        """Submit a signed transaction to the Iroha node."""
        # hex_hash = binascii.hexlify(IrohaCrypto.hash(tx))
        # BUG FIX: `net` was unqualified (NameError); use the instance client.
        self.net.send_tx(tx)

    def getBalance(self, address):
        """Get balance of an account for native token on a leder"""
        print(f"##{self.moduleName}.getBalance()")

    def execSyncFunction(self, address, funcName, args):
        """Execute a synchronous function held by a smart contract"""
        print(f"##{self.moduleName}.execSyncFunction()")
        command = args['method']['command']
        print(f"##execSyncFunction : args['args']['args'] : {args['args']['args']}")

    def startMonitor(self):
        """Request a validator to start monitoring ledger"""
        # initial execution for getting current number of blocks
        self.monitoring_routine(True)
        self.isMonitoring = True
        print(f"##{self.moduleName}.startMonitor()")
        schedule.every(1).minutes.do(self.monitoring_routine)
        # Blocks this thread until stopMonitor() clears the flag.
        while self.isMonitoring:
            schedule.run_pending()

    def stopMonitor(self):
        """Request a validator to stop monitoring ledger"""
        self.isMonitoring = False
        print(f"##{self.moduleName}.stopMonitor()")

    def METHOD_NAME(self, callbackData):
        """Callback function to call when receiving data from Ledger"""
        print(f"##{self.moduleName}.cb()")

    def nop(self):
        """Nop function for testing"""
        print(f"##{self.moduleName}.nop()")

    def run_coroutine(self, coroutine, command, args, loop=None):
        """Run an async coroutine to completion on the given (or default) loop."""
        if loop is None:
            loop = asyncio.get_event_loop()
        result = loop.run_until_complete(coroutine(command, args))
        return result

    def get_block(self, blockNum):
        """Query the ledger for the block at height ``blockNum``."""
        print(f'##get_block block num is : {blockNum}')
        # create Query
        get_block_query = self.iroha.query(
            'GetBlock',
            height=blockNum
        )
        # sign Query
        IrohaCrypto.sign_query(get_block_query, self.admin_priv_key)
        # send Query
        response = self.net.send_query(get_block_query)
        return response

    def monitoring_routine(self, isInit=False):
        """Fetch every block newer than the last seen one; publish events
        unless this is the initial catch-up pass."""
        print(f'##called monitoring_routine()')
        while(True):
            blockData = self.get_block(self.latestNumOfBlocks + 1)
            if(blockData.error_response.error_code == 0):
                self.latestNumOfBlocks += 1
                if(not isInit):
                    event = self.extract_event(blockData)
                    self.socketIoValidator.publish_event(event)
            elif(blockData.error_response.error_code == 3):
                # Error code 3: block does not exist yet — caught up.
                break

    def extract_event(self, blockData):
        # TODO return event which is extracted from blockData
        # improve temporary returning blockData
        # BUG FIX: previously returned undefined name `blockDat` (NameError).
        return blockData
"""RoboMaker component for creating a simulation job batch."""
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict
from simulation_job_batch.src.robomaker_simulation_job_batch_spec import (
RoboMakerSimulationJobBatchSpec,
RoboMakerSimulationJobBatchInputs,
RoboMakerSimulationJobBatchOutputs,
)
from common.sagemaker_component import (
SageMakerComponent,
ComponentMetadata,
SageMakerJobStatus,
)
from common.boto3_manager import Boto3Manager
from common.common_inputs import SageMakerComponentCommonInputs
@ComponentMetadata(
    name="RoboMaker - Create Simulation Job Batch",
    description="Creates a simulation job batch.",
    spec=RoboMakerSimulationJobBatchSpec,
)
class RoboMakerSimulationJobBatchComponent(SageMakerComponent):
    """RoboMaker component for creating a simulation job."""

    def Do(self, spec: RoboMakerSimulationJobBatchSpec):
        """Entry point: run the standard SageMakerComponent lifecycle for this spec."""
        super().Do(spec.inputs, spec.outputs, spec.output_paths)

    def _get_job_status(self) -> SageMakerJobStatus:
        """Poll the batch and map its RoboMaker status to a SageMakerJobStatus.

        Completed -> success; TimedOut/Canceled -> inspect each created
        request for failures; Failed -> surface the batch failure details;
        anything else -> still running.
        """
        batch_response = self._rm_client.describe_simulation_job_batch(batch=self._arn)
        batch_status = batch_response["status"]
        if batch_status in ["Completed"]:
            return SageMakerJobStatus(
                is_completed=True, has_error=False, raw_status=batch_status
            )
        if batch_status in ["TimedOut", "Canceled"]:
            # Batch ended early: check each created simulation job for a failureCode.
            simulation_message = "Simulation jobs are completed\n"
            has_error = False
            for completed_request in batch_response["createdRequests"]:
                self._sim_request_ids.add(completed_request["arn"].split("/")[-1])
                simulation_response = self._rm_client.describe_simulation_job(
                    job=completed_request["arn"]
                )
                if "failureCode" in simulation_response:
                    simulation_message += f"Simulation job: {simulation_response['arn']} failed with errorCode:{simulation_response['failureCode']}\n"
                    has_error = True
            return SageMakerJobStatus(
                is_completed=True,
                has_error=has_error,
                error_message=simulation_message,
                raw_status=batch_status,
            )
        if batch_status in ["Failed"]:
            failure_message = f"Simulation batch job is in status:{batch_status}\n"
            if "failureReason" in batch_response:
                failure_message += (
                    f"Simulation failed with reason:{batch_response['failureReason']}"
                )
            if "failureCode" in batch_response:
                failure_message += (
                    f"Simulation failed with errorCode:{batch_response['failureCode']}"
                )
            return SageMakerJobStatus(
                is_completed=True,
                has_error=True,
                error_message=failure_message,
                raw_status=batch_status,
            )
        return SageMakerJobStatus(is_completed=False, raw_status=batch_status)

    def METHOD_NAME(self, inputs: SageMakerComponentCommonInputs):
        """Configures the internal AWS clients for the component.

        Args:
            inputs: A populated list of user inputs.
        """
        self._rm_client = Boto3Manager.get_robomaker_client(
            self._get_component_version(),
            inputs.region,
            endpoint_url=inputs.endpoint_url,
            assume_role_arn=inputs.assume_role,
        )
        self._cw_client = Boto3Manager.get_cloudwatch_client(
            inputs.region, assume_role_arn=inputs.assume_role
        )

    def _after_job_complete(
        self,
        job: Dict,
        request: Dict,
        inputs: RoboMakerSimulationJobBatchInputs,
        outputs: RoboMakerSimulationJobBatchOutputs,
    ):
        """Log console links for every simulation job created by the batch."""
        for sim_request_id in self._sim_request_ids:
            logging.info(
                "Simulation Job in RoboMaker: https://{}.console.aws.amazon.com/robomaker/home?region={}#/simulationJobBatches/{}".format(
                    inputs.region, inputs.region, sim_request_id
                )
            )

    def _on_job_terminated(self):
        """Cancel the whole batch when the pipeline step is terminated."""
        self._rm_client.cancel_simulation_job_batch(batch=self._arn)

    def _create_job_request(
        self,
        inputs: RoboMakerSimulationJobBatchInputs,
        outputs: RoboMakerSimulationJobBatchOutputs,
    ) -> Dict:
        """
        Documentation:https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/robomaker.html#RoboMaker.Client.start_simulation_job_batch
        """
        request = self._get_request_template("robomaker.simulation.job.batch")

        # Set batch policy inputs
        if inputs.timeout_in_secs:
            request["batchPolicy"]["timeoutInSeconds"] = inputs.timeout_in_secs
        if inputs.max_concurrency:
            request["batchPolicy"]["maxConcurrency"] = inputs.max_concurrency
        # Omit batchPolicy entirely when neither field is provided.
        if not inputs.timeout_in_secs and not inputs.max_concurrency:
            request.pop("batchPolicy")

        # Set the simulation job inputs
        request["createSimulationJobRequests"] = inputs.simulation_job_requests

        # Override with ARN of sim application from input. Can be used to pass ARN from create sim app component.
        if inputs.sim_app_arn:
            for sim_job_request in request["createSimulationJobRequests"]:
                for sim_jobs in sim_job_request["simulationApplications"]:
                    sim_jobs["application"] = inputs.sim_app_arn

        return request

    def _submit_job_request(self, request: Dict) -> Dict:
        """Start the simulation job batch via the RoboMaker API."""
        return self._rm_client.start_simulation_job_batch(**request)

    def _after_submit_job_request(
        self,
        job: Dict,
        request: Dict,
        inputs: RoboMakerSimulationJobBatchInputs,
        outputs: RoboMakerSimulationJobBatchOutputs,
    ):
        """Record batch/job identifiers on outputs and log console links."""
        outputs.arn = self._arn = job["arn"]
        outputs.batch_job_id = self._batch_job_id = job["arn"].split("/")[-1]
        logging.info(
            f"Started Robomaker Simulation Job Batch with ID: {self._batch_job_id}"
        )
        logging.info(
            "Simulation Job Batch in RoboMaker: https://{}.console.aws.amazon.com/robomaker/home?region={}#/simulationJobBatches/{}".format(
                inputs.region, inputs.region, self._batch_job_id
            )
        )
        # Track created request IDs for later status checks and log printing.
        self._sim_request_ids = set()
        for created_request in job["createdRequests"]:
            self._sim_request_ids.add(created_request["arn"].split("/")[-1])
            logging.info(
                f"Started Robomaker Simulation Job with ID: {created_request['arn'].split('/')[-1]}"
            )
        # Inform if we have any pending or failed requests
        if job["pendingRequests"]:
            logging.info("Some Simulation Requests are in state Pending")
        if job["failedRequests"]:
            logging.info("Some Simulation Requests are in state Failed")

    def _print_logs_for_job(self):
        """Print CloudWatch logs for every simulation job in the batch."""
        for sim_request_id in self._sim_request_ids:
            self._print_cloudwatch_logs("/aws/robomaker/SimulationJobs", sim_request_id)
if __name__ == "__main__":
    import sys

    # CLI entry point: parse arguments into the component spec and run it.
    spec = RoboMakerSimulationJobBatchSpec(sys.argv[1:])
    component = RoboMakerSimulationJobBatchComponent()
    component.Do(spec)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
import pytest
from pyramid.httpexceptions import HTTPBadRequest, HTTPMovedPermanently, HTTPNotFound
from trove_classifiers import sorted_classifiers
from warehouse.legacy.api import pypi
from ....common.db.classifiers import ClassifierFactory
def test_exc_with_message():
    """_exc_with_message embeds the custom message into the HTTP status line."""
    exc = pypi._exc_with_message(HTTPBadRequest, "My Test Message.")
    assert isinstance(exc, HTTPBadRequest)
    assert exc.status_code == 400
    assert exc.status == "400 My Test Message."
@pytest.mark.parametrize(
    ("settings", "expected_domain"),
    [
        ({}, "example.com"),
        ({"warehouse.domain": "w.example.com"}, "w.example.com"),
        (
            {"forklift.domain": "f.example.com", "warehouse.domain": "w.example.com"},
            "f.example.com",
        ),
    ],
)
def test_forklifted(settings, expected_domain):
    """forklifted() returns 410 pointing at the forklift/warehouse domain,
    preferring forklift.domain over warehouse.domain over the request domain."""
    request = pretend.stub(
        domain="example.com", registry=pretend.stub(settings=settings)
    )

    # NOTE(review): placeholder value mirrored by the view — confirm the real
    # information URL once the view defines one.
    information_url = "TODO"

    resp = pypi.forklifted(request)

    assert resp.status_code == 410
    assert resp.status == (
        "410 This API has moved to https://{}/legacy/. See {} for more information."
    ).format(expected_domain, information_url)
def test_doap(pyramid_request):
    """The DOAP endpoint is gone and says so with a 410."""
    resp = pypi.doap(pyramid_request)

    assert resp.status_code == 410
    assert resp.status == "410 DOAP is no longer supported."
def test_forbidden_legacy():
    """forbidden_legacy passes the original exception straight through."""
    exc, request = pretend.stub(), pretend.stub()
    resp = pypi.forbidden_legacy(exc, request)
    assert resp is exc
def test_list_classifiers(db_request):
    """list_classifiers returns all trove classifiers, newline-joined, in sorted order."""
    resp = pypi.list_classifiers(db_request)
    assert resp.status_code == 200
    assert resp.text == "\n".join(sorted_classifiers)
def METHOD_NAME():
    """The legacy search endpoint 301-redirects to the new search route,
    carrying the term over as the ``q`` query parameter."""
    term = pretend.stub()
    request = pretend.stub(
        params={"term": term},
        route_path=pretend.call_recorder(lambda *a, **kw: "/the/path"),
    )
    result = pypi.search(request)
    assert isinstance(result, HTTPMovedPermanently)
    assert result.headers["Location"] == "/the/path"
    assert result.status_code == 301
    assert request.route_path.calls == [pretend.call("search", _query={"q": term})]
class TestBrowse:
    """Tests for the legacy classifier-browse endpoint."""

    def test_browse(self, db_request):
        """A valid classifier id 301-redirects to the new search route."""
        classifier = ClassifierFactory.create(classifier="foo :: bar")
        db_request.params = {"c": str(classifier.id)}
        db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the/path")
        result = pypi.browse(db_request)
        assert isinstance(result, HTTPMovedPermanently)
        assert result.headers["Location"] == "/the/path"
        assert result.status_code == 301
        assert db_request.route_path.calls == [
            pretend.call("search", _query={"c": classifier.classifier})
        ]

    def test_browse_no_id(self):
        """Missing classifier id yields 404."""
        request = pretend.stub(params={})
        with pytest.raises(HTTPNotFound):
            pypi.browse(request)

    def test_browse_bad_id(self, db_request):
        """Unknown classifier id yields 404."""
        db_request.params = {"c": "99999"}
        with pytest.raises(HTTPNotFound):
            pypi.browse(db_request)

    # FIX: renamed from `test_brows_invalid_id` (typo) and dropped the unused
    # `request` fixture parameter, which was immediately shadowed by the stub.
    def test_browse_invalid_id(self):
        """Non-numeric classifier id yields 404."""
        request = pretend.stub(params={"c": '7"'})
        with pytest.raises(HTTPNotFound):
            pypi.browse(request)
class TestFiles:
    """Tests for the legacy per-release files endpoint."""

    def test_files(self, db_request):
        """name+version 301-redirects to the release page's #files anchor."""
        name = "pip"
        version = "10.0.0"
        db_request.params = {"name": name, "version": version}
        db_request.route_path = pretend.call_recorder(
            lambda *a, **kw: f"/project/{name}/{version}/#files"
        )
        result = pypi.files(db_request)
        assert isinstance(result, HTTPMovedPermanently)
        assert result.headers["Location"] == (f"/project/{name}/{version}/#files")
        assert result.status_code == 301
        assert db_request.route_path.calls == [
            pretend.call(
                "packaging.release", name=name, version=version, _anchor="files"
            )
        ]

    def test_files_no_version(self, db_request):
        """A missing version yields 404."""
        name = "pip"
        db_request.params = {"name": name}
        with pytest.raises(HTTPNotFound):
            pypi.files(db_request)

    def test_files_no_name(self, db_request):
        """A missing name yields 404."""
        version = "10.0.0"
        db_request.params = {"version": version}
        with pytest.raises(HTTPNotFound):
            pypi.files(db_request)
class TestDisplay:
    """Tests for the legacy project/release display endpoint."""

    def test_display(self, db_request):
        """name+version 301-redirects to the new release page."""
        name = "pip"
        version = "10.0.0"
        db_request.params = {"name": name, "version": version}
        db_request.route_path = pretend.call_recorder(
            lambda *a, **kw: f"/project/{name}/{version}/"
        )
        result = pypi.display(db_request)
        assert isinstance(result, HTTPMovedPermanently)
        assert result.headers["Location"] == (f"/project/{name}/{version}/")
        assert result.status_code == 301
        assert db_request.route_path.calls == [
            pretend.call("packaging.release", name=name, version=version)
        ]

    def test_display_no_version(self, db_request):
        """Without a version, redirect to the project page instead."""
        name = "pip"
        db_request.params = {"name": name}
        db_request.route_path = pretend.call_recorder(
            lambda *a, **kw: f"/project/{name}/"
        )
        result = pypi.display(db_request)
        assert isinstance(result, HTTPMovedPermanently)
        assert result.headers["Location"] == (f"/project/{name}/")
        assert result.status_code == 301
        assert db_request.route_path.calls == [
            pretend.call("packaging.project", name=name)
        ]

    def test_display_no_name(self, db_request):
        """A missing name yields 404."""
        version = "10.0.0"
        db_request.params = {"version": version}
        with pytest.raises(HTTPNotFound):
            pypi.display(db_request)
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import base64
import hashlib
import logging
from functools import partial
from typing import Any, Dict, List, Optional, Tuple
import ujson as json
from django.apps import apps
from django.utils.translation import ugettext_lazy as _
from pipeline.core.constants import PE
from gcloud import err_code
from gcloud.conf import settings
from gcloud.constants import COMMON, PROJECT
logger = logging.getLogger("root")
def METHOD_NAME(content):
    """Decode a base64-encoded JSON template export and verify its digest.

    Returns a dict with result/code plus either the decoded data (on
    success) or a localized error message (on failure). Judging by the
    log-message markers, this was originally named read_encoded_template_data.
    """
    try:
        data = json.loads(base64.b64decode(content))
    except Exception:
        message = _("模板解析失败: 文件解析异常, 模板参数缺陷. 请重试或联系管理员处理 | read_encoded_template_data")
        logger.error(message)
        return {"result": False, "message": message, "code": err_code.REQUEST_PARAM_INVALID.code}
    # check the validation of file
    templates_data = data["template_data"]
    # Try the current salt first, then the legacy community salt, so old
    # exports can still be imported.
    check_digest = partial(check_template_digest, templates_data=templates_data, data_digest=data["digest"])
    if not check_digest(salt=settings.TEMPLATE_DATA_SALT):
        if not check_digest(salt=settings.OLD_COMMUNITY_TEMPLATE_DATA_SALT):
            message = _("模板解析失败: 文件解析异常, 模板参数非法. 请重试或联系管理员处理 | read_encoded_template_data")
            logger.error(message)
            return {"result": False, "message": message, "code": err_code.VALIDATION_ERROR.code}
    return {"result": True, "data": data, "code": err_code.SUCCESS.code}
def check_template_digest(templates_data, data_digest, salt):
    """Return True when ``data_digest`` matches the MD5 of the salted,
    canonically-serialized (sort_keys=True) template data.

    The previous `if not valid: return False / return True` branch was
    redundant; return the comparison directly.
    """
    data_string = (json.dumps(templates_data, sort_keys=True) + salt).encode("utf-8")
    return hashlib.md5(data_string).hexdigest() == data_digest
def read_template_data_file(f):
    """Read an uploaded template file object and decode/verify its contents."""
    return METHOD_NAME(content=f.read())
def replace_template_id(template_model, pipeline_data, reverse=False):
    """Translate SubProcess template_id fields in place.

    Forward (reverse=False): model primary key -> pipeline template_id.
    Reverse (reverse=True): pipeline template_id -> model primary key (as str).
    Common-library subprocesses resolve against CommonTemplate instead of
    the given template_model.
    """
    activities = pipeline_data[PE.activities]
    for act_id, act in list(activities.items()):
        if act["type"] == PE.SubProcess:
            subprocess_template_model = (
                apps.get_model("template", "CommonTemplate") if act.get("template_source") == COMMON else template_model
            )
            if not reverse:
                act["template_id"] = subprocess_template_model.objects.get(
                    pk=act["template_id"]
                ).pipeline_template.template_id
            else:
                template = subprocess_template_model.objects.get(pipeline_template__template_id=act["template_id"])
                act["template_id"] = str(template.pk)
def inject_template_node_id(pipeline_tree: dict):
    """Recursively stamp each activity with its template_node_id (defaulting
    to the activity's own id).

    The pipeline_tree must already have been expanded via unfold_subprocess
    before this recursion is valid.
    """
    for node_id, node in pipeline_tree[PE.activities].items():
        node["template_node_id"] = node.get("template_node_id") or node_id
        if node[PE.type] != PE.SubProcess:
            continue
        # A subprocess may carry its children under either key.
        for child_key in ("pipeline_tree", "pipeline"):
            if child_key in node:
                inject_template_node_id(node[child_key])
def inject_original_template_info(pipeline_tree: dict):
    """Fill original template info (source, id, version) into SubProcess nodes,
    recursively."""
    task_template_model = apps.get_model("tasktmpl3", "TaskTemplate")
    common_template_model = apps.get_model("template", "CommonTemplate")
    for act_id, act in pipeline_tree["activities"].items():
        if act["type"] == "SubProcess":
            inject_original_template_info(act["pipeline"])
            pipeline_template_id = act["template_id"]
            # Legacy template data may lack the template_source field.
            tmpl_model_cls, candidate_tmpl_model_cls = (
                (common_template_model, task_template_model)
                if act.get("template_source") == COMMON
                else (task_template_model, common_template_model)
            )
            # Look in the preferred model first, then fall back to the other.
            template = (
                tmpl_model_cls.objects.filter(pipeline_template_id=pipeline_template_id).first()
                or candidate_tmpl_model_cls.objects.filter(pipeline_template_id=pipeline_template_id).first()
            )
            if not template:
                raise ValueError(f"Template with pipeline_template_id: {pipeline_template_id} not found")
            act["template_source"] = COMMON if isinstance(template, common_template_model) else PROJECT
            act["original_template_id"] = str(template.id)
            act["original_template_version"] = template.version
def replace_biz_id_value(pipeline_tree: dict, bk_biz_id: int):
    """Overwrite business-id values in place throughout the tree.

    Touches un-hooked biz_cc_id/bk_biz_id fields of ServiceActivity
    components and non-empty constants whose source_tag ends in
    .biz_cc_id / .bk_biz_id.
    """
    for act in pipeline_tree["activities"].values():
        if act["type"] != "ServiceActivity":
            continue
        component_data = act["component"]["data"]
        biz_field = component_data.get("biz_cc_id") or component_data.get("bk_biz_id")
        # Hooked fields are bound to a constant and must not be overwritten here.
        if biz_field and not biz_field["hook"]:
            biz_field["value"] = bk_biz_id

    for constant in pipeline_tree["constants"].values():
        if constant["source_tag"].endswith((".biz_cc_id", ".bk_biz_id")) and constant["value"]:
            constant["value"] = bk_biz_id
def fill_default_version_to_service_activities(pipeline_tree):
    """Ensure every ServiceActivity component carries a version.

    Exported data from old releases may lack the version field; without a
    default ("legacy") such flows fail to execute after import.

    :param pipeline_tree: pipeline tree dict, mutated in place
    :return: None
    """
    for act in pipeline_tree["activities"].values():
        if act["type"] != "ServiceActivity":
            continue
        component = act.get("component")
        if component and not component.get("version"):
            component["version"] = "legacy"
def fetch_templates_info(
    pipeline_template_ids: List,
    fetch_fields: Tuple,
    appointed_template_type: Optional[str] = None,
) -> List[Dict]:
    """Fetch upper-layer template rows for the given PipelineTemplate ids.

    :param pipeline_template_ids: list of PipelineTemplate ids
    :param fetch_fields: fields to include in each returned template dict
    :param appointed_template_type: restrict the search to common/project
        templates; None searches both
    :return: list of template dicts (each tagged with template_type); results
        are NOT guaranteed to correspond one-to-one with the input ids
    """
    def get_templates(template_model):
        # One-line purpose: query one model and tag each row with its type.
        template_qs = template_model.objects.filter(pipeline_template_id__in=pipeline_template_ids).values(
            *fetch_fields
        )
        template_type = COMMON if template_model.__name__ == "CommonTemplate" else PROJECT
        return [{"template_type": template_type, **template} for template in template_qs]

    task_template_model = apps.get_model("tasktmpl3", "TaskTemplate")
    common_template_model = apps.get_model("template", "CommonTemplate")
    if appointed_template_type:
        templates = get_templates(common_template_model if appointed_template_type == COMMON else task_template_model)
    else:
        task_templates = get_templates(task_template_model)
        # Only query common templates when task templates did not account
        # for every requested id.
        common_templates = (
            get_templates(common_template_model) if len(pipeline_template_ids) > len(task_templates) else []
        )
        templates = task_templates + common_templates
    return templates
def format_import_result_to_response_data(import_result: Dict[str, Any]) -> Dict[str, Any]:
    """Project a template-import result onto the API response shape.

    Only result/message/code/data are exposed; any extra keys in
    ``import_result`` are dropped. Missing keys raise KeyError, matching
    the original contract.
    """
    return {key: import_result[key] for key in ("result", "message", "code", "data")}
# (C) Copyright 2004-2023 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
""" Defines the base wxPython EditorFactory class and classes the various
styles of editors used in a Traits-based user interface.
"""
import warnings
import wx
from traits.api import TraitError, Any, Bool, Event, Str
from .editor import Editor
from .constants import WindowColor
class SimpleEditor(Editor):
    """Base class for simple style editors, which displays a text field
    containing the text representation of the object trait value. Clicking
    in the text field displays an editor-specific dialog box for changing
    the value.
    """

    #: Has the left mouse button been pressed:
    left_down = Bool(False)

    def init(self, parent):
        """Finishes initializing the editor by creating the underlying toolkit
        widget.
        """
        self.control = self.create_control(parent)
        # Track a full press+release inside the control so that only a real
        # click (not a drag that ends elsewhere) opens the popup editor.
        self.control.Bind(wx.EVT_LEFT_DOWN, self._enable_popup_editor)
        self.control.Bind(wx.EVT_LEFT_UP, self.METHOD_NAME)
        self.set_tooltip()

    def create_control(self, parent):
        """Creates the control to use for the simple editor."""
        # Read-only: edits happen through the popup editor, not by typing.
        return wx.TextCtrl(parent, -1, self.str_value, style=wx.TE_READONLY)

    # -------------------------------------------------------------------------
    #  Invokes the pop-up editor for an object trait:
    #
    #  (Normally overridden in a subclass)
    # -------------------------------------------------------------------------

    def popup_editor(self, event):
        """Invokes the pop-up editor for an object trait."""
        pass

    def _enable_popup_editor(self, event):
        """Mark the left mouse button as being pressed currently."""
        self.left_down = True

    def METHOD_NAME(self, event):
        """Display the popup editor if the left mouse button was pressed
        previously.
        """
        if self.left_down:
            self.left_down = False
            self.popup_editor(event)
class TextEditor(Editor):
    """Base class for text style editors, which displays an editable text
    field, containing a text representation of the object trait value.
    """

    def init(self, parent):
        """Finishes initializing the editor by creating the underlying toolkit
        widget.
        """
        self.control = wx.TextCtrl(
            parent, -1, self.str_value, style=wx.TE_PROCESS_ENTER
        )
        # Commit the value both when the field loses focus and when the user
        # presses Enter (wx.TE_PROCESS_ENTER enables the Enter event).
        self.control.Bind(wx.EVT_KILL_FOCUS, self.update_object)
        parent.Bind(
            wx.EVT_TEXT_ENTER, self.update_object, id=self.control.GetId()
        )
        self.set_tooltip()

    def dispose(self):
        """Disposes of the contents of an editor."""
        if self.control is not None:  # just in-case
            # Unbind both handlers bound in init() so no callbacks fire on a
            # disposed editor.
            parent = self.control.GetParent()
            parent.Unbind(
                wx.EVT_TEXT_ENTER,
                handler=self.update_object,
                id=self.control.GetId(),
            )
            self.control.Unbind(wx.EVT_KILL_FOCUS, handler=self.update_object)
        super().dispose()

    def update_object(self, event):
        """Handles the user changing the contents of the edit control."""
        if isinstance(event, wx.FocusEvent):
            # Let default focus processing continue.
            event.Skip()
        try:
            self.value = self.control.GetValue()
        except TraitError:
            # The typed text is not a valid trait value; keep the previous
            # value (deliberate best-effort, silently ignored).
            pass
class ReadonlyEditor(Editor):
    """Base class for read-only style editors, which displays a read-only text
    field, containing a text representation of the object trait value.
    """

    # -------------------------------------------------------------------------
    #  Trait definitions:
    # -------------------------------------------------------------------------

    # layout_style = 0 # Style for imbedding control in a sizer (override)

    def init(self, parent):
        """Finishes initializing the editor by creating the underlying toolkit
        widget.
        """
        multiline = (self.item.resizable is True) or (self.item.height != -1.0)
        if multiline:
            # Resizable/tall items get a borderless read-only multi-line
            # text control that blends into the parent window.
            control = wx.TextCtrl(
                parent,
                -1,
                self.str_value,
                style=wx.NO_BORDER | wx.TE_MULTILINE | wx.TE_READONLY,
            )
            control.SetBackgroundColour(WindowColor)
            self.control = control
        else:
            # Fixed-size items only need a static label.
            self.control = wx.StaticText(
                parent, -1, self.str_value, style=wx.ALIGN_LEFT
            )
            self.layout_style = 0
        self.set_tooltip()

    def update_editor(self):
        """Updates the editor when the object trait changes externally to the
        editor.
        """
        text = self.str_value
        if (self.item.resizable is True) or (self.item.height != -1.0):
            if self.control.GetValue() != text:
                self.control.SetValue(text)
        elif self.control.GetLabel() != text:
            self.control.SetLabel(text)
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test error rate."""
import unittest
from paddlespeech.s2t.utils import error_rate
class TestParse(unittest.TestCase):
    """Tests for word error rate (WER) and character error rate (CER)."""

    def test_wer_1(self):
        ref = 'i UM the PHONE IS i LEFT THE portable PHONE UPSTAIRS last night'
        hyp = 'i GOT IT TO the FULLEST i LOVE TO portable FROM OF STORES last '\
            'night'
        word_error_rate = error_rate.wer(ref, hyp)
        self.assertTrue(abs(word_error_rate - 0.769230769231) < 1e-6)

    def test_wer_2(self):
        ref = 'as any in england i would say said gamewell proudly that is '\
            'in his day'
        hyp = 'as any in england i would say said came well proudly that is '\
            'in his day'
        word_error_rate = error_rate.wer(ref, hyp)
        self.assertTrue(abs(word_error_rate - 0.1333333) < 1e-6)

    def METHOD_NAME(self):
        ref = 'the lieutenant governor lilburn w boggs afterward governor '\
            'was a pronounced mormon hater and throughout the period of '\
            'the troubles he manifested sympathy with the persecutors'
        hyp = 'the lieutenant governor little bit how bags afterward '\
            'governor was a pronounced warman hater and throughout the '\
            'period of th troubles he manifests sympathy with the '\
            'persecutors'
        word_error_rate = error_rate.wer(ref, hyp)
        self.assertTrue(abs(word_error_rate - 0.2692307692) < 1e-6)

    def test_wer_4(self):
        ref = 'the wood flamed up splendidly under the large brewing copper '\
            'and it sighed so deeply'
        hyp = 'the wood flame do splendidly under the large brewing copper '\
            'and its side so deeply'
        word_error_rate = error_rate.wer(ref, hyp)
        self.assertTrue(abs(word_error_rate - 0.2666666667) < 1e-6)

    def test_wer_5(self):
        ref = 'all the morning they trudged up the mountain path and at noon '\
            'unc and ojo sat on a fallen tree trunk and ate the last of '\
            'the bread which the old munchkin had placed in his pocket'
        hyp = 'all the morning they trudged up the mountain path and at noon '\
            'unc in ojo sat on a fallen tree trunk and ate the last of '\
            'the bread which the old munchkin had placed in his pocket'
        word_error_rate = error_rate.wer(ref, hyp)
        self.assertTrue(abs(word_error_rate - 0.027027027) < 1e-6)

    def test_wer_6(self):
        # Identical reference and hypothesis must give a WER of exactly 0.
        ref = 'i UM the PHONE IS i LEFT THE portable PHONE UPSTAIRS last night'
        word_error_rate = error_rate.wer(ref, ref)
        self.assertEqual(word_error_rate, 0.0)

    def test_wer_7(self):
        # An empty (whitespace-only) reference is invalid input.
        ref = ' '
        hyp = 'Hypothesis sentence'
        with self.assertRaises(ValueError):
            error_rate.wer(ref, hyp)

    def test_cer_1(self):
        ref = 'werewolf'
        hyp = 'weae wolf'
        char_error_rate = error_rate.cer(ref, hyp)
        self.assertTrue(abs(char_error_rate - 0.25) < 1e-6)

    def test_cer_2(self):
        ref = 'werewolf'
        hyp = 'weae wolf'
        char_error_rate = error_rate.cer(ref, hyp, remove_space=True)
        self.assertTrue(abs(char_error_rate - 0.125) < 1e-6)

    def test_cer_3(self):
        ref = 'were wolf'
        hyp = 'were wolf'
        char_error_rate = error_rate.cer(ref, hyp)
        self.assertTrue(abs(char_error_rate - 0.0) < 1e-6)

    def test_cer_4(self):
        ref = 'werewolf'
        char_error_rate = error_rate.cer(ref, ref)
        self.assertEqual(char_error_rate, 0.0)

    def test_cer_5(self):
        ref = u'我是中国人'
        hyp = u'我是 美洲人'
        char_error_rate = error_rate.cer(ref, hyp)
        self.assertTrue(abs(char_error_rate - 0.6) < 1e-6)

    def test_cer_6(self):
        ref = u'我 是 中 国 人'
        hyp = u'我 是 美 洲 人'
        char_error_rate = error_rate.cer(ref, hyp, remove_space=True)
        self.assertTrue(abs(char_error_rate - 0.4) < 1e-6)

    def test_cer_7(self):
        # Identical strings must give a CER of exactly 0.
        # BUG FIX: the original used `assertFalse(char_error_rate, 0.0)`,
        # which treats 0.0 as the failure *message* and only checks
        # truthiness; `assertEqual` is the intended assertion (cf. test_cer_4).
        ref = u'我是中国人'
        char_error_rate = error_rate.cer(ref, ref)
        self.assertEqual(char_error_rate, 0.0)

    def test_cer_8(self):
        # An empty reference is invalid input.
        ref = ''
        hyp = 'Hypothesis'
        with self.assertRaises(ValueError):
            error_rate.cer(ref, hyp)


if __name__ == '__main__':
    unittest.main()
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities that may be especially useful to plugins."""
import threading
from bleach.sanitizer import Cleaner
# pylint: disable=g-bad-import-order
# Google-only: import markdown_freewisdom
import markdown
from tensorboard import context as _context
from tensorboard.backend import experiment_id as _experiment_id
from tensorboard.util import tb_logging
logger = tb_logging.get_logger()

# HTML attributes that survive sanitization, keyed by tag name.
_ALLOWED_ATTRIBUTES = {
    "a": ["href", "title"],
    "img": ["src", "title", "alt"],
}

# HTML tags that survive sanitization; everything else is stripped by the
# bleach Cleaner. The list covers the elements the Markdown converter can
# emit: lists, headings, code blocks, tables, links and images.
_ALLOWED_TAGS = [
    "ul",
    "ol",
    "li",
    "p",
    "pre",
    "code",
    "blockquote",
    "h1",
    "h2",
    "h3",
    "h4",
    "h5",
    "h6",
    "hr",
    "br",
    "strong",
    "em",
    "a",
    "img",
    "table",
    "thead",
    "tbody",
    "td",
    "tr",
    "th",
]
# Cache Markdown converter to avoid expensive initialization at each
# call to `markdown_to_safe_html`. Cache a different instance per thread.
class _MarkdownStore(threading.local):
    # threading.local subclass: each thread lazily builds its own Markdown
    # converter, since a markdown.Markdown instance holds mutable parse
    # state and is not safe to share across threads.
    def __init__(self):
        self.markdown = markdown.Markdown(
            extensions=[
                "markdown.extensions.tables",
                "markdown.extensions.fenced_code",
            ]
        )


_MARKDOWN_STORE = _MarkdownStore()
# Cache Cleaner to avoid expensive initialization at each call to `clean`.
# Cache a different instance per thread.
class _CleanerStore(threading.local):
    # Per-thread bleach Cleaner; constructing a sanitizer is expensive, so
    # it is cached for the lifetime of each thread.
    def __init__(self):
        self.cleaner = Cleaner(
            tags=_ALLOWED_TAGS, attributes=_ALLOWED_ATTRIBUTES
        )


_CLEANER_STORE = _CleanerStore()
def METHOD_NAME(unsafe_string):
    """Return the input as a str, sanitized for insertion into the DOM.

    Arguments:
      unsafe_string: A Unicode string or UTF-8--encoded bytestring
        possibly containing unsafe HTML markup.

    Returns:
      A string containing safe HTML.
    """
    # (Removed a dead `total_null_bytes = 0` local; unlike
    # `markdowns_to_safe_html`, this function never counts null bytes.)
    if isinstance(unsafe_string, bytes):
        unsafe_string = unsafe_string.decode("utf-8")
    return _CLEANER_STORE.cleaner.clean(unsafe_string)
def markdown_to_safe_html(markdown_string):
    """Convert Markdown to HTML that's safe to splice into the DOM.

    Arguments:
      markdown_string: A Unicode string or UTF-8--encoded bytestring
        containing Markdown source. Markdown tables are supported.

    Returns:
      A string containing safe HTML.
    """
    # Delegate to the batch variant with a single-element list; the combiner
    # simply unwraps that one converted document.
    return markdowns_to_safe_html([markdown_string], lambda xs: xs[0])
def markdowns_to_safe_html(markdown_strings, combine):
    """Convert multiple Markdown documents to one safe HTML document.

    One could also achieve this by calling `markdown_to_safe_html`
    multiple times and combining the results. Compared to that approach,
    this function may be faster, because HTML sanitization (which can be
    expensive) is performed only once rather than once per input. It may
    also be less precise: if one of the input documents has unsafe HTML
    that is sanitized away, that sanitization might affect other
    documents, even if those documents are safe.

    Args:
      markdown_strings: List of Markdown source strings to convert, as
        Unicode strings or UTF-8--encoded bytestrings. Markdown tables
        are supported.
      combine: Callback function that takes a list of unsafe HTML
        strings of the same shape as `markdown_strings` and combines
        them into a single unsafe HTML string, which will be sanitized
        and returned.

    Returns:
      A string containing safe HTML.
    """
    unsafe_htmls = []
    total_null_bytes = 0
    for source in markdown_strings:
        # Convert to utf-8 whenever we have a binary input.
        if isinstance(source, bytes):
            source_decoded = source.decode("utf-8")
            # Remove null bytes and warn if there were any, since it probably means
            # we were given a bad encoding.
            # NOTE(review): null bytes are stripped only from *bytes* inputs;
            # a `str` input containing "\x00" passes through unchanged —
            # presumably intentional, since only decoding can introduce them
            # from a bad encoding. Confirm before relying on it.
            source = source_decoded.replace("\x00", "")
            total_null_bytes += len(source_decoded) - len(source)
        unsafe_html = _MARKDOWN_STORE.markdown.convert(source)
        unsafe_htmls.append(unsafe_html)
    unsafe_combined = combine(unsafe_htmls)
    # Sanitize once over the combined document (the whole point of batching).
    sanitized_combined = _CLEANER_STORE.cleaner.clean(unsafe_combined)
    warning = ""
    if total_null_bytes:
        warning = (
            "<!-- WARNING: discarded %d null bytes in markdown string "
            "after UTF-8 decoding -->\n"
        ) % total_null_bytes
    return warning + sanitized_combined
def context(environ):
    """Get a TensorBoard `RequestContext` from a WSGI environment.

    Args:
      environ: A WSGI environment `dict`.

    Returns:
      A `RequestContext` value.
    """
    return _context.from_environ(environ)
def experiment_id(environ):
    """Determine the experiment ID associated with a WSGI request.

    Each request to TensorBoard has an associated experiment ID, which is
    always a string and may be empty. This experiment ID should be passed
    to data providers.

    Args:
      environ: A WSGI environment `dict`. For a Werkzeug request, this is
        `request.environ`.

    Returns:
      A experiment ID, as a possibly-empty `str`.
    """
    # Falls back to the empty string when upstream middleware did not set
    # an experiment ID in the environment.
    return environ.get(_experiment_id.WSGI_ENVIRON_KEY, "")
class _MetadataVersionChecker:
"""TensorBoard-internal utility for warning when data is too new.
Specify a maximum known `version` number as stored in summary
metadata, and automatically reject and warn on data from newer
versions. This keeps a (single) bit of internal state to handle
logging a warning to the user at most once.
This should only be used by plugins bundled with TensorBoard, since
it may instruct users to upgrade their copy of TensorBoard.
"""
def __init__(self, data_kind, latest_known_version):
"""Initialize a `_MetadataVersionChecker`.
Args:
data_kind: A human-readable description of the kind of data
being read, like "scalar" or "histogram" or "PR curve".
latest_known_version: Highest tolerated value of `version`,
like `0`.
"""
self._data_kind = data_kind
self._latest_known_version = latest_known_version
self._warned = False
def ok(self, version, run, tag):
"""Test whether `version` is permitted, else complain."""
if 0 <= version <= self._latest_known_version:
return True
self._maybe_warn(version, run, tag)
return False
def _maybe_warn(self, version, run, tag):
if self._warned:
return
self._warned = True
logger.warning(
"Some %s data is too new to be read by this version of TensorBoard. "
"Upgrading TensorBoard may fix this. "
"(sample: run %r, tag %r, data version %r)",
self._data_kind,
run,
tag,
version,
) |
# -*- coding: utf-8; -*-
#
# (c) 2022 siveo, http://www.siveo.net
#
# This file is part of Pulse 2, http://www.siveo.net
#
# Pulse 2 is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Pulse 2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pulse 2; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import json
import requests
from requests.structures import CaseInsensitiveDict
from mmc.plugins.urbackup import config
try:
from urllib import urlencode
except BaseException:
from urllib.parse import urlencode
class UrApiWrapper:
    """Thin wrapper around the UrBackup server HTTP API.

    Every public call logs in first (the server session id is cached in
    ``self.ses``) and then sends an url-encoded action request.
    """

    # Class-level defaults; each is overwritten per instance in __init__.
    url = ""
    user_login = ""
    password = ""
    ses = ""
    headers = {}
    verify = False
    allow_redirects = True

    def __init__(self):
        _config = config.UrbackupConfig()
        self.url = _config.urbackup_url
        self.user_login = _config.urbackup_username
        self.password = _config.urbackup_password
        self.ses = ""  # sessionid
        self.headers = CaseInsensitiveDict()
        self.headers["Accept"] = "application/json"
        self.headers["Content-Type"] = "application/x-www-form-urlencoded"
        self.verify = False
        self.allow_redirects = True

    def set_header(self, key, value):
        """Set (or overwrite) a single HTTP header used by later requests."""
        self.headers[key] = value

    def request(self, action, params, method="POST"):
        """Send `params` to the server for the given API `action`.

        :param action: value of the ``a`` query-string parameter
        :param params: form data sent in the request body
        :param method: HTTP method, "GET" or "POST" (default)
        :return: the `requests` response object
        :raises ValueError: if `method` is neither "GET" nor "POST"
        """
        url = self.url + "?" + urlencode({"a": action})
        if method == "GET":
            response = requests.get(
                url,
                headers=self.headers,
                data=params,
                verify=self.verify,
                allow_redirects=self.allow_redirects,
            )
        elif method == "POST":
            response = requests.post(
                url,
                headers=self.headers,
                data=params,
                verify=self.verify,
                allow_redirects=self.allow_redirects,
            )
        else:
            # BUG FIX: previously an unsupported method fell through and
            # raised an obscure UnboundLocalError on `response`.
            raise ValueError("Unsupported HTTP method: %r" % (method,))
        return response

    def login(self, lang="en"):
        """Authenticate and cache the session id in ``self.ses``."""
        params = {
            "username": self.user_login,
            "password": self.password,
            "plainpw": 1,
            "lang": lang,
        }
        response = self.request("login", params)
        try:
            result = json.loads(response.text)
            if "session" in result:
                self.ses = result["session"]
        except BaseException:
            # Best-effort: a non-JSON reply simply leaves the session unset.
            pass
        return response

    def METHOD_NAME(self):
        """Log in and return the resulting session id."""
        self.login()
        session = self.ses
        return session

    @staticmethod
    def response(resp):
        """Normalize a `requests` response into a plain dict."""
        try:
            resp_json = json.loads(resp.text)
        except BaseException:
            # Fall back to the raw body when it is not valid JSON.
            resp_json = resp.text
        return {
            "status_code": resp.status_code,
            "headers": resp.headers,
            "content": resp_json,
        }

    def get_logs(self, clientid=0):
        """Fetch the live log for a client (0 = server log)."""
        self.login()
        params = {"clientid": clientid, "lastid": 0, "ses": self.ses}
        response = self.request("livelog", params)
        return response

    def add_client(self, clientname):
        """Register a new client by name."""
        self.login()
        params = {"clientname": clientname, "ses": self.ses}
        response = self.request("add_client", params)
        return response

    def get_stats(self):
        """Fetch storage usage statistics."""
        self.login()
        params = {"ses": self.ses}
        response = self.request("usage", params)
        return response

    def add_group(self, groupname):
        """Create a client settings group."""
        self.login()
        params = {"sa": "groupadd", "name": groupname, "ses": self.ses}
        response = self.request("settings", params)
        return response

    def remove_group(self, groupid):
        """Delete a client settings group by id."""
        self.login()
        params = {"sa": "groupremove", "id": groupid, "ses": self.ses}
        response = self.request("settings", params)
        return response

    def get_settings_general(self):
        """Fetch the server-wide general settings."""
        self.login()
        params = {"sa": "general", "ses": self.ses}
        response = self.request("settings", params)
        return response

    def save_settings(self, clientid, name_data, value_data):
        """Overwrite a single client setting (`name_data` = `value_data`)."""
        self.login()
        params = {
            "sa": "clientsettings_save",
            "t_clientid": clientid,
            "overwrite": "true",
            name_data: value_data,
            "ses": self.ses,
        }
        response = self.request("settings", params)
        return response

    def get_settings_clientsettings(self, id_client):
        """Fetch the settings of one client."""
        self.login()
        params = {"sa": "clientsettings", "t_clientid": id_client, "ses": self.ses}
        response = self.request("settings", params)
        return response

    def get_settings_clients(self):
        """List the users known to the server."""
        self.login()
        params = {"sa": "listusers", "ses": self.ses}
        response = self.request("settings", params)
        return response

    def get_backups(self, client_id):
        """List the backups of a client."""
        self.login()
        params = {"clientid": client_id, "ses": self.ses}
        response = self.request("backups", params)
        return response

    def delete_backup(self, client_id, backup_id):
        """Immediately delete one backup of a client."""
        self.login()
        params = {
            "sa": "backups",
            "clientid": client_id,
            "delete_now": backup_id,
            "ses": self.ses,
        }
        response = self.request("backups", params)
        return response

    def get_backup_files(self, client_id, backup_id, path):
        """List the files under `path` inside a backup."""
        self.login()
        params = {
            "sa": "files",
            "clientid": client_id,
            "backupid": backup_id,
            "path": path,
            "ses": self.ses,
        }
        response = self.request("backups", params)
        return response

    def client_download_backup_file(self, client_id, backup_id, path, filter_path):
        """Trigger a client-side download of files matching `filter_path`."""
        self.login()
        params = {
            "sa": "clientdl",
            "clientid": client_id,
            "backupid": backup_id,
            "path": path,
            "filter": filter_path,
            "ses": self.ses,
        }
        response = self.request("backups", params)
        return response

    def client_download_backup_file_shahash(self, client_id, backup_id, path, shahash):
        """Trigger a client-side download of the file matching `shahash`."""
        self.login()
        params = {
            "sa": "clientdl",
            "clientid": client_id,
            "backupid": backup_id,
            "path": path,
            "ses": self.ses,
            "shahash": shahash,
        }
        response = self.request("backups", params)
        return response

    def get_progress(self):
        """Fetch the progress of running backup operations."""
        self.login()
        params = {"ses": self.ses}
        response = self.request("progress", params)
        return response

    def get_status(self):
        """Fetch the overall server/client status."""
        self.login()
        params = {"ses": self.ses}
        response = self.request("status", params)
        return response

    def create_backup(self, type_backup, client_id):
        """Start a backup of the given type for the given client."""
        self.login()
        params = {"start_type": type_backup, "start_client": client_id, "ses": self.ses}
        response = self.request("start_backup", params)
        return response
#!/usr/bin/env python
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import io
import sys
import contextlib
from collections import OrderedDict
import paddle
from paddle.hapi.static_flops import Table
from paddle.hapi.dynamic_flops import (count_parameters, register_hooks,
count_io_info)
from paddleseg.utils import get_sys_env, logger, op_flops_funs, utils
from paddlepanseg.cvlibs import Config, make_default_builder
def parse_args():
    """Parse the command-line arguments of the model analysis tool."""
    parser = argparse.ArgumentParser(description="Model analysis")
    parser.add_argument('--config', dest='cfg', help="Config file.", type=str)
    parser.add_argument(
        '--input_shape',
        nargs='+',
        type=int,
        default=[1, 3, 1024, 1024],
        help="Shape of the input shape, e.g. `--input_shape 1 3 1024 1024`")
    parser.add_argument(
        '--num_levels',
        type=int,
        default=None,
        help="Maximum levels of layers to show.")
    return parser.parse_args()
@contextlib.contextmanager
def _redirect_stdout_to_str(*args, **kwargs):
with io.StringIO() as stdout:
old_stdout = sys.stdout
sys.stdout = stdout
try:
yield stdout
finally:
sys.stdout = old_stdout
def _count_layer_stats(layer, counters, level, res):
    """Recursively collect per-layer statistics into `res` (depth-first).

    :param layer: paddle layer whose subtree is summarized
    :param counters: dict mapping a column name to a fn(layer) -> value,
        applied at leaf layers only; parents get the sum over children
    :param level: 1-based nesting depth of `layer`
    :param res: accumulator list of OrderedDicts (parent before children)
    :return: `res`, with this layer's row(s) appended
    """
    info = OrderedDict()
    info['Layer Name'] = layer.full_name()
    info['Level'] = level
    children = list(layer.children())
    if len(children) > 0:
        children_names = set(m.full_name() for m in children)
        res_of_layer = []
        for child in children:
            res_of_layer = _count_layer_stats(child, counters, level + 1,
                                              res_of_layer)
        # A parent's counter value is the sum over its *direct* children
        # (grandchildren are already folded into the children's rows).
        for name in counters.keys():
            info[name] = sum(item[name] for item in res_of_layer
                             if item['Layer Name'] in children_names)
        res.append(info)
        res.extend(res_of_layer)
    else:
        # XXX: Hard-code default items
        # Shapes are only present when the counting hooks recorded them.
        if hasattr(layer, 'input_shape'):
            info['Input Shape'] = layer.input_shape.numpy().tolist()
        if hasattr(layer, 'output_shape'):
            info['Output Shape'] = layer.output_shape.numpy().tolist()
        for name, cnter in counters.items():
            info[name] = cnter(layer)
        res.append(info)
    return res
def _stats_to_table(stats, cols):
    """Render the stats rows into a `Table`, indenting by nesting level.

    Extra empty columns are inserted after 'Layer Name' so deeper layers'
    names appear shifted right, producing a tree-like layout.
    """
    levels = set(info['Level'] for info in stats)
    min_level = min(levels)
    num_pad_cols = max(levels) - min_level
    # Assume that the first column is Layer Name
    cols = cols[:1] + [''] * num_pad_cols + cols[1:]
    table = Table(cols)
    for info in stats:
        level = info['Level']
        row = [info.get(key, '') for key in cols if key != '']
        # Round float numbers
        for i, ele in enumerate(row):
            if isinstance(ele, float):
                row[i] = _round(ele)
        # Shift the layer name right by its relative depth; pad the rest.
        rel_level = (level - min_level)
        row = [''] * rel_level + [row[0]] + [''] * (num_pad_cols - rel_level
                                                    ) + row[1:]
        table.add_row(row)
    return table
def _round(x, digits=3):
return round(x, digits)
def _to_mega(x):
return float(x / 1e6)
def _to_giga(x):
return float(x / 1e9)
def dynamic_flops(model, inputs, custom_ops=None, num_levels=None):
    """Run `model` once on `inputs` and log a per-layer Params/FLOPs table.

    :param model: paddle.nn.Layer to profile (left unchanged afterwards)
    :param custom_ops: optional {layer_type: flops_fn} overriding the
        built-in `register_hooks` counters
    :param num_levels: if given, only layers nested up to this depth are shown
    :raises ValueError: if `num_levels` is given but < 1
    """
    def METHOD_NAME(m):
        # Attach counting hooks to *leaf* layers only; parents are
        # aggregated later by _count_layer_stats.
        if len(list(m.children())) > 0:
            return
        m.register_buffer('total_ops', paddle.zeros([1], dtype='int64'))
        m.register_buffer('total_params', paddle.zeros([1], dtype='int64'))
        m_type = type(m)
        flops_fn = None
        # custom_ops takes precedence over the built-in hook registry.
        if m_type in custom_ops:
            flops_fn = custom_ops[m_type]
            if m_type not in types_collection:
                print("Customize Function has been applied to {}.".format(
                    m_type))
        elif m_type in register_hooks:
            flops_fn = register_hooks[m_type]
            if m_type not in types_collection:
                print("{}'s FLOPs has been counted.".format(m_type))
        else:
            if m_type not in types_collection:
                print(
                    "Cannot find suitable count function for {}. Treat it as zero FLOPs."
                    .format(m_type))
        if flops_fn is not None:
            flops_handler = m.register_forward_post_hook(flops_fn)
            handler_collection.append(flops_handler)
        # Params and input/output-shape hooks are attached unconditionally.
        params_handler = m.register_forward_post_hook(count_parameters)
        io_handler = m.register_forward_post_hook(count_io_info)
        handler_collection.append(params_handler)
        handler_collection.append(io_handler)
        # Remember the type so the informational print fires only once.
        types_collection.add(m_type)

    if num_levels is not None and num_levels < 1:
        raise ValueError("`num_levels` must be a positive integer.")
    handler_collection = []
    types_collection = set()
    if custom_ops is None:
        custom_ops = {}
    # Profile in eval mode; restore the original training mode afterwards.
    training = model.training
    model.eval()
    model.apply(METHOD_NAME)
    with paddle.no_grad():
        model(inputs)
    if training:
        model.train()
    # Detach all hooks so the model is left as it was found.
    for handler in handler_collection:
        handler.remove()
    counters = {
        'Params (M)': lambda m: _to_mega(m.total_params),
        'FLOPs (G)': lambda m: _to_giga(m.total_ops)
    }
    stats = _count_layer_stats(model, counters, 1, [])
    if num_levels is not None:
        stats = list(filter(lambda info: info['Level'] <= num_levels, stats))
    table = _stats_to_table(
        stats, ['Layer Name', 'Input Shape', 'Output Shape', *counters.keys()])
    # Table only prints to stdout; capture that and route it to the logger.
    with _redirect_stdout_to_str() as sio:
        table.print_table()
        tab_info = sio.getvalue()
    logger.info('\n' + tab_info)
def analyze(args, cfg):
    """Build the model described by `cfg` and report its per-layer stats.

    :param args: parsed CLI namespace (uses `input_shape` and `num_levels`)
    :param cfg: panoptic-segmentation Config describing the model
    """
    # SyncBatchNorm has no built-in FLOPs counter; supply a custom one.
    custom_ops = {paddle.nn.SyncBatchNorm: op_flops_funs.count_syncbn}
    inputs = paddle.randn(args.input_shape)
    builder = make_default_builder(cfg)
    dynamic_flops(
        builder.model,
        inputs,
        custom_ops=custom_ops,
        num_levels=args.num_levels)
if __name__ == '__main__':
    # CLI entry point: validate arguments, load the config, then profile.
    args = parse_args()
    if not args.cfg:
        raise RuntimeError("No configuration file has been specified.")
    cfg = Config(args.cfg)
    # Report the environment and configuration before the analysis runs.
    utils.show_env_info()
    utils.show_cfg_info(cfg)
    logger.info("input_shape:")
    logger.info(args.input_shape)
    # FLOPs counting needs no GPU; run everything on the CPU.
    paddle.set_device('cpu')
    analyze(args, cfg)
from augmentation.paraphrase.gpt3.generator import GPT3ParaphraseGenerator
from augmentation.paraphrase.gpt3.models import GPTRequest
from augmentation.paraphrase.gpt3.gpt import GPT
import openai
import pytest
import responses
def mock_create(*args, **kwargs):
    """Stand-in for ``openai.Completion.create``: two fixed choices."""
    class FakeChoice:
        text = "Response text from gpt3"

    class FakeOutput:
        choices = [FakeChoice(), FakeChoice()]

    return FakeOutput()
def mock_submit_request(*args, **kwargs):
    """Stand-in for ``GPT.submit_request`` covering clean, noisy, duplicate
    and empty completions (13 choices in total)."""
    raw_texts = [
        "output: Are there any further test questions?",
        "output: Are there any further test questions.",
        "output: Are there any more test questions?Input: My friend has an athletic scholarship to the University of Arkansas",
        "output: Is there another test question?",
        "output: Is there another test question",
        "output: Is there another Test Question?",
        "output: Are there any more test questions?",
        "output: Are there any more test questions.",
        "output:Are there more test questions?",
        "output:",
        "output: ",
        "output: .",
        "output:?",
    ]

    class FakeChoice:
        def __init__(self, text):
            self.text = text

    class FakeOutput:
        pass

    result = FakeOutput()
    result.choices = [FakeChoice(text) for text in raw_texts]
    return result
def test_questions_set_generation(monkeypatch):
    # submit_request is patched, so no real API key or network is needed;
    # verifies deduplication/cleanup of the 13 mocked completions.
    monkeypatch.setattr(GPT, 'submit_request', mock_submit_request)
    request_data = GPTRequest(api_key="MockKey",
                              data=["Are there any more test questions?"], num_responses=13)
    gpt3_generator = GPT3ParaphraseGenerator(request_data=request_data)
    augmented_questions = gpt3_generator.paraphrases()
    expected_augmented_questions = {
        "Are there any further test questions?",
        "Is there another test question?",
        "Are there more test questions?"
    }
    assert augmented_questions == expected_augmented_questions


def test_generate_questions(monkeypatch):
    # Patch the OpenAI client itself; both mocked choices carry the same
    # text, so the result set collapses to one entry.
    monkeypatch.setattr(openai.Completion, 'create', mock_create)
    request_data = GPTRequest(api_key="MockKey",
                              data=["Are there any more test questions?"], num_responses=2)
    gpt3_generator = GPT3ParaphraseGenerator(request_data=request_data)
    augmented_questions = gpt3_generator.paraphrases()
    assert augmented_questions == {"Response text from gpt3"}


def test_generate_questions_empty_api_key(monkeypatch):
    # An empty API key must be rejected before any request is made.
    monkeypatch.setattr(openai.Completion, 'create', mock_create)
    request_data = GPTRequest(api_key="",
                              data=["Are there any more test questions?"], num_responses=2)
    with pytest.raises(Exception):
        gpt3_generator = GPT3ParaphraseGenerator(request_data=request_data)
        gpt3_generator.paraphrases()


def METHOD_NAME(monkeypatch):
    # Empty data lists, and lists containing an empty string, must all be
    # rejected; a fully valid list still succeeds.
    monkeypatch.setattr(openai.Completion, 'create', mock_create)
    request_data = GPTRequest(api_key="MockKey",
                              data=[], num_responses=2)
    with pytest.raises(Exception):
        gpt3_generator = GPT3ParaphraseGenerator(request_data=request_data)
        gpt3_generator.paraphrases()
    request_data = GPTRequest(api_key="MockKey",
                              data=[""], num_responses=2)
    with pytest.raises(Exception):
        gpt3_generator = GPT3ParaphraseGenerator(request_data=request_data)
        gpt3_generator.paraphrases()
    request_data = GPTRequest(api_key="MockKey",
                              data=["Are there any more test questions?", "Are there more questions?", ""],
                              num_responses=2)
    with pytest.raises(Exception):
        gpt3_generator = GPT3ParaphraseGenerator(request_data=request_data)
        gpt3_generator.paraphrases()
    request_data = GPTRequest(api_key="MockKey",
                              data=["Are there any more test questions?", "Are there more questions?"],
                              num_responses=2)
    gpt3_generator = GPT3ParaphraseGenerator(request_data=request_data)
    resp = gpt3_generator.paraphrases()
    assert resp == {'Response text from gpt3'}


@responses.activate
def test_generate_questions_invalid_api_key():
    # Simulate the OpenAI endpoint rejecting the key with an HTTP 500.
    from openai import APIError
    responses.add(url="https://api.openai.com/v1/engines/davinci/completions",
                  method="POST",
                  status=500,
                  body="Incorrect API key provided: InvalidKey. You can find your API key at https://beta.openai.com.")
    request_data = GPTRequest(api_key="InvalidKey",
                              data=["Are there any more test questions?"], num_responses=2)
    gpt3_generator = GPT3ParaphraseGenerator(request_data=request_data)
    with pytest.raises(APIError, match=r'.*Incorrect API key provided: InvalidKey. You can find your API key at https://beta.openai.com..*'):
        gpt3_generator.paraphrases()
import io
import os
import stat
import pytest
from tests.monkey_island.utils import assert_linux_permissions, assert_windows_permissions
from common.utils.environment import is_windows_os
from common.utils.file_utils import (
append_bytes,
create_secure_directory,
make_fileobj_copy,
open_new_securely_permissioned_file,
)
from common.utils.file_utils.secure_directory import FailedDirectoryCreationError
@pytest.fixture
def test_path_nested(tmp_path):
    # A path whose parent directories do not exist yet.
    path = tmp_path / "test1" / "test2" / "test3"
    return path


@pytest.fixture
def test_path(tmp_path):
    # A non-existent path directly under the pytest tmp dir.
    test_path = "test1"
    path = tmp_path / test_path
    return path


def test_create_secure_directory__no_parent_dir(test_path_nested):
    with pytest.raises(Exception):
        create_secure_directory(test_path_nested)


def test_open_new_securely_permissioned_file__already_exists(test_path):
    # Pre-create the file; opening it as "new" must then fail.
    os.close(os.open(test_path, os.O_CREAT, stat.S_IRWXU))
    assert os.path.isfile(test_path)
    with pytest.raises(Exception):
        with open_new_securely_permissioned_file(test_path):
            pass


def test_open_new_securely_permissioned_file__no_parent_dir(test_path_nested):
    with pytest.raises(Exception):
        with open_new_securely_permissioned_file(test_path_nested):
            pass


def test_open_new_securely_permissioned_file__write(test_path):
    # Round-trip: data written through the secure handle is readable back.
    TEST_STR = b"Hello World"
    with open_new_securely_permissioned_file(test_path, "wb") as f:
        f.write(TEST_STR)
    with open(test_path, "rb") as f:
        assert f.read() == TEST_STR


def test_create_secure_directory__path_exists_as_file(test_path):
    # A regular file occupying the target path must abort directory creation.
    with open(test_path, "w"):
        with pytest.raises(FailedDirectoryCreationError):
            create_secure_directory(test_path)
# Linux-only tests
@pytest.mark.skipif(is_windows_os(), reason="Tests Posix (not Windows) permissions.")
def test_create_secure_directory__already_exists_secure_linux(test_path):
    # Creating over an already-secure directory must be a no-op.
    test_path.mkdir(mode=stat.S_IRWXU)
    create_secure_directory(test_path)
    assert_linux_permissions(test_path)


@pytest.mark.skipif(is_windows_os(), reason="Tests Posix (not Windows) permissions.")
def test_create_secure_directory__already_exists_insecure_linux(test_path):
    # An existing world-accessible directory must be tightened to owner-only.
    test_path.mkdir(mode=0o777)
    create_secure_directory(test_path)
    assert_linux_permissions(test_path)


@pytest.mark.skipif(is_windows_os(), reason="Tests Posix (not Windows) permissions.")
def test_create_secure_directory__perm_linux(test_path):
    create_secure_directory(test_path)
    assert_linux_permissions(test_path)


@pytest.mark.skipif(is_windows_os(), reason="Tests Posix (not Windows) permissions.")
def test_open_new_securely_permissioned_file__perm_linux(test_path):
    with open_new_securely_permissioned_file(test_path):
        pass
    # Only the owner's read/write bits may be set; mask out everything else.
    st = os.stat(test_path)
    expected_mode = stat.S_IRUSR | stat.S_IWUSR
    actual_mode = st.st_mode & (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
    assert expected_mode == actual_mode
# Windows-only tests
@pytest.mark.skipif(not is_windows_os(), reason="Tests Windows (not Posix) permissions.")
def test_create_secure_directory__already_exists_secure_windows(test_path):
    """Calling create_secure_directory twice must succeed and keep secure ACLs."""
    # creates a new secure directory
    create_secure_directory(test_path)
    # attempts to create a new secure directory when one already exists
    create_secure_directory(test_path)
    assert_windows_permissions(test_path)
@pytest.mark.skipif(not is_windows_os(), reason="Tests Windows (not Posix) permissions.")
def test_create_secure_directory__already_exists_insecure_windows(test_path):
    """A pre-existing default-ACL directory must end up with secure ACLs after the call."""
    test_path.mkdir()
    create_secure_directory(test_path)
    assert_windows_permissions(test_path)
@pytest.mark.skipif(not is_windows_os(), reason="Tests Windows (not Posix) permissions.")
def test_create_secure_directory__perm_windows(test_path):
    """A freshly created directory must carry the expected Windows ACLs."""
    create_secure_directory(test_path)
    assert_windows_permissions(test_path)
@pytest.mark.skipif(not is_windows_os(), reason="Tests Windows (not Posix) permissions.")
def test_open_new_securely_permissioned_file__perm_windows(test_path):
    """A newly created secure file must carry the expected Windows ACLs.

    Renamed from the ``METHOD_NAME`` placeholder, mirroring the
    ``__perm_linux`` variant of the same test above.
    """
    with open_new_securely_permissioned_file(test_path):
        pass
    assert_windows_permissions(test_path)
def test_make_fileobj_copy():
    """make_fileobj_copy must duplicate the contents and rewind both handles."""
    TEST_STR = b"Hello World"
    with io.BytesIO(TEST_STR) as src:
        dst = make_fileobj_copy(src)
        # Writing the assertion this way verifies that both src and dest file handles have had
        # their positions reset to 0.
        assert src.read() == TEST_STR
        assert dst.read() == TEST_STR
def test_make_fileobj_copy_seek_src_to_0():
    """The copy must contain the full contents even when src starts mid-stream."""
    TEST_STR = b"Hello World"
    with io.BytesIO(TEST_STR) as src:
        # Start from the middle to prove the helper rewinds src before copying.
        src.seek(int(len(TEST_STR) / 2))
        dst = make_fileobj_copy(src)
        # Writing the assertion this way verifies that both src and dest file handles have had
        # their positions reset to 0.
        assert src.read() == TEST_STR
        assert dst.read() == TEST_STR
def test_append_bytes__pos_0():
    """append_bytes must append at the end while keeping the stream at position 0."""
    bytes_io = io.BytesIO(b"1234 5678")
    append_bytes(bytes_io, b"abcd")
    assert bytes_io.read() == b"1234 5678abcd"
def test_append_bytes__pos_5():
    """append_bytes must write at the end but restore the caller's stream position."""
    bytes_io = io.BytesIO(b"1234 5678")
    bytes_io.seek(5, io.SEEK_SET)
    append_bytes(bytes_io, b"abcd")
    # Position was restored to 5, so only the old suffix plus the appended bytes are read.
    assert bytes_io.read() == b"5678abcd"
    bytes_io.seek(0, io.SEEK_SET)
    assert bytes_io.read() == b"1234 5678abcd"
298,838 | is allowed by role hierarchy | """
MIT License
Copyright (c) 2020-present phenom4n4n
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import discord
from discord.ext.commands.converter import Converter, RoleConverter
from rapidfuzz import process
from redbot.core import commands
from redbot.core.commands import BadArgument
from unidecode import unidecode
def METHOD_NAME(
    bot,
    bot_me: discord.Member,
    mod: discord.Member,
    role: discord.Role,
):
    """Check whether *mod* may manage *role*, given both role hierarchies.

    Returns an ``(allowed, message)`` tuple: ``allowed`` is False when the
    bot's top role is not above *role*, or when *mod* is neither above *role*
    nor the guild owner. ``message`` is the explanation a caller may show when
    ``allowed`` is False.
    """
    if role >= bot_me.top_role:
        return (False, f"I am not higher than `{role}` in hierarchy.")
    else:
        return (
            (mod.top_role > role) or mod == mod.guild.owner,
            f"You are not higher than `{role}` in hierarchy.",
        )
class LevelConverter(Converter):
    """Argument converter for a trust level: accepts the integers 0-3 only."""

    async def convert(self, ctx: commands.Context, argument: str) -> int:
        try:
            parsed = int(argument)
        except ValueError:
            raise BadArgument
        if parsed in range(4):
            return parsed
        raise BadArgument(
            "This is not a valid Trust Level. The valid Levels are: 0, 1, 2, and 3."
        )
class ActionConverter(Converter):
    """Argument converter that only accepts (case-insensitively) "kick" or "ban"."""

    async def convert(self, ctx: commands.Context, argument: str) -> str:
        action = argument.lower()
        if action in ("kick", "ban"):
            return action
        raise BadArgument(
            "This is not a valid action. The valid actions are kick and ban. For roles, supply a role."
        )
# original converter from https://github.com/TrustyJAID/Trusty-cogs/blob/master/serverstats/converters.py#L19
class FuzzyRole(RoleConverter):
    """
    This will accept role ID's, mentions, and perform a fuzzy search for
    roles within the guild and return a list of role objects
    matching partial names
    Guidance code on how to do this from:
    https://github.com/Rapptz/discord.py/blob/rewrite/discord/ext/commands/converter.py#L85
    https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/cogs/mod/mod.py#L24
    """

    def __init__(self, response: bool = True):
        # When True, failures raise BadArgument with a readable message;
        # when False, BadArgument is raised with None as its message.
        self.response = response
        super().__init__()

    async def convert(self, ctx: commands.Context, argument: str) -> discord.Role:
        # Exact matches (ID, mention, exact name) resolve through the stock
        # RoleConverter first; fall back to fuzzy matching only on failure.
        try:
            basic_role = await super().convert(ctx, argument)
        except BadArgument:
            pass
        else:
            return basic_role
        guild = ctx.guild
        # Fuzzy-match against accent-folded role names; each r from
        # process.extract is indexed as r[1]=score, r[2]=the dict key (the
        # role object), yielding (role, score) pairs scoring >= 75.
        result = [
            (r[2], r[1])
            for r in process.extract(
                argument,
                {r: unidecode(r.name) for r in guild.roles},
                limit=None,
                score_cutoff=75,
            )
        ]
        if not result:
            raise BadArgument(f'Role "{argument}" not found.' if self.response else None)
        # Return the highest-scoring role.
        sorted_result = sorted(result, key=lambda r: r[1], reverse=True)
        return sorted_result[0][0]
class StrictRole(FuzzyRole):
    """FuzzyRole that additionally rejects managed (integration) roles and
    roles the invoker or bot cannot manage per the role hierarchy."""

    def __init__(self, response: bool = True, *, check_integrated: bool = True):
        self.response = response
        # When True, roles with role.managed set (integration roles) are refused.
        self.check_integrated = check_integrated
        super().__init__(response)

    async def convert(self, ctx: commands.Context, argument: str) -> discord.Role:
        role = await super().convert(ctx, argument)
        if self.check_integrated and role.managed:
            raise BadArgument(
                f"`{role}` is an integrated role and cannot be assigned."
                if self.response
                else None
            )
        # NOTE(review): when response is False, BadArgument(None) is raised —
        # presumably silenced by the upstream error handler; confirm it
        # tolerates a None message.
        allowed, message = METHOD_NAME(ctx.bot, ctx.me, ctx.author, role)
        if not allowed:
            raise BadArgument(message if self.response else None)
        return role
298,839 | add text | from abc import abstractmethod, ABC
from typing import Union, Any
import numpy as np
from PIL import Image
import torch
from super_gradients.common.sg_loggers.time_units import TimeUnit
class AbstractSGLogger(ABC):
    """
    A SGLogger handles all outputs of the training process.
    Every generated file, log, metrics value, image or other artifacts produced by the trainer will be processed and saved.
    Inheriting SGLogger can be used in order to integrate experiment management framework, special storage setting, a specific logging library etc.
    Important: The BaseSGLogger class (inheriting from SGLogger) is used by the trainer by default. When defining your own SGLogger you will
    override all default output functionality. No files will saved to disk and no data will be collected.
    Make sure you either implement this functionality or use SGLoggers.Compose([BaseSGLogger(...), YourSGLogger(...)]) to build on top of it.
    """

    @abstractmethod
    def add(self, tag: str, obj: Any, global_step: int = None):
        """
        A generic function for adding any type of data to the SGLogger. By default, this function is not called by the Trainer, BaseSGLogger
        does nothing with this type of data. But if you need to pass a data type which is not supported by any of the following abstract methods, use this
        method.
        """
        raise NotImplementedError

    @abstractmethod
    def add_config(self, tag: str, config: dict):
        """
        Add the configuration (settings and hyperparameters) to the SGLoggers.
        Typically, this function will add the configuration dictionary to logs,
        write it to tensorboard, send it to an experiment management framework ect.

        :param tag: Data identifier
        :param config: a dictionary of the experiment config
        """
        raise NotImplementedError

    @abstractmethod
    def add_scalar(self, tag: str, scalar_value: float, global_step: Union[int, TimeUnit] = None):
        """
        Add scalar data to SGLogger.
        Typically, this function will add scalar to tensorboard or other experiment management framework.

        :param tag: Data identifier
        :param scalar_value: Value to save
        :param global_step: Global step value to record
        """
        raise NotImplementedError

    @abstractmethod
    def add_scalars(self, tag_scalar_dict: dict, global_step: int = None):
        """
        Adds multiple scalar data to SGLogger.
        Typically, this function will add scalars to tensorboard or other experiment management framework.

        :param tag_scalar_dict: a dictionary {tag(str): value(float)} of the scalars.
        :param global_step: Global step value to record
        """
        raise NotImplementedError

    @abstractmethod
    def add_image(self, tag: str, image: Union[torch.Tensor, np.array, Image.Image], data_format: str = "CHW", global_step: int = None):
        """
        Add a single image to SGLogger.
        Typically, this function will add an image to tensorboard, save it to disk or add it to experiment management framework.

        :param tag: Data identifier
        :param image: an image to be added. The values should lie in [0, 255] for type uint8 or [0, 1] for type float.
        :param data_format: Image data format specification of the form CHW, HWC, HW, WH, etc.
        :param global_step: Global step value to record
        """
        raise NotImplementedError

    @abstractmethod
    def add_images(self, tag: str, images: Union[torch.Tensor, np.array], data_format="NCHW", global_step: int = None):
        """
        Add multiple images to SGLogger.
        Typically, this function will add images to tensorboard, save them to disk or add them to experiment management framework.

        :param tag: Data identifier
        :param images: images to be added. The values should lie in [0, 255] for type uint8 or [0, 1] for type float.
        :param data_format: Image data format specification of the form NCHW, NHWC, NHW, NWH, etc.
        :param global_step: Global step value to record
        """
        raise NotImplementedError

    @abstractmethod
    def add_histogram(self, tag: str, values: Union[torch.Tensor, np.array], bins: Union[str, np.array, list, int] = "auto", global_step: int = None):
        """
        Add a histogram to SGLogger.
        Typically, this function will add a histogram to tensorboard or add it to experiment management framework.

        :param tag: Data identifier
        :param values: Values to build histogram
        :param bins: This determines how the bins are made.
            If bins is an int, it defines the number of equal-width bins in the given range
            If bins is a sequence, it defines a monotonically increasing array of bin edges, including the rightmost edge, allowing for non-uniform bin widths.
            If bins is a string, it defines the method used to calculate the optimal bin width, as defined by
            https://numpy.org/doc/stable/reference/generated/numpy.histogram_bin_edges.html#numpy.histogram_bin_edges
            one of [‘sqrt’, ’auto’, ‘fd’, ‘doane’, ‘scott’, ‘stone’...]
        :param global_step: Global step value to record
        """
        raise NotImplementedError

    # Renamed from the METHOD_NAME placeholder to match the sibling ``add_*``
    # methods and the docstring's stated purpose.
    @abstractmethod
    def add_text(self, tag: str, text_string: str, global_step: int = None):
        """
        Add a text to SGLogger.
        Typically, this function will add a text to tensorboard or add it to experiment management framework.

        :param tag: Data identifier
        :param text_string: the text to be added
        :param global_step: Global step value to record
        """
        raise NotImplementedError

    @abstractmethod
    def add_checkpoint(self, tag: str, state_dict: dict, global_step: int = None):
        """
        Add a checkpoint to SGLogger
        Typically, this function will write a torch file to disk, upload it to remote storage or to experiment management framework.

        :param tag: Data identifier
        :param state_dict: the state dict to save. The state dict includes more than just the model weight and may include any of:
            net: model weights
            acc: current accuracy (depends on metrics)
            epoch: current epoch
            optimizer_state_dict: optimizer state
            scaler_state_dict: torch.amp.scaler sate
        :param global_step: Global step value to record
        """
        raise NotImplementedError

    @abstractmethod
    def add_file(self, file_name: str = None):
        """
        Add a file from the checkpoint directory to the logger (usually, upload the file or adds it to an artifact)
        """
        raise NotImplementedError

    @abstractmethod
    def upload(self):
        """
        Upload any files which should be stored on remote storage
        """
        raise NotImplementedError

    @abstractmethod
    def flush(self):
        """
        Flush the SGLogger's cache
        """
        raise NotImplementedError

    @abstractmethod
    def close(self):
        """
        Close the SGLogger
        """
        raise NotImplementedError

    @abstractmethod
    def local_dir(self) -> str:
        """
        A getter for the full/absolute path where all files are saved locally
        :return:
        """
        raise NotImplementedError

    def download_remote_ckpt(self, ckpt_name: str, *args, **kwargs):
        # Optional hook: concrete loggers backed by remote storage override this.
        raise NotImplementedError
298,840 | register | from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from registration import signals
from registration.forms import RegistrationForm
from registration.models import RegistrationProfile
class DefaultBackend(object):
    """
    A registration backend which follows a simple workflow:
    1. User signs up, inactive account is created.
    2. Email is sent to user with activation link.
    3. User clicks activation link, account is now active.
    Using this backend requires that

    * ``registration`` be listed in the ``INSTALLED_APPS`` setting
      (since this backend makes use of models defined in this
      application).

    * The setting ``ACCOUNT_ACTIVATION_DAYS`` be supplied, specifying
      (as an integer) the number of days from registration during
      which a user may activate their account (after that period
      expires, activation will be disallowed).

    * The creation of the templates
      ``registration/activation_email_subject.txt`` and
      ``registration/activation_email.txt``, which will be used for
      the activation email. See the notes for this backends
      ``register`` method for details regarding these templates.

    Additionally, registration can be temporarily closed by adding the
    setting ``REGISTRATION_OPEN`` and setting it to
    ``False``. Omitting this setting, or setting it to ``True``, will
    be interpreted as meaning that registration is currently open and
    permitted.
    Internally, this is accomplished via storing an activation key in
    an instance of ``registration.models.RegistrationProfile``. See
    that model and its custom manager for full documentation of its
    fields and supported operations.
    """

    # Renamed from the METHOD_NAME placeholder: the class docstring refers to
    # "this backends ``register`` method", which is the backend-protocol name.
    def register(self, request, **kwargs):
        """
        Given a username, email address and password, register a new
        user account, which will initially be inactive.
        Along with the new ``User`` object, a new
        ``registration.models.RegistrationProfile`` will be created,
        tied to that ``User``, containing the activation key which
        will be used for this account.
        An email will be sent to the supplied email address; this
        email should contain an activation link. The email will be
        rendered using two templates. See the documentation for
        ``RegistrationProfile.send_activation_email()`` for
        information about these templates and the contexts provided to
        them.
        After the ``User`` and ``RegistrationProfile`` are created and
        the activation email is sent, the signal
        ``registration.signals.user_registered`` will be sent, with
        the new ``User`` as the keyword argument ``user`` and the
        class of this backend as the sender.
        """
        username, email, password = kwargs['username'], kwargs['email'], kwargs['password1']
        site = get_current_site(request)
        new_user = RegistrationProfile.objects.create_inactive_user(username, email,
                                                                    password, site)
        signals.user_registered.send(sender=self.__class__,
                                     user=new_user,
                                     request=request)
        return new_user

    def activate(self, request, activation_key):
        """
        Given an an activation key, look up and activate the user
        account corresponding to that key (if possible).
        After successful activation, the signal
        ``registration.signals.user_activated`` will be sent, with the
        newly activated ``User`` as the keyword argument ``user`` and
        the class of this backend as the sender.
        """
        activated = RegistrationProfile.objects.activate_user(activation_key)
        if activated:
            signals.user_activated.send(sender=self.__class__,
                                        user=activated,
                                        request=request)
        return activated

    def registration_allowed(self, request):
        """
        Indicate whether account registration is currently permitted,
        based on the value of the setting ``REGISTRATION_OPEN``. This
        is determined as follows:

        * If ``REGISTRATION_OPEN`` is not specified in settings, or is
          set to ``True``, registration is permitted.

        * If ``REGISTRATION_OPEN`` is both specified and set to
          ``False``, registration is not permitted.
        """
        return getattr(settings, 'REGISTRATION_OPEN', True)

    def get_form_class(self, request):
        """
        Return the default form class used for user registration.
        """
        return RegistrationForm

    def post_registration_redirect(self, request, user):
        """
        Return the name of the URL to redirect to after successful
        user registration.
        """
        return ('registration_complete', (), {})

    def post_activation_redirect(self, request, user):
        """
        Return the name of the URL to redirect to after successful
        account activation.
        """
        return ('registration_activation_complete', (), {})
298,841 | pbasis init | import itertools
import numpy as np
from .element import Element
from .discrete_field import DiscreteField
class ElementGlobal(Element):
    """Elements defined implicitly through global degrees-of-freedom."""

    V = None  # For caching inverse Vandermonde matrix
    derivatives = 2  # By default, include first and second derivatives
    tensorial_basis = False

    def gbasis(self, mapping, X, i, tind=None):
        """Evaluate the i'th global basis function (value, gradient, hessian,
        and any configured higher derivatives) at the mapped points."""
        if tind is None:
            tind = np.arange(mapping.mesh.t.shape[1])
        if self.V is None:
            # initialize power basis
            self._pbasis_init(self.maxdeg,
                              self.dim,
                              self.derivatives,
                              self.tensorial_basis)
            # construct Vandermonde matrix and invert it
            self.V = np.linalg.inv(self._eval_dofs(mapping.mesh))
        V = self.V[tind]
        x = mapping.F(X, tind=tind)
        U = [np.zeros((self.dim,) * k + x[0].shape)
             for k in range(self.derivatives + 1)]
        N = len(self._pbasis[()])
        # loop over new basis
        for k in range(self.derivatives + 1):
            diffs = list(itertools.product(*((list(range(self.dim)),) * k)))
            for itr in range(N):
                for diff in diffs:
                    U[k][diff] += (V[:, itr, i][:, None]
                                   * self._pbasis[diff][itr](*x))
        hod = {}
        for k in range(self.derivatives - 2):
            hod['grad{}'.format(k + 3)] = U[k + 3]
        return (
            DiscreteField(
                value=U[0],
                grad=U[1],
                hess=U[2],
                **hod
            ),
        )

    def _pbasis_create(self, i, j=None, k=None, dx=0, dy=0, dz=0):
        """Return a single power basis function."""
        if j is None and k is None:  # 1d
            cx = 1
            if dx > 0:
                for l in np.arange(dx, 0, -1):
                    cx *= i - dx + l
            return eval(("lambda x: {}*x**{}"
                         .format(cx, np.max([i - dx, 0]))))
        elif k is None:  # 2d
            cx = 1
            cy = 1
            if dx > 0:
                for l in np.arange(dx, 0, -1):
                    cx *= i - dx + l
            if dy > 0:
                for l in np.arange(dy, 0, -1):
                    cy *= j - dy + l
            return eval(("lambda x, y: {}*x**{}*y**{}"
                         .format(cx * cy,
                                 np.max([i - dx, 0]),
                                 np.max([j - dy, 0]))))
        else:  # 3d
            cx = 1
            cy = 1
            cz = 1
            if dx > 0:
                for l in np.arange(dx, 0, -1):
                    cx *= i - dx + l
            if dy > 0:
                for l in np.arange(dy, 0, -1):
                    cy *= j - dy + l
            if dz > 0:
                for l in np.arange(dz, 0, -1):
                    cz *= k - dz + l
            return eval(("lambda x, y, z: {}*x**{}*y**{}*z**{}"
                         .format(cx * cy * cz,
                                 np.max([i - dx, 0]),
                                 np.max([j - dy, 0]),
                                 np.max([k - dz, 0]),)))

    # Renamed from the METHOD_NAME placeholder; the call site in gbasis above
    # is updated to match.
    def _pbasis_init(self, maxdeg, dim, Ndiff, is_tensorial=False):
        """Define power bases.

        Parameters
        ----------
        maxdeg
            Maximum degree of the basis
        dim
            Dimension of the domain.x
        Ndiff
            Number of derivatives to include.
        """
        if is_tensorial:
            maxdeg = int(maxdeg / 2)
        self._pbasis = {}
        for k in range(Ndiff + 1):
            diffs = list(itertools.product(*((list(range(dim)),) * k)))
            for diff in diffs:
                # desc = ''.join([str(d) for d in diff])
                dx = sum([1 for d in diff if d == 0])
                dy = sum([1 for d in diff if d == 1]) if dim >= 2 else None
                dz = sum([1 for d in diff if d == 2]) if dim >= 3 else None
                if dim == 1:
                    self._pbasis[diff] = [
                        self._pbasis_create(i=i, dx=dx)
                        for i in range(maxdeg + 1)
                        if i <= maxdeg
                    ]
                elif dim == 2:
                    self._pbasis[diff] = [
                        self._pbasis_create(i=i, j=j, dx=dx, dy=dy)
                        for i in range(maxdeg + 1)
                        for j in range(maxdeg + 1)
                        if is_tensorial or i + j <= maxdeg
                    ]
                elif dim == 3:
                    self._pbasis[diff] = [
                        self._pbasis_create(i=i, j=j, k=k, dx=dx, dy=dy, dz=dz)
                        for i in range(maxdeg + 1)
                        for j in range(maxdeg + 1)
                        for k in range(maxdeg + 1)
                        if is_tensorial or i + j + k <= maxdeg
                    ]

    def _eval_dofs(self, mesh, tind=None):
        if tind is None:
            tind = np.arange(mesh.t.shape[1])
        N = len(self._pbasis[()])
        V = np.zeros((len(tind), N, N))
        w = {
            'v': np.array([mesh.p[:, mesh.t[itr, tind]]
                           for itr in range(mesh.t.shape[0])]),
        }
        if mesh.p.shape[0] == 2:
            w['e'] = np.array([
                .5 * (w['v'][itr] + w['v'][(itr + 1) % mesh.t.shape[0]])
                for itr in range(mesh.t.shape[0])
            ])
            w['n'] = np.array([
                w['v'][itr] - w['v'][(itr + 1) % mesh.t.shape[0]]
                for itr in range(mesh.t.shape[0])
            ])
            w['n'][2] = -w['n'][2]  # direction swapped due to mesh numbering
            for itr in range(3):
                w['n'][itr] = np.array([w['n'][itr, 1, :],
                                        -w['n'][itr, 0, :]])
                w['n'][itr] /= np.linalg.norm(w['n'][itr], axis=0)
        # evaluate dofs, gdof implemented in subclasses
        for itr in range(N):
            for jtr in range(N):
                F = {k: self._pbasis[k][itr] for k in self._pbasis}
                V[:, jtr, itr] = self.gdof(F, w, jtr)
        return V
298,842 | test two vectors | #!/usr/bin/env python
"""
Unit tests for the rotmat library
"""
from __future__ import absolute_import, print_function
from math import radians, degrees
import unittest
import random
import numpy as np
from pymavlink.rotmat import Vector3, Matrix3, Plane, Line
class VectorTest(unittest.TestCase):
    """
    Class to test Vector3
    """

    def __init__(self, *args, **kwargs):
        """Constructor, set up some data that is reused in many tests"""
        super(VectorTest, self).__init__(*args, **kwargs)

    def test_constructor(self):
        """Test the constructor functionality"""
        v1 = Vector3(1, 0.2, -3)
        v2 = Vector3([1, 0.2, -3])
        v3 = Vector3([1, 0.3, -3])
        assert v1 == v2
        assert v1 != v3
        assert str(v1) == "Vector3(1.00, 0.20, -3.00)"

    def test_maths(self):
        """Test simple maths"""
        v1 = Vector3(1, 2, -3)
        v2 = Vector3(1, 3, 3)
        assert v1 + v2 == Vector3(2, 5, 0)
        assert v1 - v2 == Vector3(0, -1, -6)
        assert (v1 * 3) == Vector3(3, 6, -9)
        # '*' between two vectors is the dot product: 1*1 + 2*3 + (-3)*3 == -2
        assert v1 * v2 == -2
        assert v2 / 2.0 == Vector3(0.5, 1.5, 1.5)
        assert v2 // 2.0 == Vector3(0, 1, 1)
        assert v2 / 2.1 == Vector3(0.47619047619047616, 1.4285714285714286, 1.4285714285714286)
        assert v2 // 2.1 == Vector3(0.0, 1.0, 1.0)
        # '%' between two vectors is the cross product
        assert v1 % v2 == Vector3(15.00, -6.00, 1.00)
        np.testing.assert_almost_equal(v2.length(), 4.358898943540674)
        assert v2.normalized().close(Vector3(0.23, 0.69, 0.69), tol=1e-2)
        np.testing.assert_almost_equal(v1.angle(v2), 1.693733631245806)
class MatrixTest(unittest.TestCase):
    """
    Class to test Matrix3
    """

    def __init__(self, *args, **kwargs):
        """Constructor, set up some data that is reused in many tests"""
        super(MatrixTest, self).__init__(*args, **kwargs)

    def test_constructor(self):
        """Test the constructor functionality"""
        m1 = Matrix3(Vector3(1, 0, 0), Vector3(1, 5, 0), Vector3(1, 0, -7))
        m2 = Matrix3()
        assert str(m1) == 'Matrix3((1.00, 0.00, 0.00), (1.00, 5.00, 0.00), (1.00, 0.00, -7.00))'
        assert str(m2) == 'Matrix3((1.00, 0.00, 0.00), (0.00, 1.00, 0.00), (0.00, 0.00, 1.00))'

    def test_maths(self):
        m1 = Matrix3(Vector3(1, 0, 0), Vector3(1, 5, 0), Vector3(1, 0, -7))
        m2 = Matrix3()
        assert m1 + m2 == Matrix3(Vector3(2, 0, 0), Vector3(1, 6, 0), Vector3(1, 0, -6))
        assert m1 - m2 == Matrix3(Vector3(0, 0, 0), Vector3(1, 4, 0), Vector3(1, 0, -8))
        assert m1 * 3 == Matrix3(Vector3(3, 0, 0), Vector3(3, 15, 0), Vector3(3, 0, -21))
        assert m1 * m1 == Matrix3(Vector3(1, 0, 0), Vector3(6, 25, 0), Vector3(-6, 0, 49))
        assert m1 / 2.0 == Matrix3(Vector3(0.5, 0, 0), Vector3(0.5, 2.5, 0), Vector3(0.5, 0, -3.5))
        assert m1 / 0.5 == Matrix3(Vector3(2, 0, 0), Vector3(2, 10, 0), Vector3(2, 0, -14))
        assert m1.transposed() == Matrix3(Vector3(1, 1, 1), Vector3(0, 5, 0), Vector3(0, 0, -7))

    def test_euler(self):
        '''check that from_euler() and to_euler() are consistent'''
        m = Matrix3()
        for r in range(-179, 179, 10):
            for p in range(-89, 89, 10):
                for y in range(-179, 179, 10):
                    m.from_euler(radians(r), radians(p), radians(y))
                    (r2, p2, y2) = m.to_euler()
                    v1 = Vector3(r, p, y)
                    v2 = Vector3(degrees(r2), degrees(p2), degrees(y2))
                    diff = v1 - v2
                    assert diff.length() < 1.0e-12

    def test_euler312(self):
        '''check that from_euler312() and to_euler312() are consistent'''
        m = Matrix3()
        for r in range(-89, 89, 10):
            for p in range(-179, 179, 10):
                for y in range(-179, 179, 10):
                    m.from_euler312(radians(r), radians(p), radians(y))
                    (r2, p2, y2) = m.to_euler312()
                    v1 = Vector3(r, p, y)
                    v2 = Vector3(degrees(r2), degrees(p2), degrees(y2))
                    diff = v1 - v2
                    assert diff.length() < 1.0e-12

    def test_matrixops(self):
        m1 = Matrix3(Vector3(1, 0, 0), Vector3(1, 5, 0), Vector3(1, 0, -7))
        m1.normalize()
        #print(m1)
        assert m1.close(Matrix3(Vector3(0.2, -0.98, 0), Vector3(0.1, 1, 0), Vector3(0, 0, 1)), tol=1e-2)
        np.testing.assert_almost_equal(m1.trace(), 2.19115332535)
        m1.rotate(Vector3(0.2,-0.98,0))
        assert m1.close(Matrix3(Vector3(0.2,-0.98,0), Vector3(0.1,1,-0.3), Vector3(0.98,0.2,1)), tol=1e-2)

    def test_axisangle(self):
        axis = Vector3(0, 1, 0)
        angle = radians(45)
        m1 = Matrix3()
        m1.from_axis_angle(axis, angle)
        #print(m1)
        assert m1.close(Matrix3(Vector3(0.71, 0.00, 0.71),
                                Vector3(0.00, 1.00, 0.00),
                                Vector3(-0.71, 0.00, 0.71)), tol=1e-2)

    # Renamed from the METHOD_NAME placeholder, per the docstring below.
    def test_two_vectors(self):
        '''test the from_two_vectors() method'''
        for i in range(100):
            v1 = Vector3(1, 0.2, -3)
            v2 = Vector3(random.uniform(-5, 5), random.uniform(-5, 5), random.uniform(-5, 5))
            m = Matrix3()
            m.from_two_vectors(v1, v2)
            # The computed rotation must carry v1 onto v2's direction.
            v3 = m * v1
            diff = v3.normalized() - v2.normalized()
            (r, p, y) = m.to_euler()
            assert diff.length() < 0.001
class LinePlaneTest(unittest.TestCase):
    """
    Class to test Line and Plane classes
    """

    def __init__(self, *args, **kwargs):
        """Constructor, set up some data that is reused in many tests"""
        super(LinePlaneTest, self).__init__(*args, **kwargs)

    def test_plane(self):
        '''testing line/plane intersection'''
        # Plane z == 0; the line from (0, 0, 100) along (10, 10, -90) crosses
        # it at t = 100/90, i.e. x = y = 1000/90 ~= 11.11.
        plane = Plane(Vector3(0, 0, 0), Vector3(0, 0, 1))
        line = Line(Vector3(0, 0, 100), Vector3(10, 10, -90))
        p = line.plane_intersection(plane)
        assert p.close(Vector3(11.11, 11.11, 0.00), tol=1e-2)
# Run the test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
298,843 | generation from string | import re
from typing import List, Optional, Union, cast
from .dev_types import PipetteModel, PipetteName
from .types import (
PipetteChannelType,
PipetteModelType,
PipetteVersionType,
PipetteGenerationType,
PipetteModelMajorVersionType,
PipetteModelMinorVersionType,
)
from .pipette_definition import (
PipetteNameType,
PipetteModelVersionType,
)
# Fallback identity/calibration values used when no explicit model is supplied.
DEFAULT_CALIBRATION_OFFSET = [0.0, 0.0, 0.0]
DEFAULT_MODEL = PipetteModelType.p1000
DEFAULT_CHANNELS = PipetteChannelType.SINGLE_CHANNEL
DEFAULT_MODEL_VERSION = PipetteVersionType(major=1, minor=0)
# Every known pipette type name and every valid channel count, for validation.
PIPETTE_AVAILABLE_TYPES = [m.name for m in PipetteModelType]
PIPETTE_CHANNELS_INTS = [c.value for c in PipetteChannelType]
def is_model(model_or_name: Union[PipetteName, PipetteModel, None]) -> bool:
    """Determine if we have a real model or just a PipetteName.

    Args:
        model_or_name (Union[PipetteName, PipetteModel, None]): The pipette we want to check.

    Returns:
        bool: True only when the string contains a version marker ("v"),
        i.e. it is a PipetteModel rather than a bare PipetteName.
    """
    # An empty/None value short-circuits to False; otherwise the presence of
    # the version marker decides.
    return bool(model_or_name) and "v" in model_or_name
def supported_pipette(model_or_name: Union[PipetteName, PipetteModel, None]) -> bool:
    """Determine if a pipette type is supported.

    Args:
        model_or_name (Union[PipetteName, PipetteModel, None]): The pipette we want to check.

    Returns:
        bool: Whether or not the given pipette name or model is supported.
    """
    if not model_or_name:
        return False
    # e.g. "p300_single_v2.0" -> ["p300", "single", "v2.0"]
    split_model_or_name = model_or_name.split("_")
    try:
        channels_as_int = int(channels_from_string(split_model_or_name[1]))
    except ValueError:
        # Unrecognized channel strings map to 0, which is never a valid count.
        channels_as_int = 0
    if (
        split_model_or_name[0] in PIPETTE_AVAILABLE_TYPES
        and channels_as_int in PIPETTE_CHANNELS_INTS
    ):
        return True
    return False
def channels_from_string(channels: str) -> PipetteChannelType:
    """Convert channels from a string.

    With both `py:data:PipetteName` and `py:data:PipetteObject`, we refer to
    channel types as `single`, `multi` or `96`.

    Args:
        channels (str): The channel string we wish to convert.

    Returns:
        PipetteChannelType: A `py:obj:PipetteChannelType`
        representing the number of channels on a pipette.

    Raises:
        ValueError: If the string is not a recognized channel name.
    """
    mapping = {
        "96": PipetteChannelType.NINETY_SIX_CHANNEL,
        "multi": PipetteChannelType.EIGHT_CHANNEL,
        "single": PipetteChannelType.SINGLE_CHANNEL,
    }
    if channels not in mapping:
        raise ValueError("Invalid number of channels")
    return mapping[channels]
def version_from_string(version: str) -> PipetteVersionType:
    """Convert a version string to a py:obj:PipetteVersionType.

    The version string will either be in the format of `int.int` or `vint.int`.

    Args:
        version (str): The string version we wish to convert.

    Returns:
        PipetteVersionType: A pipette version object.
    """
    # "v1.0" / "1.0" -> ["1", "0"]; empty fragments from the split are dropped.
    version_list = [v for v in re.split("\\.|[v]", version) if v]
    major = cast(PipetteModelMajorVersionType, int(version_list[0]))
    if len(version_list) > 1:
        minor = cast(PipetteModelMinorVersionType, int(version_list[1]))
    else:
        # A missing minor component defaults to 0.
        minor = 0
    return PipetteVersionType(major, minor)
def version_from_generation(pipette_name_list: List[str]) -> PipetteVersionType:
    """Convert a string generation name to a py:obj:PipetteVersionType.

    Pipette generations are strings in the format of "gen1" or "gen2", and
    usually associated withe :py:data:PipetteName.

    Args:
        pipette_name_list (List[str]): A list of strings from the separated by `_`
            py:data:PipetteName.

    Returns:
        PipetteVersionType: A pipette version object.
    """
    # "flex"/"gen3" -> 3.0, "gen2" -> 2.0, anything else -> 1.0.
    if "flex" in pipette_name_list or "gen3" in pipette_name_list:
        return PipetteVersionType(3, 0)
    elif "gen2" in pipette_name_list:
        return PipetteVersionType(2, 0)
    else:
        return PipetteVersionType(1, 0)
def generation_from_string(pipette_name_list: List[str]) -> PipetteGenerationType:
    """Convert a string generation name to a py:obj:PipetteGenerationType.

    Args:
        pipette_name_list (List[str]): A list of strings from the separated by `_`
            py:data:PipetteName or py:data:PipetteModel.

    Returns:
        PipetteGenerationType: A pipette generation.
    """
    # The last fragment is either a generation tag ("gen2") or a version
    # string ("v3.3"), so both spellings are checked.
    if "flex" in pipette_name_list or "3." in pipette_name_list[-1]:
        return PipetteGenerationType.FLEX
    elif "gen2" in pipette_name_list or "2." in pipette_name_list[-1]:
        return PipetteGenerationType.GEN2
    else:
        return PipetteGenerationType.GEN1


# Backwards-compatible alias: this function was previously published under the
# METHOD_NAME placeholder and is still referenced by that name in this module.
METHOD_NAME = generation_from_string
def convert_to_pipette_name_type(
    model_or_name: Union[PipetteName, PipetteModel]
) -> PipetteNameType:
    """Convert the py:data:PipetteName to a py:obj:PipetteModelVersionType.

    `PipetteNames` are in the format of "p300_single" or "p300_single_gen1".

    Args:
        name (PipetteName): The pipette name we want to convert.

    Returns:
        PipetteNameType: An object representing a broken out PipetteName
        string.
    """
    # e.g. "p300_single_gen2" -> ["p300", "single", "gen2"]
    split_pipette_model_or_name = model_or_name.split("_")
    channels = channels_from_string(split_pipette_model_or_name[1])
    generation = METHOD_NAME(split_pipette_model_or_name)
    pipette_type = PipetteModelType[split_pipette_model_or_name[0]]
    return PipetteNameType(pipette_type, channels, generation)
def convert_pipette_name(
    name: PipetteName, provided_version: Optional[str] = None
) -> PipetteModelVersionType:
    """Convert the py:data:PipetteName to a py:obj:PipetteModelVersionType.

    `PipetteNames` are in the format of "p300_single" or "p300_single_gen1".

    Args:
        name (PipetteName): The pipette name we want to convert.
        provided_version (str, Optional): An explicit version string; when
            given, it overrides the version implied by the name's generation.

    Returns:
        PipetteModelVersionType: An object representing a broken out PipetteName
        string.
    """
    split_pipette_name = name.split("_")
    channels = channels_from_string(split_pipette_name[1])
    if provided_version:
        version = version_from_string(provided_version)
    else:
        version = version_from_generation(split_pipette_name)
    pipette_type = PipetteModelType[split_pipette_name[0]]
    return PipetteModelVersionType(pipette_type, channels, version)
def convert_pipette_model(
    model: Optional[PipetteModel], provided_version: Optional[str] = ""
) -> PipetteModelVersionType:
    """Break a py:data:`PipetteModel` into a py:obj:`PipetteModelVersionType`.

    Models look like "p300_single_v1.0" or "p300_single_v3.3". When a model
    string has no embedded version, pass the version via `provided_version`.

    Args:
        model: The pipette model to convert (may be None).
        provided_version: The version to use when the model has none.

    Returns:
        PipetteModelVersionType: The broken-out representation of the model.
    """
    # TODO (lc 12-5-2022) This helper function is needed
    # until we stop using "name" and "model" to refer
    # to attached pipettes.
    # We need to figure out how to default the pipette model as well
    # rather than returning a p1000
    if not model:
        # No model information at all: fall back to the defaults.
        return PipetteModelVersionType(
            PipetteModelType[DEFAULT_MODEL.value],
            DEFAULT_CHANNELS,
            DEFAULT_MODEL_VERSION,
        )
    if provided_version:
        # Model carries no version suffix; trust the caller-supplied one.
        pipette_type, parsed_channels = model.split("_")
        channels = channels_from_string(parsed_channels)
        version = version_from_string(provided_version)
    else:
        # Model embeds its own version, e.g. "p300_single_v1.0".
        pipette_type, parsed_channels, parsed_version = model.split("_")
        channels = channels_from_string(parsed_channels)
        version = version_from_string(parsed_version)
    return PipetteModelVersionType(PipetteModelType[pipette_type], channels, version)
298,844 | test doc examples | from insights.tests import context_wrap
from insights.combiners.nmcli_dev_show import AllNmcliDevShow
from insights.combiners import nmcli_dev_show
from insights.parsers.nmcli import NmcliDevShow
import doctest
NMCLI_SHOW1 = """
GENERAL.DEVICE: eth0
GENERAL.TYPE: ethernet
GENERAL.HWADDR: 00:1A:4A:16:02:E0
GENERAL.MTU: 1500
GENERAL.STATE: 100 (connected)
GENERAL.CONNECTION: System eth0
GENERAL.CON-PATH: /org/freedesktop/NetworkManager/ActiveConnection/1
WIRED-PROPERTIES.CARRIER: on
IP4.ADDRESS[1]: 10.72.37.85/23
IP4.GATEWAY: 10.72.37.254
IP4.ROUTE[1]: dst = 0.0.0.0/0, nh = 10.72.37.254, mt = 100
IP4.ROUTE[2]: dst = 10.72.36.0/23, nh = 0.0.0.0, mt = 100
IP4.DNS[1]: 10.72.17.5
IP4.DOMAIN[1]: gsslab.pek2.redhat.com
IP6.ADDRESS[1]: 2620:52:0:4824:21a:4aff:fe16:2e0/64
IP6.ADDRESS[2]: fe80::21a:4aff:fe16:2e0/64
IP6.GATEWAY: fe80:52:0:4824::1fe
IP6.ROUTE[1]: dst = ff00::/8, nh = ::, mt = 256, table=255
IP6.ROUTE[2]: dst = fe80::/64, nh = ::, mt = 256
IP6.ROUTE[3]: dst = ::/0, nh = fe80:52:0:4824::1fe, mt = 1024
IP6.ROUTE[4]: dst = 2620:52:0:4824::/64, nh = ::, mt = 256
GENERAL.DEVICE: lo
GENERAL.TYPE: loopback
GENERAL.HWADDR: 00:00:00:00:00:00
GENERAL.MTU: 65536
GENERAL.STATE: 10 (unmanaged)
GENERAL.CONNECTION: --
GENERAL.CON-PATH: --
IP4.ADDRESS[1]: 127.0.0.1/8
IP4.GATEWAY: --
IP6.ADDRESS[1]: ::1/128
IP6.GATEWAY: --
""".strip()
NMCLI_SHOW2 = """
GENERAL.DEVICE: eth0
GENERAL.TYPE: ethernet
GENERAL.HWADDR: 00:1A:4A:16:02:E0
GENERAL.MTU: 1500
GENERAL.STATE: 100 (connected)
GENERAL.CONNECTION: System eth0
GENERAL.CON-PATH: /org/freedesktop/NetworkManager/ActiveConnection/1
WIRED-PROPERTIES.CARRIER: on
IP4.ADDRESS[1]: 10.72.37.85/23
IP4.GATEWAY: 10.72.37.254
IP4.ROUTE[1]: dst = 0.0.0.0/0, nh = 10.72.37.254, mt = 100
IP4.ROUTE[2]: dst = 10.72.36.0/23, nh = 0.0.0.0, mt = 100
IP4.DNS[1]: 10.72.17.5
IP4.DOMAIN[1]: gsslab.pek2.redhat.com
IP6.ADDRESS[1]: 2620:52:0:4824:21a:4aff:fe16:2e0/64
IP6.ADDRESS[2]: fe80::21a:4aff:fe16:2e0/64
IP6.GATEWAY: fe80:52:0:4824::1fe
IP6.ROUTE[1]: dst = ff00::/8, nh = ::, mt = 256, table=255
IP6.ROUTE[2]: dst = fe80::/64, nh = ::, mt = 256
IP6.ROUTE[3]: dst = ::/0, nh = fe80:52:0:4824::1fe, mt = 1024
IP6.ROUTE[4]: dst = 2620:52:0:4824::/64, nh = ::, mt = 256
""".strip()
NMCLI_SHOW3 = """
GENERAL.DEVICE: lo
GENERAL.TYPE: loopback
GENERAL.HWADDR: 00:00:00:00:00:00
GENERAL.MTU: 65536
GENERAL.STATE: 10 (unmanaged)
GENERAL.CONNECTION: --
GENERAL.CON-PATH: --
IP4.ADDRESS[1]: 127.0.0.1/8
IP4.GATEWAY: --
IP6.ADDRESS[1]: ::1/128
IP6.GATEWAY: --
""".strip()
def test_allnmcli1():
    """Combiner built from the single combined-output parser."""
    parser = NmcliDevShow(context_wrap(NMCLI_SHOW1))
    combined = AllNmcliDevShow(parser, None)
    assert sorted(combined.connected_devices) == sorted(['eth0'])
    eth0 = combined['eth0']
    assert eth0['IP4_GATEWAY'] == "10.72.37.254"
    assert eth0['IP4_DNS1'] == "10.72.17.5"
    assert eth0['STATE'] == "connected"
    assert eth0['CON-PATH'] == "/org/freedesktop/NetworkManager/ActiveConnection/1"
    assert len(combined['lo']) == 10
def test_allnmcli2():
    """Combiner built from a list of per-device parsers."""
    eth0_parser = NmcliDevShow(context_wrap(NMCLI_SHOW2))
    lo_parser = NmcliDevShow(context_wrap(NMCLI_SHOW3))
    combined = AllNmcliDevShow(None, [eth0_parser, lo_parser])
    assert sorted(combined.connected_devices) == sorted(['eth0'])
    eth0 = combined['eth0']
    assert eth0['IP4_GATEWAY'] == "10.72.37.254"
    assert eth0['IP4_DNS1'] == "10.72.17.5"
    assert eth0['STATE'] == "connected"
    assert eth0['CON-PATH'] == "/org/freedesktop/NetworkManager/ActiveConnection/1"
    assert len(combined['lo']) == 10
def METHOD_NAME():
    """Run the doctest examples embedded in the combiner module."""
    env = {
        'allnmclidevshow': AllNmcliDevShow(NmcliDevShow(context_wrap(NMCLI_SHOW1)), None),
    }
    results = doctest.testmod(nmcli_dev_show, globs=env)
    assert results.failed == 0
298,845 | searchable attribs for node type | # A part of NonVisual Desktop Access (NVDA)
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
# Copyright (C) 2009-2022 NV Access Limited, Aleksey Sadovoy
from . import VirtualBuffer, VirtualBufferTextInfo
import browseMode
import controlTypes
import NVDAObjects.IAccessible
from NVDAObjects.IAccessible.adobeAcrobat import normalizeStdName, AcrobatNode
import winUser
import IAccessibleHandler
import oleacc
from logHandler import log
import textInfos
import languageHandler
class AdobeAcrobat_TextInfo(VirtualBufferTextInfo):
    """TextInfo for Adobe Acrobat virtual buffers.

    Normalizes Acrobat-specific control/format field attributes and resolves
    on-screen bounding rectangles via the per-word child objects that Acrobat
    exposes.
    """

    def _getBoundingRectFromOffset(self, offset):
        """Return the screen rectangle for the character at *offset*.

        Walks the format-field attributes backwards looking for an
        ``_indexInParent`` entry, which identifies the child NVDAObject whose
        ``location`` covers this text.
        """
        formatFieldStart, formatFieldEnd = self._getUnitOffsets(textInfos.UNIT_FORMATFIELD, offset)
        # The format field starts at the first character.
        for field in reversed(self._getFieldsInRange(formatFieldStart, formatFieldStart + 1)):
            if not (isinstance(field, textInfos.FieldCommand) and field.command == "formatChange"):
                # This is no format field.
                continue
            attrs = field.field
            indexInParent = attrs.get("_indexInParent")
            if indexInParent is None:
                continue
            try:
                obj = self._getNVDAObjectFromOffset(offset).getChild(indexInParent)
            except IndexError:
                obj = None
            if not obj:
                continue
            if not obj.location:
                # Older versions of Adobe Reader have per word objects, but they don't expose a location
                break
            return obj.location
        # No per-word object with a location; defer to the generic lookup.
        return super(AdobeAcrobat_TextInfo, self)._getBoundingRectFromOffset(offset)

    def _normalizeControlField(self, attrs):
        """Map raw Acrobat/IAccessible attributes to NVDA control-field keys.

        Prefers the Acrobat standard structure name (e.g. "H1") for role and
        heading level; falls back to the IAccessible role otherwise.
        """
        stdName = attrs.get("acrobat::stdname", "")
        try:
            role, level = normalizeStdName(stdName)
        except LookupError:
            role, level = None, None
        if not role:
            role = IAccessibleHandler.NVDARoleFromAttr(attrs['IAccessible::role'])
        states = IAccessibleHandler.getStatesSetFromIAccessibleAttrs(attrs)
        role, states = controlTypes.transformRoleStates(role, states)
        if (
            role == controlTypes.Role.EDITABLETEXT
            and states.issuperset({
                controlTypes.State.READONLY,
                controlTypes.State.FOCUSABLE,
                controlTypes.State.LINKED
            })
        ):
            # HACK: Acrobat sets focus states on text nodes beneath links,
            # making them appear as read only editable text fields.
            states.difference_update({controlTypes.State.FOCUSABLE, controlTypes.State.FOCUSED})
        attrs['role'] = role
        attrs['states'] = states
        if level:
            attrs["level"] = level
        return super(AdobeAcrobat_TextInfo, self)._normalizeControlField(attrs)

    def _normalizeFormatField(self, attrs):
        """Normalize format-field attributes (language, parent index, font size)."""
        try:
            attrs["language"] = languageHandler.normalizeLanguage(attrs["language"])
        except KeyError:
            pass
        try:
            # Used later by _getBoundingRectFromOffset to locate the child object.
            attrs["_indexInParent"] = int(attrs["_indexInParent"])
        except KeyError:
            pass
        fontSize = attrs.get("font-size")
        if fontSize is not None:
            # Translators: Abbreviation for points, a measurement of font size.
            # NOTE(review): pgettext is not imported here; presumably installed
            # as a builtin by NVDA's gettext setup — confirm.
            attrs["font-size"] = pgettext("font size", "%s pt") % fontSize
        return attrs
class AdobeAcrobat(VirtualBuffer):
    """Virtual buffer for Adobe Acrobat / Adobe Reader documents."""

    TextInfo = AdobeAcrobat_TextInfo
    # Acrobat fires events when it scrolls programmatically.
    programmaticScrollMayFireEvent = True

    def __init__(self, rootNVDAObject):
        super(AdobeAcrobat, self).__init__(rootNVDAObject, backendName="adobeAcrobat")

    def __contains__(self, obj):
        """Return True if *obj*'s window lives inside this document's window."""
        return winUser.isDescendantWindow(self.rootNVDAObject.windowHandle, obj.windowHandle)

    def _get_isAlive(self):
        """Return True while the document (or its load) is still valid."""
        if self.isLoading:
            return True
        root = self.rootNVDAObject
        if not root:
            return False
        if not winUser.isWindow(root.windowHandle) or root.role == controlTypes.Role.UNKNOWN:
            return False
        return True

    def getNVDAObjectFromIdentifier(self, docHandle, ID):
        """Resolve a (window handle, accessible ID) pair back to an NVDAObject."""
        return NVDAObjects.IAccessible.getNVDAObjectFromEvent(docHandle, winUser.OBJID_CLIENT, ID)

    def getIdentifierFromNVDAObject(self, obj):
        """Return the (window handle, accID) identifier for an Acrobat node."""
        if not isinstance(obj, AcrobatNode):
            raise LookupError
        return obj.windowHandle, obj.accID

    def METHOD_NAME(self, nodeType):
        """Return the attribute filters used to find *nodeType* in the buffer.

        Quick-navigation node types (e.g. "heading", "link", "table") are
        translated into IAccessible role/state filters or Acrobat standard
        structure names. Returns None for unsupported node types.
        """
        if nodeType in ("link", "unvisitedLink"):
            attrs = {"IAccessible::role": [oleacc.ROLE_SYSTEM_LINK]}
        elif nodeType == "table":
            attrs = {"IAccessible::role": [oleacc.ROLE_SYSTEM_TABLE]}
        elif nodeType.startswith("heading") and nodeType[7:].isdigit():
            attrs = {"acrobat::stdname": ["H%s" % nodeType[7:]]}
        elif nodeType == "heading":
            attrs = {"acrobat::stdname": ["H", "H1", "H2", "H3", "H4", "H5", "H6"]}
        elif nodeType == "formField":
            # A state filter value of [None] means the state must be absent
            # (i.e. the control must not be read-only).
            attrs = {"IAccessible::role": [oleacc.ROLE_SYSTEM_PUSHBUTTON, oleacc.ROLE_SYSTEM_RADIOBUTTON, oleacc.ROLE_SYSTEM_CHECKBUTTON, oleacc.ROLE_SYSTEM_COMBOBOX, oleacc.ROLE_SYSTEM_LIST, oleacc.ROLE_SYSTEM_OUTLINE, oleacc.ROLE_SYSTEM_TEXT], "IAccessible::state_%s" % oleacc.STATE_SYSTEM_READONLY: [None]}
        elif nodeType == "list":
            attrs = {"acrobat::stdname": ["L"]}
        elif nodeType == "listItem":
            attrs = {"acrobat::stdname": ["LI"]}
        elif nodeType == "button":
            attrs = {"IAccessible::role": [oleacc.ROLE_SYSTEM_PUSHBUTTON]}
        elif nodeType == "edit":
            attrs = {"IAccessible::role": [oleacc.ROLE_SYSTEM_TEXT], "IAccessible::state_%s" % oleacc.STATE_SYSTEM_READONLY: [None]}
        elif nodeType == "radioButton":
            attrs = {"IAccessible::role": [oleacc.ROLE_SYSTEM_RADIOBUTTON]}
        elif nodeType == "checkBox":
            attrs = {"IAccessible::role": [oleacc.ROLE_SYSTEM_CHECKBUTTON]}
        elif nodeType == "blockQuote":
            attrs = {"acrobat::stdname": ["BlockQuote"]}
        elif nodeType == "focusable":
            attrs = {"IAccessible::state_%s" % oleacc.STATE_SYSTEM_FOCUSABLE: [1]}
        elif nodeType == "graphic":
            attrs = {"IAccessible::role": [oleacc.ROLE_SYSTEM_GRAPHIC]}
        elif nodeType == "comboBox":
            attrs = {"IAccessible::role": [oleacc.ROLE_SYSTEM_COMBOBOX]}
        else:
            return None
        return attrs

    def event_valueChange(self, obj, nextHandler):
        """Treat value changes on the document itself as scroll notifications."""
        if obj.event_childID == 0:
            return nextHandler()
        if not self._handleScrollTo(obj):
            return nextHandler()

    def _get_ElementsListDialog(self):
        return ElementsListDialog
class ElementsListDialog(browseMode.ElementsListDialog):
    # Acrobat documents only expose the first two element types from the
    # generic browse-mode dialog. NOTE(review): the base ELEMENT_TYPES tuple
    # is defined in browseMode — confirm which two types this keeps.
    ELEMENT_TYPES=browseMode.ElementsListDialog.ELEMENT_TYPES[0:2]
298,846 | repr array | """Redo the builtin repr() (representation) but with limits on most sizes."""
__all__ = ["Repr", "repr", "recursive_repr"]
import builtins
from itertools import islice
from _thread import get_ident
def recursive_repr(fillvalue='...'):
    'Decorator to make a repr function return fillvalue for a recursive call'
    def decorating_function(user_function):
        # (object id, thread id) pairs for repr calls currently in progress.
        active = set()

        def wrapper(self):
            marker = id(self), get_ident()
            if marker in active:
                # Re-entered for the same object on the same thread: cut the cycle.
                return fillvalue
            active.add(marker)
            try:
                return user_function(self)
            finally:
                active.discard(marker)

        # Can't use functools.wraps() here because of bootstrap issues
        for attr in ('__module__', '__doc__', '__name__', '__qualname__'):
            setattr(wrapper, attr, getattr(user_function, attr))
        wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
        return wrapper

    return decorating_function
class Repr:
    """Produce representations of objects, limiting the size of each part.

    Each ``max*`` attribute bounds how many items (for containers) or
    characters (for strings/ints/other) are shown before eliding with '...'.
    """

    def __init__(self):
        # Maximum nesting depth before a collection collapses to '...'.
        self.maxlevel = 6
        # Per-type item limits.
        self.maxtuple = 6
        self.maxlist = 6
        self.maxarray = 5
        self.maxdict = 4
        self.maxset = 6
        self.maxfrozenset = 6
        self.maxdeque = 6
        # Character limits for scalars.
        self.maxstring = 30
        self.maxlong = 40
        self.maxother = 30

    def repr(self, x):
        """Return a size-limited representation of *x*."""
        return self.repr1(x, self.maxlevel)

    def repr1(self, x, level):
        """Dispatch on type name to a ``repr_<typename>`` method, if any."""
        typename = type(x).__name__
        if ' ' in typename:
            # e.g. "builtin function" -> "builtin_function" so it can form
            # a valid method name.
            parts = typename.split()
            typename = '_'.join(parts)
        if hasattr(self, 'repr_' + typename):
            return getattr(self, 'repr_' + typename)(x, level)
        else:
            return self.repr_instance(x, level)

    def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
        """Shared helper for sized iterables.

        *left*/*right* are the delimiters, *maxiter* the item limit, and
        *trail* an optional suffix used for one-element tuples (',').
        """
        n = len(x)
        if level <= 0 and n:
            s = '...'
        else:
            newlevel = level - 1
            repr1 = self.repr1
            pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
            if n > maxiter: pieces.append('...')
            s = ', '.join(pieces)
            if n == 1 and trail: right = trail + right
        return '%s%s%s' % (left, s, right)

    def repr_tuple(self, x, level):
        return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')

    def repr_list(self, x, level):
        return self._repr_iterable(x, level, '[', ']', self.maxlist)

    def METHOD_NAME(self, x, level):
        """Represent an array.array, preserving its typecode."""
        if not x:
            return "array('%s')" % x.typecode
        header = "array('%s', [" % x.typecode
        return self._repr_iterable(x, level, header, '])', self.maxarray)

    def repr_set(self, x, level):
        if not x:
            return 'set()'
        x = _possibly_sorted(x)
        return self._repr_iterable(x, level, '{', '}', self.maxset)

    def repr_frozenset(self, x, level):
        if not x:
            return 'frozenset()'
        x = _possibly_sorted(x)
        return self._repr_iterable(x, level, 'frozenset({', '})',
                                   self.maxfrozenset)

    def repr_deque(self, x, level):
        return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)

    def repr_dict(self, x, level):
        n = len(x)
        if n == 0: return '{}'
        if level <= 0: return '{...}'
        newlevel = level - 1
        repr1 = self.repr1
        pieces = []
        for key in islice(_possibly_sorted(x), self.maxdict):
            keyrepr = repr1(key, newlevel)
            valrepr = repr1(x[key], newlevel)
            pieces.append('%s: %s' % (keyrepr, valrepr))
        if n > self.maxdict: pieces.append('...')
        s = ', '.join(pieces)
        return '{%s}' % (s,)

    def repr_str(self, x, level):
        # Slice before repr() so escapes in the tail don't blow the budget,
        # then keep roughly equal head/tail halves around '...'.
        s = builtins.repr(x[:self.maxstring])
        if len(s) > self.maxstring:
            i = max(0, (self.maxstring-3)//2)
            j = max(0, self.maxstring-3-i)
            s = builtins.repr(x[:i] + x[len(x)-j:])
            s = s[:i] + '...' + s[len(s)-j:]
        return s

    def repr_int(self, x, level):
        s = builtins.repr(x) # XXX Hope this isn't too slow...
        if len(s) > self.maxlong:
            i = max(0, (self.maxlong-3)//2)
            j = max(0, self.maxlong-3-i)
            s = s[:i] + '...' + s[len(s)-j:]
        return s

    def repr_instance(self, x, level):
        """Fallback for types without a dedicated repr_* method."""
        try:
            s = builtins.repr(x)
        # Bugs in x.__repr__() can cause arbitrary
        # exceptions -- then make up something
        except Exception:
            return '<%s instance at %#x>' % (x.__class__.__name__, id(x))
        if len(s) > self.maxother:
            i = max(0, (self.maxother-3)//2)
            j = max(0, self.maxother-3-i)
            s = s[:i] + '...' + s[len(s)-j:]
        return s
def _possibly_sorted(x):
# Since not all sequences of items can be sorted and comparison
# functions may raise arbitrary exceptions, return an unsorted
# sequence in that case.
try:
return sorted(x)
except Exception:
return list(x)
# Module-level convenience instance; ``repr`` deliberately shadows the
# builtin within this module's namespace (callers use builtins.repr above).
aRepr = Repr()
repr = aRepr.repr
298,847 | attach kernel driver | """USB Core
This is a subset of the PyUSB core module.
"""
from __future__ import annotations
import array
from typing import Optional
from circuitpython_typing import ReadableBuffer
class USBError(OSError):
    """Catchall exception for USB related errors."""

    # Stub declaration only; behavior is inherited from OSError.
    ...
class USBTimeoutError(USBError):
    """Raised when a USB transfer times out."""

    # Stub declaration only; behavior is inherited from USBError.
    ...
def find(
    find_all: bool = False,
    *,
    idVendor: Optional[int] = None,
    idProduct: Optional[int] = None,
) -> Device:
    """Find the first device that matches the given requirements or, if
    find_all is True, return a generator of all matching devices.

    Returns None if no device matches.

    :param bool find_all: when True, yield every matching device instead of
        returning just the first one.
    :param int idVendor: restrict matches to this USB vendor ID.
    :param int idProduct: restrict matches to this USB product ID.
    """
class Device:
    """A connected USB device.

    All method bodies here are ``...`` stubs: this file documents the API
    only; obtain instances via `usb.core.find`.
    """

    def __init__(self) -> None:
        """User code cannot create Device objects. Instead, get them from
        `usb.core.find`.
        """
        ...

    idVendor: int
    """The USB vendor ID of the device"""
    idProduct: int
    """The USB product ID of the device"""
    serial_number: str
    """The USB device's serial number string."""
    product: str
    """The USB device's product string."""
    manufacturer: str
    """The USB device's manufacturer string."""

    def write(
        self, endpoint: int, data: ReadableBuffer, timeout: Optional[int] = None
    ) -> int:
        """Write data to a specific endpoint on the device.

        :param int endpoint: the bEndpointAddress you want to communicate with.
        :param ReadableBuffer data: the data to send
        :param int timeout: Time to wait specified in milliseconds. (Different from most CircuitPython!)
        :returns: the number of bytes written
        """
        ...

    def read(
        self, endpoint: int, size_or_buffer: array.array, timeout: Optional[int] = None
    ) -> int:
        """Read data from the endpoint.

        :param int endpoint: the bEndpointAddress you want to communicate with.
        :param array.array size_or_buffer: the array to read data into. PyUSB also allows size but CircuitPython only support array to force deliberate memory use.
        :param int timeout: Time to wait specified in milliseconds. (Different from most CircuitPython!)
        :returns: the number of bytes read
        """
        ...

    def ctrl_transfer(
        self,
        bmRequestType: int,
        bRequest: int,
        wValue: int = 0,
        wIndex: int = 0,
        data_or_wLength: Optional[array.array] = None,
        timeout: Optional[int] = None,
    ) -> int:
        """Do a control transfer on the endpoint 0. The parameters bmRequestType,
        bRequest, wValue and wIndex are the same of the USB Standard Control
        Request format.

        Control requests may or may not have a data payload to write/read.
        In cases which it has, the direction bit of the bmRequestType
        field is used to infer the desired request direction.

        For host to device requests (OUT), data_or_wLength parameter is
        the data payload to send, and it must be a sequence type convertible
        to an array object. In this case, the return value is the number
        of bytes written in the data payload.

        For device to host requests (IN), data_or_wLength is an array
        object which the data will be read to, and the return value is the
        number of bytes read.
        """
        ...

    def is_kernel_driver_active(self, interface: int) -> bool:
        """Determine if CircuitPython is using the interface. If it is, the
        object will be unable to perform I/O.

        :param int interface: the device interface number to check
        """
        ...

    def detach_kernel_driver(self, interface: int) -> None:
        """Stop CircuitPython from using the interface. If successful, you
        will then be able to perform I/O. CircuitPython will automatically
        re-start using the interface on reload.

        :param int interface: the device interface number to stop CircuitPython on
        """
        ...

    def METHOD_NAME(self, interface: int) -> None:
        """Allow CircuitPython to use the interface if it wants to.

        :param int interface: the device interface number to allow CircuitPython to use
        """
        ...
298,848 | validate tag | """
Usage:
This script generates a JSON object containing binary download URLs and their corresponding checksums
for a given release tag of ixofoundation/ixo-blockchain or from a provided checksum URL.
The binary JSON is compatible with cosmovisor and with the chain registry.
You can run this script with the following commands:
❯ python create_binaries_json.py --checksums_url https://github.com/ixofoundation/ixo-blockchain/releases/download/v2.0.0/sha256sum.txt
Output:
{
"binaries": {
"linux/arm64": "https://github.com/ixofoundation/ixo-blockchain/releases/download/2.0.0/ixod-2.0.0-linux-arm64?checksum=<checksum>",
"darwin/arm64": "https://github.com/ixofoundation/ixo-blockchain/releases/download/2.0.0/ixod-2.0.0-darwin-arm64?checksum=<checksum>",
"darwin/amd64": "https://github.com/ixofoundation/ixo-blockchain/releases/download/2.0.0/ixod-2.0.0-darwin-amd64?checksum=<checksum>,
"linux/amd64": "https://github.com/ixofoundation/ixo-blockchain/releases/download/2.0.0/ixod-2.0.0-linux-amd64?checksum=><checksum>"
}
}
Expects a checksum in the form:
<CHECKSUM> ixod-<VERSION>-<OS>-<ARCH>[.tar.gz]
<CHECKSUM> ixod-<VERSION>-<OS>-<ARCH>[.tar.gz]
...
Example:
0711bacaf0cee57f613796ba8c274011e22c3968e98755a105a1a500c87e19f5 ixod-2.0.0-linux-amd64
0859b596ca18257cf424223b35057a4a5296c81fe1e43164673b3344876daaeb ixod-2.0.0-linux-amd64.tar.gz
(From: https://github.com/ixofoundation/ixo-blockchain/releases/download/v2.0.0/sha256sum.txt)
❯ python create_binaries_json.py --tag v2.0.0
Output:
{
"binaries": {
"linux/arm64": "https://github.com/ixofoundation/ixo-blockchain/releases/download/v2.0.0/ixod-2.0.0-linux-arm64?checksum=<checksum>",
"darwin/arm64": "https://github.com/ixofoundation/ixo-blockchain/releases/download/v2.0.0/ixod-2.0.0-darwin-arm64?checksum=<checksum>",
"darwin/amd64": "https://github.com/ixofoundation/ixo-blockchain/releases/download/v2.0.0/ixod-2.0.0-darwin-amd64?checksum=<checksum>",
"linux/amd64": "https://github.com/ixofoundation/ixo-blockchain/releases/download/v2.0.0/ixod-2.0.0-linux-amd64?checksum=><checksum>"
}
}
Expect a checksum to be present at:
https://github.com/ixofoundation/ixo-blockchain/releases/download/<TAG>/sha256sum.txt
"""
import requests
import json
import argparse
import re
import sys
def METHOD_NAME(tag):
    """Return True if *tag* is a release tag of the form ``vX.Y.Z``.

    :param tag: The candidate tag string.
    :returns: True only for exactly "v" + three dot-separated numbers.
    """
    # The dots must be escaped: the original pattern's bare '.' matched any
    # character, so strings like "v1x2y3" were wrongly accepted. fullmatch
    # also removes the need for explicit ^...$ anchors.
    pattern = r'v[0-9]+\.[0-9]+\.[0-9]+'
    return bool(re.fullmatch(pattern, tag))
def download_checksums(checksums_url):
    """Fetch the checksum file at *checksums_url* and return its text.

    :raises ValueError: when the server does not answer with HTTP 200.
    """
    response = requests.get(checksums_url)
    if response.status_code == 200:
        return response.text
    raise ValueError(
        f"Failed to fetch sha256sum.txt. Status code: {response.status_code}")
def checksums_to_binaries_json(checksums):
    """Convert checksum-file text into a chain-registry "binaries" JSON string.

    :param checksums: Contents of a sha256sum.txt file, one
        ``<checksum> <filename>`` pair per line.
    :returns: A JSON string of the form
        ``{"binaries": {"<os>/<arch>": "<download url>?checksum=sha256:<sum>"}}``.

    Exits the process with status 1 when a binary filename does not follow
    the expected ``ixod-X.Y.Z-platform-architecture`` shape.
    """
    binaries = {}
    for line in checksums.splitlines():
        # split() (no argument) tolerates the one-or-two-space separators
        # that sha256sum-style files use.
        checksum, filename = line.split()
        # Only bare binaries are listed; skip archives and non-ixod entries.
        if filename.endswith('.tar.gz') or not filename.startswith('ixod'):
            continue
        try:
            _, tag, platform, arch = filename.split('-')
        except ValueError:
            # Report the offending filename; the original re-split after this
            # handler, which defeated the error handling entirely.
            print(
                f"Error: Expected binary name in the form: "
                f"ixod-X.Y.Z-platform-architecture, but got {filename}")
            sys.exit(1)
        # Universal ("all") and windows builds are not part of the registry.
        if arch == 'all' or platform == 'windows':
            continue
        binaries[f"{platform}/{arch}"] = (
            f"https://github.com/ixofoundation/ixo-blockchain/releases/"
            f"download/v{tag}/{filename}?checksum=sha256:{checksum}"
        )
    return json.dumps({"binaries": binaries}, indent=2)
def main():
    """CLI entry point: print the binaries JSON for a tag or a checksum URL."""
    arg_parser = argparse.ArgumentParser(description="Create binaries json")
    arg_parser.add_argument('--tag', metavar='tag', type=str,
                            help='the tag to use (e.g v2.0.0)')
    arg_parser.add_argument('--checksums_url', metavar='checksums_url',
                            type=str, help='URL to the checksum')
    args = arg_parser.parse_args()

    # Validate the tag format when one was given.
    if args.tag and not METHOD_NAME(args.tag):
        print("Error: The provided tag does not follow the 'vX.Y.Z' format.")
        sys.exit(1)

    # Exactly one of the two inputs must be supplied (xor).
    if bool(args.tag) == bool(args.checksums_url):
        arg_parser.error("Only one of tag or --checksums_url must be specified")
        sys.exit(1)

    if args.checksums_url:
        checksums_url = args.checksums_url
    else:
        checksums_url = (
            "https://github.com/ixofoundation/ixo-blockchain/releases/"
            f"download/{args.tag}/sha256sum.txt"
        )
    print(checksums_to_binaries_json(download_checksums(checksums_url)))
298,849 | get velocities | # -*- coding: utf-8 -*-
"""This module defines a class for handling trajectory frames."""
import numpy as np
from prody.measure import getRMSD
from prody.utilities import importLA
__all__ = ['Frame']
class Frame(object):
    """A class for storing trajectory frame coordinates and provide methods
    acting on them."""

    __slots__ = ['_traj', '_index', '_coords', '_unitcell', '_velocs']

    def __init__(self, traj, index, coords, unitcell=None, velocs=None):
        # Frames are lightweight views tied to their parent trajectory; atom
        # selection and weights are always read from the trajectory itself.
        self._traj = traj
        self._index = index
        self._coords = coords
        self._velocs = velocs
        self._unitcell = unitcell

    def __repr__(self):
        sel = ''
        if self._traj._indices is not None:
            sel = 'selected {0} of '.format(self.numSelected())
        return ('<Frame: {0} from {1} ({2}{3} atoms)>'
                ).format(self._index, self._traj.getTitle(), sel,
                         self._traj.numAtoms())

    def __str__(self):
        return 'Frame {0} from {1}'.format(self._index,
                                           self._traj.getTitle())

    def numAtoms(self):
        """Returns number of atoms."""
        return self._traj.numAtoms()

    def numSelected(self):
        """Returns number of selected atoms."""
        return self._traj.numSelected()

    def getAtoms(self):
        """Returns associated/selected atoms."""
        return self._traj.getAtoms()

    def getIndex(self):
        """Returns index."""
        return self._index

    def getWeights(self):
        """Returns coordinate weights for selected atoms."""
        return self._traj.getWeights()

    def _getWeights(self):
        # Non-copying variant of getWeights.
        return self._traj._getWeights()

    def getTrajectory(self):
        """Returns the trajectory."""
        return self._traj

    def getCoords(self):
        """Returns a copy of coordinates of (selected) atoms."""
        indices = self._traj._indices
        if indices is None:
            return self._getxyz().copy()
        else:
            # Fancy indexing already returns a new array, so no extra copy.
            return self._getxyz()[indices]

    def _getCoords(self):
        """Returns coordinates of (selected) atoms."""
        indices = self._traj._indices
        if indices is None:
            return self._getxyz()
        else:
            return self._getxyz()[indices]

    def _getxyz(self):
        """Returns coordinates array."""
        # When the trajectory is linked to an AtomGroup, that group owns the
        # live coordinates; otherwise use this frame's own array.
        ag = self._traj.link()
        if ag is None:
            coords = self._coords
        else:
            coords = ag._getCoords()
        if coords is None:
            raise ValueError('coordinates are not set')
        return coords

    def METHOD_NAME(self):
        """Returns a copy of velocities of (selected) atoms."""
        if self._velocs is not None:
            indices = self._traj._indices
            if indices is None:
                return self._velocs.copy()
            else:
                return self._velocs[indices]

    def _getVelocities(self):
        """Returns velocities of (selected) atoms."""
        if self._velocs is not None:
            indices = self._traj._indices
            if indices is None:
                return self._velocs
            else:
                return self._velocs[indices]

    def getUnitcell(self):
        """Returns a copy of unitcell array."""
        if self._unitcell is not None:
            return self._unitcell.copy()

    def _getUnitcell(self):
        return self._unitcell

    def getDeviations(self):
        """Returns deviations from the trajectory reference coordinates."""
        indices = self._traj._indices
        coords = self._getCoords()
        if indices is None:
            return coords - self._traj._coords
        else:
            return coords - self._traj._coords[indices]

    def getRMSD(self):
        """Returns RMSD from the trajectory reference coordinates. If weights
        for the trajectory are set, weighted RMSD will be returned."""
        indices = self._traj._indices
        traj = self._traj
        coords = self._getCoords()
        if indices is None:
            return getRMSD(coords, traj._coords, traj._weights)
        else:
            if traj._weights is None:
                return getRMSD(coords, traj._coords[indices])
            else:
                return getRMSD(coords, traj._coords[indices],
                               traj._weights[indices])

    def superpose(self):
        """Superpose frame onto the trajectory reference coordinates. Note
        that transformation matrix is calculated based on selected atoms and
        applied to all atoms. If atom weights for the trajectory are set, they
        will be used to calculate the transformation."""

        traj = self._traj
        indices = traj._indices
        # mob: coordinates used to fit; mov: full set to move afterwards.
        mob = mov = self._getxyz()

        weights = traj._weights
        if indices is None:
            tar = traj._coords
            # No selection: the fit set and the moved set are the same array.
            mov = None
        else:
            tar = traj._coords[indices]
            mob = mob[indices]
            if weights is not None:
                weights = weights[indices]

        linalg = importLA()
        # Build the (optionally weighted) covariance matrix between the
        # centered mobile and target coordinates.
        if weights is None:
            mob_com = mob.mean(0)
            mob_org = mob - mob_com
            tar_com = tar.mean(0)
            tar_org = tar - tar_com
            matrix = np.dot(tar_org.T, mob_org)
        else:
            weights_sum = weights.sum()
            weights_dot = np.dot(weights.T, weights)
            mob_com = (mob * weights).sum(axis=0) / weights_sum
            mob_org = mob - mob_com
            tar_com = (tar * weights).sum(axis=0) / weights_sum
            tar_org = tar - tar_com
            matrix = np.dot((tar_org * weights).T,
                            (mob_org * weights)) / weights_dot

        # SVD-based rigid-body fit; the det-sign entry in Id forces a proper
        # rotation (no reflection).
        U, s, Vh = linalg.svd(matrix)
        Id = np.array([[1, 0, 0],
                       [0, 1, 0],
                       [0, 0, np.sign(linalg.det(matrix))]])
        rotation = np.dot(Vh.T, np.dot(Id, U.T))

        # np.add(..., out) writes the transformed coordinates back in place.
        if mov is None:
            np.add(np.dot(mob_org, rotation), tar_com, mob)
        else:
            np.add(np.dot(mov, rotation),
                   (tar_com - np.dot(mob_com, rotation)), mov)
298,850 | test no versions | from pathlib import Path, PurePosixPath
import networkx
import pytest
from fsspec.implementations.http import HTTPFileSystem
from fsspec.implementations.local import LocalFileSystem
from gcsfs import GCSFileSystem
from s3fs.core import S3FileSystem
from kedro.extras.datasets.networkx import GraphMLDataSet
from kedro.io import DatasetError, Version
from kedro.io.core import PROTOCOL_DELIMITER
ATTRS = {
"source": "from",
"target": "to",
"name": "fake_id",
"key": "fake_key",
"link": "fake_link",
}
@pytest.fixture
def filepath_graphml(tmp_path):
    """Posix path for a temporary GraphML target file."""
    target = tmp_path / "some_dir" / "test.graphml"
    return target.as_posix()
@pytest.fixture
def graphml_data_set(filepath_graphml):
    """Non-versioned GraphMLDataSet that loads node labels as ints."""
    options = {
        "filepath": filepath_graphml,
        "load_args": {"node_type": int},
        "save_args": {},
    }
    return GraphMLDataSet(**options)
@pytest.fixture
def versioned_graphml_data_set(filepath_graphml, load_version, save_version):
    """GraphMLDataSet wrapped with an explicit load/save version."""
    return GraphMLDataSet(
        filepath=filepath_graphml,
        load_args={"node_type": int},
        save_args={},
        version=Version(load_version, save_version),
    )
@pytest.fixture()
def dummy_graph_data():
    """A complete graph on three nodes used as the save/load payload."""
    graph = networkx.complete_graph(3)
    return graph
class TestGraphMLDataSet:
    """Tests for the non-versioned GraphMLDataSet."""

    def test_save_and_load(self, graphml_data_set, dummy_graph_data):
        """Test saving and reloading the data set."""
        graphml_data_set.save(dummy_graph_data)
        reloaded = graphml_data_set.load()
        assert dummy_graph_data.nodes(data=True) == reloaded.nodes(data=True)
        # GraphML is written/read in binary mode.
        assert graphml_data_set._fs_open_args_load == {"mode": "rb"}
        assert graphml_data_set._fs_open_args_save == {"mode": "wb"}

    def test_load_missing_file(self, graphml_data_set):
        """Check the error when trying to load missing file."""
        pattern = r"Failed while loading data from data set GraphMLDataSet\(.*\)"
        with pytest.raises(DatasetError, match=pattern):
            assert graphml_data_set.load()

    def test_exists(self, graphml_data_set, dummy_graph_data):
        """Test `exists` method invocation."""
        assert not graphml_data_set.exists()
        graphml_data_set.save(dummy_graph_data)
        assert graphml_data_set.exists()

    @pytest.mark.parametrize(
        "filepath,instance_type",
        [
            ("s3://bucket/file.graphml", S3FileSystem),
            ("file:///tmp/test.graphml", LocalFileSystem),
            ("/tmp/test.graphml", LocalFileSystem),
            ("gcs://bucket/file.graphml", GCSFileSystem),
            ("https://example.com/file.graphml", HTTPFileSystem),
        ],
    )
    def test_protocol_usage(self, filepath, instance_type):
        """Each URL scheme resolves to the matching fsspec filesystem."""
        data_set = GraphMLDataSet(filepath=filepath)
        assert isinstance(data_set._fs, instance_type)
        # The stored path has the protocol prefix stripped.
        path = filepath.split(PROTOCOL_DELIMITER, 1)[-1]
        assert str(data_set._filepath) == path
        assert isinstance(data_set._filepath, PurePosixPath)

    def test_catalog_release(self, mocker):
        """Releasing the dataset invalidates the filesystem cache for its path."""
        fs_mock = mocker.patch("fsspec.filesystem").return_value
        filepath = "test.graphml"
        data_set = GraphMLDataSet(filepath=filepath)
        data_set.release()
        fs_mock.invalidate_cache.assert_called_once_with(filepath)
class TestGraphMLDataSetVersioned:
    """Tests for GraphMLDataSet with versioning enabled."""

    def test_save_and_load(self, versioned_graphml_data_set, dummy_graph_data):
        """Test that saved and reloaded data matches the original one for
        the versioned data set."""
        versioned_graphml_data_set.save(dummy_graph_data)
        reloaded = versioned_graphml_data_set.load()
        assert dummy_graph_data.nodes(data=True) == reloaded.nodes(data=True)
        assert versioned_graphml_data_set._fs_open_args_load == {"mode": "rb"}
        assert versioned_graphml_data_set._fs_open_args_save == {"mode": "wb"}

    def METHOD_NAME(self, versioned_graphml_data_set):
        """Check the error if no versions are available for load."""
        pattern = r"Did not find any versions for GraphMLDataSet\(.+\)"
        with pytest.raises(DatasetError, match=pattern):
            versioned_graphml_data_set.load()

    def test_exists(self, versioned_graphml_data_set, dummy_graph_data):
        """Test `exists` method invocation for versioned data set."""
        assert not versioned_graphml_data_set.exists()
        versioned_graphml_data_set.save(dummy_graph_data)
        assert versioned_graphml_data_set.exists()

    def test_prevent_override(self, versioned_graphml_data_set, dummy_graph_data):
        """Check the error when attempt to override the same data set
        version."""
        versioned_graphml_data_set.save(dummy_graph_data)
        pattern = (
            r"Save path \'.+\' for GraphMLDataSet\(.+\) must not "
            r"exist if versioning is enabled"
        )
        with pytest.raises(DatasetError, match=pattern):
            versioned_graphml_data_set.save(dummy_graph_data)

    @pytest.mark.parametrize(
        "load_version", ["2019-01-01T23.59.59.999Z"], indirect=True
    )
    @pytest.mark.parametrize(
        "save_version", ["2019-01-02T00.00.00.000Z"], indirect=True
    )
    def test_save_version_warning(
        self, versioned_graphml_data_set, load_version, save_version, dummy_graph_data
    ):
        """Check the warning when saving to the path that differs from
        the subsequent load path."""
        pattern = (
            rf"Save version '{save_version}' did not match "
            rf"load version '{load_version}' for GraphMLDataSet\(.+\)"
        )
        with pytest.warns(UserWarning, match=pattern):
            versioned_graphml_data_set.save(dummy_graph_data)

    def test_version_str_repr(self, load_version, save_version):
        """Test that version is in string representation of the class instance
        when applicable."""
        filepath = "test.graphml"
        ds = GraphMLDataSet(filepath=filepath)
        ds_versioned = GraphMLDataSet(
            filepath=filepath, version=Version(load_version, save_version)
        )
        assert filepath in str(ds)
        assert "version" not in str(ds)
        assert filepath in str(ds_versioned)
        ver_str = f"version=Version(load={load_version}, save='{save_version}')"
        assert ver_str in str(ds_versioned)
        assert "GraphMLDataSet" in str(ds_versioned)
        assert "GraphMLDataSet" in str(ds)
        assert "protocol" in str(ds_versioned)
        assert "protocol" in str(ds)

    def test_versioning_existing_dataset(
        self, graphml_data_set, versioned_graphml_data_set, dummy_graph_data
    ):
        """Check the error when attempting to save a versioned dataset on top of an
        already existing (non-versioned) dataset."""
        graphml_data_set.save(dummy_graph_data)
        assert graphml_data_set.exists()
        assert graphml_data_set._filepath == versioned_graphml_data_set._filepath
        pattern = (
            f"(?=.*file with the same name already exists in the directory)"
            f"(?=.*{versioned_graphml_data_set._filepath.parent.as_posix()})"
        )
        with pytest.raises(DatasetError, match=pattern):
            versioned_graphml_data_set.save(dummy_graph_data)

        # Remove non-versioned dataset and try again
        Path(graphml_data_set._filepath.as_posix()).unlink()
        versioned_graphml_data_set.save(dummy_graph_data)
        assert versioned_graphml_data_set.exists()
# Copyright 2017 syzkaller project authors. All rights reserved.
# Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
'''
This module provides classes which implement header file preprocessing.
'''
import logging
import ntpath
import os
import subprocess
import tempfile
import traceback
import pycparser
# C source template used to generate the file handed to the preprocessor.
# The #defines neutralise GCC-specific extensions so that pycparser's pure-C
# grammar accepts the preprocessed output; %(include_lines)s and
# %(header_file_includes)s are filled in via `template % locals()` when the
# temporary source file is written.
template = '''
#include <stdbool.h>
#define _GNU_SOURCE /* See feature_test_macros(7) */
// ------ MAKE PYCPARSER HAPPY ------
#define __attribute__(...)
#define __inline inline
#define __restrict
#define __extension__
// #define __sighandler_t int
#define __user
#define __asm__(...)
#define __volatile__(...)
#define __signed__ signed
#define __int128_t unsigned long long // Hacky
#define __alignof__(...) 0
#define INIT // regex
typedef unsigned int size_t;
// ------ MAKE PYCPARSER HAPPY ------
#include <stdint.h>
%(include_lines)s
%(header_file_includes)s
'''
class HeaderFilePreprocessorException(Exception):
    '''Exceptions raised from HeaderFileParser. '''
class HeaderFilePreprocessor(object):
    '''
    Given a C header filename, perform pre-processing and return an
    ast that can be used for further processing.

    Usage :
    >>> import tempfile
    >>> t = tempfile.NamedTemporaryFile()
    >>> contents = """
    ... struct ARRAY_OF_POINTERS_CONTAINER {
    ...     unsigned int *ptr[10];
    ...     int **n;
    ... };
    ...
    ... struct ARRAY_CONTAINER {
    ...     int g[10];
    ...     int h[20][30];
    ... };
    ...
    ... struct REGULAR_STRUCT {
    ...     int x;
    ...     char *y;
    ...     void *ptr;
    ... };
    ...
    ... struct STRUCT_WITH_STRUCT_PTR {
    ...     struct REGULAR_STRUCT *struct_ptr;
    ...     int z;
    ... };
    ... """
    >>> t.write(contents) ; t.flush()
    >>> h = HeaderFilePreprocessor([t.name])
    >>> ast = h.get_ast()
    >>> print(type(ast))
    <class 'pycparser.c_ast.FileAST'>
    '''

    def __init__(self, filenames, include_lines='', loglvl=logging.INFO):
        """Prepare temp files, copy headers next to them, and run gcc -E.

        Order matters: logging is configured first (the helpers log), then
        the temp source/object paths are created, the headers are copied
        beside the generated source, and finally the preprocessor runs.
        """
        self.filenames = filenames
        self.include_lines = include_lines
        self._setuplogging(loglvl)
        self._mktempfiles()
        self._copyfiles()
        self._gcc_preprocess()

    def execute(self, cmd):
        """Run *cmd* through the shell, wrapping OS-level failures."""
        self.logger.debug('HeaderFilePreprocessor.execute: %s', cmd)
        p = subprocess.Popen(cmd, shell=True)
        try:
            os.waitpid(p.pid, 0)
        except OSError as exception:
            raise HeaderFilePreprocessorException(exception)

    def _setuplogging(self, loglvl):
        """Attach a stream handler at *loglvl* to this class's logger."""
        self.logger = logging.getLogger(self.__class__.__name__)
        formatter = logging.Formatter('DEBUG:%(name)s:%(message)s')
        sh = logging.StreamHandler()
        sh.setFormatter(formatter)
        sh.setLevel(loglvl)
        self.logger.addHandler(sh)
        self.logger.setLevel(loglvl)

    def _copyfiles(self):
        """Copy the input headers into the temp dir so #include "x.h" works."""
        self.execute('cp %s %s' % (' '.join(self.filenames), self.tempdir))

    # Renamed from the placeholder METHOD_NAME (matches the debug message
    # below and the call in __init__).
    def _mktempfiles(self):
        """Create the temp dir and write the generated source.c from template."""
        self.tempdir = tempfile.mkdtemp()
        self.temp_sourcefile = os.path.join(self.tempdir, 'source.c')
        self.temp_objectfile = os.path.join(self.tempdir, 'source.o')
        self.logger.debug(('HeaderFilePreprocessor._mktempfiles: sourcefile=%s'
                           'objectfile=%s'), self.temp_sourcefile, self.temp_objectfile)
        header_file_includes = ''
        include_lines = self.include_lines
        for name in self.filenames:
            header_file_includes = '%s#include "%s"\n' % (header_file_includes,
                                                          ntpath.basename(name))
        # Build the contents first so locals() carries the substitution keys,
        # then write with a context manager (the original leaked the handle).
        contents = template % (locals())
        with open(self.temp_sourcefile, 'w') as source:
            source.write(contents)

    def _gcc_preprocess(self):
        """Run gcc's preprocessor only (-E -P) over the generated source."""
        self.execute('gcc -I. -E -P -c %s > %s'
                     % (self.temp_sourcefile, self.temp_objectfile))

    def _get_ast(self):
        return pycparser.parse_file(self.temp_objectfile)

    def get_ast(self):
        """Return the pycparser FileAST, wrapping parse errors."""
        try:
            return self._get_ast()
        except pycparser.plyparser.ParseError as e:
            raise HeaderFilePreprocessorException(e)
import datetime
import locale
import urllib.parse
from django import template
from django.utils.html import conditional_escape, format_html
from django.utils.safestring import mark_safe
import tracker.viewutil as viewutil
# Library instance collecting this module's template tags and filters.
register = template.Library()
def tryresolve(var, context, default=None):
    """Resolve a template Variable against *context*; return *default* when
    the variable does not exist instead of raising."""
    try:
        return var.resolve(context)
    except template.VariableDoesNotExist:
        return default
def sortlink(style, contents, **params):
    """Build an anchor whose query string is assembled from the truthy
    keyword arguments; *style* becomes the CSS class when given."""
    query = urllib.parse.urlencode([kv for kv in params.items() if kv[1]])
    css = format_html(' class="{style}"', style=style) if style else ''
    return format_html(
        '<a href="?{args}"{style}><span style="display:none;">{contents}</span></a>',
        args=query,
        style=css,
        contents=contents,
    )
@register.simple_tag(takes_context=True)
def sort(context, sort_field, page=1):
    """Emit a pair of ascending/descending sort links for *sort_field*."""
    return sortlink('asc', 'Asc', sort=sort_field, order=1, page=page) + sortlink(
        'dsc', 'Dsc', sort=sort_field, order=-1, page=page
    )
@register.tag('pagefirst')
@register.tag('pagefull')
def do_pageff(parser, token):
    """Compile the zero-argument 'pagefirst'/'pagefull' tags."""
    pieces = token.split_contents()
    if len(pieces) != 1:
        raise template.TemplateSyntaxError(
            '%r tag takes no arguments' % token.contents.split()[0]
        )
    return PageFLFNode(pieces[0])
@register.tag('pagelast')
def do_pagel(parser, token):
    """Compile the one-argument 'pagelast' tag."""
    pieces = token.split_contents()
    if len(pieces) != 2:
        raise template.TemplateSyntaxError(
            '%r tag takes one argument' % token.contents.split()[0]
        )
    tag_name, page = pieces
    return PageFLFNode(tag_name, page)
class PageFLFNode(template.Node):
    """Renders the 'pagefirst' / 'pagelast' / 'pagefull' navigation links.

    *page* defaults to the current request's ?page= value.
    """

    def __init__(self, tag, page='request.GET.page'):
        self.tag = tag
        self.page = template.Variable(page)

    def render(self, context):
        # Carry the current sort/order query parameters through to the link.
        sort = tryresolve(template.Variable('request.GET.sort'), context)
        order = tryresolve(template.Variable('request.GET.order'), context)
        if self.tag == 'pagefirst':
            return sortlink('first', '|< ', sort=sort, order=order, page=1)
        elif self.tag == 'pagelast':
            page = self.page.resolve(context)
            return sortlink('last', '>| ', sort=sort, order=order, page=page)
        elif self.tag == 'pagefull':
            return sortlink(None, 'View Full List', sort=sort, order=order, page='full')
@register.tag('pageprev')
@register.tag('pagenext')
def do_pagepn(parser, token):
    """Compile the one-argument 'pageprev'/'pagenext' tags."""
    pieces = token.split_contents()
    if len(pieces) != 2:
        raise template.TemplateSyntaxError(
            '%r tag requires one argument' % token.contents.split()[0]
        )
    tag_name, page = pieces
    return PagePNNode(tag_name, page)
class PagePNNode(template.Node):
    """Renders the 'pageprev'/'pagenext' links with '<'/'>' glyphs."""

    # Maps tag name to the visible link text.
    dc = {'pageprev': '< ', 'pagenext': '> '}

    def __init__(self, tag, page):
        self.tag = tag
        self.page = template.Variable(page)

    def render(self, context):
        sort = tryresolve(template.Variable('request.GET.sort'), context)
        order = tryresolve(template.Variable('request.GET.order'), context)
        page = self.page.resolve(context)
        # self.tag[4:] strips the 'page' prefix, yielding the CSS class.
        return sortlink(
            self.tag[4:], PagePNNode.dc[self.tag], sort=sort, order=order, page=page
        )
@register.tag('pagelink')
# Renamed from the placeholder METHOD_NAME, matching the do_page* naming of
# the sibling tag-compiler functions and the registered 'pagelink' tag.
def do_pagelink(parser, token):
    """Compile the one-argument 'pagelink' tag: a link to a specific page."""
    try:
        tag_name, page = token.split_contents()
    except ValueError:
        raise template.TemplateSyntaxError(
            '%r tag requires one argument' % token.contents.split()[0]
        )
    return PageLinkNode(tag_name, page)
class PageLinkNode(template.Node):
    """Renders a numbered page link whose text is the page number itself."""

    def __init__(self, tag, page):
        self.tag = tag
        self.page = template.Variable(page)

    def render(self, context):
        sort = tryresolve(template.Variable('request.GET.sort'), context)
        order = tryresolve(template.Variable('request.GET.order'), context)
        page = self.page.resolve(context)
        return sortlink('', page, sort=sort, order=order, page=page)
@register.tag('rendertime')
def do_rendertime(parser, token):
    """Compile the 'rendertime' tag: shows elapsed time since its argument."""
    pieces = token.split_contents()
    if len(pieces) != 2:
        raise template.TemplateSyntaxError(
            '%r tag requires a single argument' % token.contents.split()[0]
        )
    _, time = pieces
    return RenderTimeNode(time)
class RenderTimeNode(template.Node):
    """Renders the wall-clock time elapsed since the resolved *time* value."""

    def __init__(self, time):
        self.time = template.Variable(time)

    def render(self, context):
        try:
            time = self.time.resolve(context)
            try:
                # `now` is actually a timedelta (current time minus *time*).
                now = datetime.datetime.now() - time
            except TypeError:
                return ''
            # NOTE(review): %d does not zero-pad microseconds, so e.g. 5 µs
            # renders as "0.5 seconds" -- confirm the intended formatting.
            return '%d.%d seconds' % (now.seconds, now.microseconds)
        except template.VariableDoesNotExist:
            return ''
@register.filter
def forumfilter(value, autoescape=None):
    """Escape (when autoescaping is active) and turn newlines into <br />."""
    escape = conditional_escape if autoescape else (lambda text: text)
    return mark_safe(escape(value).replace('\n', '<br />'))
forumfilter.is_safe = True
forumfilter.needs_autoescape = True
@register.filter
def money(value):
    """Format *value* as currency using the process locale.

    NOTE(review): setlocale mutates process-global state on every call and
    is not thread-safe -- confirm this is acceptable for the deployment.
    """
    locale.setlocale(locale.LC_ALL, '')
    try:
        if not value:
            return locale.currency(0.0)
        return locale.currency(value, symbol=True, grouping=True)
    except ValueError:
        # Fall back to a fixed locale when the environment locale cannot
        # format currency (e.g. the default "C" locale).
        locale.setlocale(locale.LC_MONETARY, 'en_US.utf8')
        if not value:
            return locale.currency(0.0)
        return locale.currency(value, symbol=True, grouping=True)
money.is_safe = True
@register.filter('abs')
def filabs(value, arg):
    """Absolute difference of two integer-coercible values."""
    try:
        left, right = int(value), int(arg)
    except ValueError:
        raise template.TemplateSyntaxError('abs requires integer arguments')
    return abs(left - right)
@register.filter('mod')
def filmod(value, arg):
    """Remainder of *value* modulo *arg* (both coerced to int)."""
    try:
        left, right = int(value), int(arg)
    except ValueError:
        raise template.TemplateSyntaxError('mod requires integer arguments')
    return left % right
@register.filter('negate')
def negate(value):
    """Logical negation of the argument's truthiness."""
    return not bool(value)
@register.simple_tag
def admin_url(obj):
    """Return the admin-site URL for *obj* (delegates to viewutil)."""
    return viewutil.admin_url(obj)
@register.simple_tag(takes_context=True)
def standardform(
    context, form, formid='formid', submittext='Submit', action=None, showrequired=True
):
    """Render a complete form through the standardform.html template."""
    return template.loader.render_to_string(
        'standardform.html',
        {
            'form': form,
            'formid': formid,
            'submittext': submittext,
            # BUG FIX: the key was the bare variable `action` (usually None),
            # so the template never received an 'action' context variable.
            'action': action,
            'csrf_token': context.get('csrf_token', None),
            'showrequired': showrequired,
        },
    )
@register.simple_tag(takes_context=True)
def form_innards(context, form, showrequired=True):
    """Render just the fields of *form* via the form_innards.html template."""
    payload = {
        'form': form,
        'showrequired': showrequired,
        'csrf_token': context.get('csrf_token', None),
    }
    return template.loader.render_to_string('form_innards.html', payload)
__all__ = ['qpt_plot', 'qpt_plot_combined', 'qpt']
from numpy import hstack, real, imag
import scipy.linalg as la
from . import tensor, spre, spost, stack_columns, unstack_columns
from .visualization import matrix_histogram
import itertools
# Matplotlib is optional: when it is missing, the plotting helpers below
# fail with NameError at call time rather than at import time.
try:
    import matplotlib.pyplot as plt
except ImportError:
    # Narrowed from a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt raised during import.
    pass
def _index_permutations(size_list):
"""
Generate a list with all index permutations.
Parameters
----------
size_list : list
A list that contains the sizes for each composite system.
Returns
-------
perm_idx : list
List containing index permutations.
"""
return itertools.product(*[range(N) for N in size_list])
# Renamed from the placeholder METHOD_NAME: `__all__` at the top of the
# module exports 'qpt_plot', which otherwise would not exist.
def qpt_plot(chi, lbls_list, title=None, fig=None, axes=None):
    """
    Visualize the quantum process tomography chi matrix. Plot the real and
    imaginary parts separately.

    Parameters
    ----------
    chi : array
        Input QPT chi matrix.
    lbls_list : list
        List of labels for QPT plot axes.
    title : string
        Plot title.
    fig : figure instance
        User defined figure instance used for generating QPT plot.
    axes : list of figure axis instance
        User defined figure axis instance (list of two axes) used for
        generating QPT plot.

    Returns
    -------
    fig, ax : tuple
        A tuple of the matplotlib figure and axes instances used to produce
        the figure.
    """
    if axes is None or len(axes) != 2:
        if fig is None:
            fig = plt.figure(figsize=(16, 8))
        ax1 = fig.add_subplot(1, 2, 1, projection='3d', position=[0, 0, 1, 1])
        ax2 = fig.add_subplot(1, 2, 2, projection='3d', position=[0, 0, 1, 1])
        axes = [ax1, ax2]
    # Axis labels are concatenations of one label per subsystem.
    xlabels = []
    for inds in _index_permutations([len(lbls) for lbls in lbls_list]):
        xlabels.append("".join([lbls_list[k][inds[k]]
                                for k in range(len(lbls_list))]))
    matrix_histogram(real(chi), xlabels, xlabels, limits=[-1, 1], ax=axes[0])
    axes[0].set_title(r"real($\chi$)")
    matrix_histogram(imag(chi), xlabels, xlabels, limits=[-1, 1], ax=axes[1])
    axes[1].set_title(r"imag($\chi$)")
    if title and fig:
        fig.suptitle(title)
    return fig, axes
def qpt_plot_combined(chi, lbls_list, title=None,
                      fig=None, ax=None, figsize=(8, 6),
                      threshold=None):
    """
    Visualize the quantum process tomography chi matrix. Plot bars with
    height and color corresponding to the absolute value and phase,
    respectively.

    Parameters
    ----------
    chi : array
        Input QPT chi matrix.
    lbls_list : list
        List of labels for QPT plot axes.
    title : string
        Plot title.
    fig : figure instance
        User defined figure instance used for generating QPT plot.
    ax : figure axis instance
        User defined figure axis instance used for generating QPT plot
        (alternative to the fig argument).
    threshold: float (None)
        Threshold for when bars of smaller height should be transparent. If
        not set, all bars are colored according to the color map.

    Returns
    -------
    fig, ax : tuple
        A tuple of the matplotlib figure and axes instances used to produce
        the figure.
    """
    if ax is None:
        if fig is None:
            fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(1, 1, 1, projection='3d', position=[0, 0, 1, 1])
    # Axis labels are concatenations of one label per subsystem.
    xlabels = []
    for inds in _index_permutations([len(lbls) for lbls in lbls_list]):
        xlabels.append("".join(
            [lbls_list[k][inds[k]] for k in range(len(lbls_list))]))
    if not title:
        title = r"$\chi$"
    matrix_histogram(chi, xlabels, xlabels, bar_style='abs',
                     color_style='phase',
                     options={'threshold': threshold}, ax=ax)
    ax.set_title(title)
    return fig, ax
def qpt(U, op_basis_list):
    """
    Calculate the quantum process tomography chi matrix for a given (possibly
    nonunitary) transformation matrix U, which transforms a density matrix in
    vector form according to:

        vec(rho) = U * vec(rho0)

    or

        rho = unstack_columns(U * stack_columns(rho0))

    U can be calculated for an open quantum system using the QuTiP propagator
    function.

    Parameters
    ----------
    U : Qobj
        Transformation operator. Can be calculated using QuTiP propagator
        function.
    op_basis_list : list
        A list of Qobj's representing the basis states.

    Returns
    -------
    chi : array
        QPT chi matrix
    """
    # Tensor together one basis operator per subsystem for every index tuple.
    E_ops = [
        tensor([op_basis_list[k][inds[k]] for k in range(len(op_basis_list))])
        for inds in _index_permutations([len(ops) for ops in op_basis_list])
    ]
    EE_ops = [spre(E1) * spost(E2.dag()) for E1 in E_ops for E2 in E_ops]
    # Solve M @ chi_vec = vec(U) for the chi matrix in vectorized form.
    M = hstack([EE.full().ravel('F')[:, None] for EE in EE_ops])
    Uvec = U.full().ravel('F')
    chi_vec = la.solve(M, Uvec)
    return chi_vec.reshape(U.shape).T
import sys
from types import TracebackType
from typing import Any
from typing_extensions import Literal, Self, TypeAlias, final
# winreg API type stubs (.pyi-style; every body is `...`), Windows-only.
# NOTE(review): the indentation of this file was flattened by extraction --
# everything below should be indented inside this `if` suite.
if sys.platform == "win32":
# A registry key handle may be an HKEYType object or a raw integer handle.
_KeyType: TypeAlias = HKEYType | int
def CloseKey(__hkey: _KeyType) -> None: ...
def ConnectRegistry(__computer_name: str | None, __key: _KeyType) -> HKEYType: ...
def CreateKey(__key: _KeyType, __sub_key: str | None) -> HKEYType: ...
def CreateKeyEx(key: _KeyType, sub_key: str | None, reserved: int = 0, access: int = 131078) -> HKEYType: ...
def DeleteKey(__key: _KeyType, __sub_key: str) -> None: ...
def DeleteKeyEx(key: _KeyType, sub_key: str, access: int = 256, reserved: int = 0) -> None: ...
def DeleteValue(__key: _KeyType, __value: str) -> None: ...
def EnumKey(__key: _KeyType, __index: int) -> str: ...
def EnumValue(__key: _KeyType, __index: int) -> tuple[str, Any, int]: ...
def ExpandEnvironmentStrings(__string: str) -> str: ...
def FlushKey(__key: _KeyType) -> None: ...
def LoadKey(__key: _KeyType, __sub_key: str, __file_name: str) -> None: ...
def OpenKey(key: _KeyType, sub_key: str, reserved: int = 0, access: int = 131097) -> HKEYType: ...
def OpenKeyEx(key: _KeyType, sub_key: str, reserved: int = 0, access: int = 131097) -> HKEYType: ...
def QueryInfoKey(__key: _KeyType) -> tuple[int, int, int]: ...
def QueryValue(__key: _KeyType, __sub_key: str | None) -> str: ...
def QueryValueEx(__key: _KeyType, __name: str) -> tuple[Any, int]: ...
def SaveKey(__key: _KeyType, __file_name: str) -> None: ...
def SetValue(__key: _KeyType, __sub_key: str, __type: int, __value: str) -> None: ...
# Renamed from the placeholder METHOD_NAME: this is winreg's SetValueEx,
# the Ex companion to SetValue above (parallel to QueryValue/QueryValueEx).
def SetValueEx(
    __key: _KeyType, __value_name: str | None, __reserved: Any, __type: int, __value: str | int
) -> None: ...  # reserved is ignored
# Registry reflection control (WOW64).
def DisableReflectionKey(__key: _KeyType) -> None: ...
def EnableReflectionKey(__key: _KeyType) -> None: ...
def QueryReflectionKey(__key: _KeyType) -> bool: ...
# Predefined root key handles.
HKEY_CLASSES_ROOT: int
HKEY_CURRENT_USER: int
HKEY_LOCAL_MACHINE: int
HKEY_USERS: int
HKEY_PERFORMANCE_DATA: int
HKEY_CURRENT_CONFIG: int
HKEY_DYN_DATA: int
# Access-rights flags (values per the Windows SDK).
KEY_ALL_ACCESS: Literal[983103]
KEY_WRITE: Literal[131078]
KEY_READ: Literal[131097]
KEY_EXECUTE: Literal[131097]
KEY_QUERY_VALUE: Literal[1]
KEY_SET_VALUE: Literal[2]
KEY_CREATE_SUB_KEY: Literal[4]
KEY_ENUMERATE_SUB_KEYS: Literal[8]
KEY_NOTIFY: Literal[16]
KEY_CREATE_LINK: Literal[32]
KEY_WOW64_64KEY: Literal[256]
KEY_WOW64_32KEY: Literal[512]
# Registry value types.
REG_BINARY: Literal[3]
REG_DWORD: Literal[4]
REG_DWORD_LITTLE_ENDIAN: Literal[4]
REG_DWORD_BIG_ENDIAN: Literal[5]
REG_EXPAND_SZ: Literal[2]
REG_LINK: Literal[6]
REG_MULTI_SZ: Literal[7]
REG_NONE: Literal[0]
REG_QWORD: Literal[11]
REG_QWORD_LITTLE_ENDIAN: Literal[11]
REG_RESOURCE_LIST: Literal[8]
REG_FULL_RESOURCE_DESCRIPTOR: Literal[9]
REG_RESOURCE_REQUIREMENTS_LIST: Literal[10]
REG_SZ: Literal[1]
REG_CREATED_NEW_KEY: int  # undocumented
REG_LEGAL_CHANGE_FILTER: int  # undocumented
REG_LEGAL_OPTION: int  # undocumented
REG_NOTIFY_CHANGE_ATTRIBUTES: int  # undocumented
REG_NOTIFY_CHANGE_LAST_SET: int  # undocumented
REG_NOTIFY_CHANGE_NAME: int  # undocumented
REG_NOTIFY_CHANGE_SECURITY: int  # undocumented
REG_NO_LAZY_FLUSH: int  # undocumented
REG_OPENED_EXISTING_KEY: int  # undocumented
REG_OPTION_BACKUP_RESTORE: int  # undocumented
REG_OPTION_CREATE_LINK: int  # undocumented
REG_OPTION_NON_VOLATILE: int  # undocumented
REG_OPTION_OPEN_LINK: int  # undocumented
REG_OPTION_RESERVED: int  # undocumented
REG_OPTION_VOLATILE: int  # undocumented
REG_REFRESH_HIVE: int  # undocumented
REG_WHOLE_HIVE_VOLATILE: int  # undocumented
# winreg.error is an alias of OSError.
error = OSError
# Though this class has a __name__ of PyHKEY, it's exposed as HKEYType for some reason
@final
class HKEYType:
    """Handle object returned by the key-opening functions above; usable as
    a context manager that closes the key on exit."""

    def __bool__(self) -> bool: ...
    def __int__(self) -> int: ...
    def __enter__(self) -> Self: ...
    def __exit__(
        self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None
    ) -> bool | None: ...
    # Close() releases the handle; Detach() returns the raw handle as an int.
    def Close(self) -> None: ...
    def Detach(self) -> int: ...
    def __hash__(self) -> int: ...
# Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# The Universal Permissive License (UPL), Version 1.0
#
# Subject to the condition set forth below, permission is hereby granted to any
# person obtaining a copy of this software, associated documentation and/or
# data (collectively the "Software"), free of charge and under any and all
# copyright rights in the Software, and any and all patent rights owned or
# freely licensable by each licensor hereunder covering either (i) the
# unmodified Software as contributed to or provided by such licensor, or (ii)
# the Larger Works (as defined below), to deal in both
#
# (a) the Software, and
#
# (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
# one is included with the Software each a "Larger Work" to which the Software
# is contributed by such licensors),
#
# without restriction, including without limitation the rights to copy, create
# derivative works of, display, perform, and distribute the Software and make,
# use, sell, offer for sale, import, export, have made, and have sold the
# Software and the Larger Work(s), and to sublicense the foregoing rights on
# either these or other terms.
#
# This license is subject to the following condition:
#
# The above copyright notice and either this complete permission notice or at a
# minimum a reference to the UPL must be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def assert_raises(err, fn, *args, **kwargs):
    """Fail with AssertionError unless fn(*args, **kwargs) raises *err*.

    Exceptions of any other type propagate to the caller unchanged.
    """
    try:
        fn(*args, **kwargs)
    except err:
        return
    assert False
def test_reduce_ex_with_slots():
    # Adapted from test_desc.test_issue24097
    class A:
        __slotnames__ = ['spam']

        def __getattr__(self, attr):
            if attr != 'spam':
                raise AttributeError
            return 42

    import copyreg
    expected = (copyreg.__newobj__, (A,), (None, {'spam': 42}), None, None)
    assert A().__reduce_ex__(2) == expected
def test_set_dict_attr_builtin_extension():
    # Subclasses of builtins gain a writable instance __dict__.
    class MyList(list):
        pass

    instance = MyList()
    assert instance.__dict__ == {}
    instance.__dict__ = {'a': 9}
    assert instance.__dict__ == {'a': 9}
    assert instance.a == 9
def test_get_dict_attr():
    # Plain `object` instances expose no __dict__ at all.
    o = object()

    def read_dict():
        return o.__dict__

    def write_dict():
        o.__dict__ = {'a': 10}

    assert_raises(AttributeError, read_dict)
    assert_raises(AttributeError, write_dict)
def test_set_dict_attr():
    class MyClass(object):
        def __init__(self):
            self.a = 9

    obj = MyClass()
    assert obj.a == 9
    assert obj.__dict__ == {'a': 9}
    assert obj.a == 9
    # Replacing __dict__ wholesale swaps the instance state.
    obj.__dict__ = {'a': 10}
    assert obj.__dict__ == {'a': 10}
    assert obj.a == 10
    obj.d = 20
    assert obj.d == 20
    assert "d" in obj.__dict__
    assert obj.__dict__ == {'a': 10, 'd': 20}
    # check dir & __dir__
    assert sorted(list(obj.__dir__())) == dir(obj)
def test_set_attr_builtins():
    # A bare builtin instance rejects new attributes...
    plain = list()

    def assign():
        plain.a = 10

    assert_raises(AttributeError, assign)

    # ...while a subclass instance accepts them.
    class MyList(list):
        pass

    subclassed = MyList()
    subclassed.a = 10
    assert subclassed.a == 10
def test_set_dict_attr_with_getattr_defined():
    """__getattr__ fallback must keep working after __dict__ is replaced."""
    class MyOtherClass(object):
        def __getattribute__(self, item):
            return object.__getattribute__(self, item)

        def __getattr__(self, item):
            if item == "my_attr":
                return 10
            raise AttributeError

    m1 = MyOtherClass()

    # Renamed from the placeholder METHOD_NAME.
    def get_non_existing_attr():
        return m1.my_attr_2

    assert_raises(AttributeError, get_non_existing_attr)
    assert m1.my_attr == 10
    assert "my_attr" not in m1.__dict__
    m1.__dict__ = {'d': 10}
    assert m1.my_attr == 10
    assert "my_attr" not in m1.__dict__
    assert m1.d == 10
def test_class_attr():
    class AAA:
        def foo(self):
            # Zero-argument-super machinery: __class__ inside a method body
            # refers to the lexically enclosing class via a closure cell.
            assert __class__ == AAA
            assert self.__class__ == AAA

    class BBB:
        pass

    class CCC(AAA):
        def getclass(self):
            return BBB
        # Shadow the attribute lookup: self.__class__ now goes through this
        # property and returns BBB...
        __class__ = property(getclass)

        def bar(self):
            # ...while the closure-cell __class__ still refers to CCC.
            assert __class__ == CCC
            assert self.__class__ == BBB

    AAA().foo()
    CCC().bar()
def test_reduce_ex_with_none():
    # Calling a plain object() instance (with None as the argument) must
    # raise TypeError since object is not callable.  NOTE(review): despite
    # the name, this never invokes __reduce_ex__ -- confirm the intent.
    assert_raises(TypeError, object(), None)
def test_descr_call_with_none():
    # The '__class__' getset descriptor from `object` itself.
    descr = object.__dict__['__class__']
    assert None.__class__ is type(None)
    # __get__(None, owner) returns the descriptor itself (unbound access).
    assert descr.__get__(None, type(None)) is descr
    # Passing None for both instance and owner is rejected.
    assert_raises(TypeError, descr.__get__, None, None)
def test_custom_getattribute():
class AAA:
__slots__ = '__wrapped__'
def __init__(self, wrapped):
object.__setattr__(self, '__wrapped__', wrapped)
def __index__(self):
return self.__wrapped__.__index__()
def __len__(self):
return len(self.__wrapped__)
def __contains__(self, value):
return value in self.__wrapped__
def __getitem__(self, key):
return self.__wrapped__[key]
def __setitem__(self, key, value):
self.__wrapped__[key] = value
def __getattr__(self, name):
if name == '__wrapped__':
raise ValueError('wrapper has not been initialised')
return getattr(self.__wrapped__, name)
def __iter__(self):
return iter(self.__wrapped__)
class BBB(AAA):
def __init__(self, wrapped_dict=None):
AAA.__init__(self, wrapped_dict)
def __getattribute__(self, name):
if (hasattr(type(self), name)
and isinstance(getattr(type(self), name), property)):
return object.__getattribute__(self, name)
else:
return super().__getattribute__(name)
d = {"abc": 1}
assert dict(BBB(d)) == |
#!/usr/bin/python
from pyparsing import *
from charm.toolbox.node import *
import string
# Module-level stack filled by the pushFirst parse action during parsing and
# consumed afterwards to rebuild the policy tree; cleared on each parse.
objStack = []
def createAttribute(s, loc, toks):
    """pyparsing parse action: wrap an attribute token (optionally negated
    with a leading '!') in a BinNode leaf."""
    if toks[0] == '!':
        # Fold the '!' and the attribute name into a single leaf label.
        return BinNode("".join(toks))
    return BinNode(toks[0])  # create
# convert 'attr < value' to a binary tree based on 'or' and 'and'
def parseNumConditional(s, loc, toks):
    """Parse action for 'attr <op> value' leaves.

    NOTE(review): only toks[0] (the attribute) is kept -- the operator and
    numeric value are discarded; confirm that is intended.
    """
    print("print: %s" % toks)
    return BinNode(toks[0])
def printStuff(s, loc, toks):
    """Debug parse action: print the tokens and pass them through unchanged."""
    print("print: %s" % toks)
    return toks
def pushFirst( s, loc, toks ):
    """Parse action: push the first token onto the global objStack."""
    objStack.append( toks[0] )
def createTree(op, node1, node2):
    """Build an OR/AND BinNode with the two children; None for other ops."""
    op_types = {"or": OpType.OR, "and": OpType.AND}
    if op not in op_types:
        return None
    node = BinNode(op_types[op])
    node.addSubNode(node1, node2)
    return node
class PolicyParser:
    """Parses boolean attribute policies (e.g. "(A or B) and C") into BinNode
    trees and prunes them against a user's attribute set."""

    def __init__(self, verbose=False):
        self.finalPol = self.getBNF()
        self.verbose = verbose

    def getBNF(self):
        """Build and return the pyparsing grammar for policy strings."""
        # supported operators => (OR, AND, <
        OperatorOR = Literal("OR").setParseAction(downcaseTokens) | Literal("or")
        OperatorAND = Literal("AND").setParseAction(downcaseTokens) | Literal("and")
        Operator = OperatorAND | OperatorOR
        lpar = Literal("(").suppress()
        rpar = Literal(")").suppress()
        BinOperator = Literal("<=") | Literal(">=") | Literal("==") | Word("<>", max=1)
        # describes an individual leaf node
        leafNode = (Optional("!") + Word(alphanums+'-_./\?!@#$^&*%')).setParseAction( createAttribute )
        # describes expressions such as (attr < value)
        leafConditional = (Word(alphanums) + BinOperator + Word(nums)).setParseAction( parseNumConditional )
        # describes the node concept
        node = leafConditional | leafNode
        expr = Forward()
        term = Forward()
        atom = lpar + expr + rpar | (node).setParseAction( pushFirst )
        term = atom + ZeroOrMore((Operator + term).setParseAction( pushFirst ))
        expr << term + ZeroOrMore((Operator + term).setParseAction( pushFirst ))
        finalPol = expr#.setParseAction( printStuff )
        return finalPol

    def evalStack(self, stack):
        """Rebuild a BinNode tree from the postfix stack filled by pushFirst."""
        op = stack.pop()
        if op in ["or", "and"]:
            op2 = self.evalStack(stack)
            op1 = self.evalStack(stack)
            return createTree(op, op1, op2)
        else:
            # Node value (attribute)
            return op

    def parse(self, string):
        """Parse *string* into a BinNode policy tree (uses global objStack)."""
        global objStack
        del objStack[:]
        self.finalPol.parseString(string)
        return self.evalStack(objStack)

    def findDuplicates(self, tree, _dict):
        """Count occurrences of each attribute in *tree* into _dict."""
        if tree.left: self.findDuplicates(tree.left, _dict)
        if tree.right: self.findDuplicates(tree.right, _dict)
        if tree.getNodeType() == OpType.ATTR:
            key = tree.getAttribute()
            if _dict.get(key) == None: _dict[ key ] = 1
            else: _dict[ key ] += 1

    def labelDuplicates(self, tree, _dictLabel):
        """Assign increasing indices to repeated attribute leaves in *tree*."""
        if tree.left: self.labelDuplicates(tree.left, _dictLabel)
        if tree.right: self.labelDuplicates(tree.right, _dictLabel)
        if tree.getNodeType() == OpType.ATTR:
            key = tree.getAttribute()
            if _dictLabel.get(key) != None:
                tree.index = _dictLabel[ key ]
                _dictLabel[ key ] += 1

    def prune(self, tree, attributes):
        """given policy tree and attributes, determine whether the attributes satisfy the policy.
        if not enough attributes to satisfy policy, return None otherwise, a pruned list of
        attributes to potentially recover the associated secret.
        """
        (policySatisfied, prunedList) = self.requiredAttributes(tree, attributes)
        if not policySatisfied:
            # NOTE(review): this returns False rather than None as the
            # docstring states -- both are falsy, so callers typically work.
            return policySatisfied
        return prunedList

    # Renamed from the placeholder METHOD_NAME so prune()'s call resolves.
    def requiredAttributes(self, tree, attrList):
        """ determines the required attributes to satisfy policy tree and returns a list of BinNode
        objects."""
        # NOTE(review): returns bare 0 (not a tuple) for a None tree, which
        # would break the tuple-unpacking recursion above -- confirm intent.
        if tree == None: return 0
        Left = tree.getLeft()
        Right = tree.getRight()
        if Left: resultLeft, leftAttr = self.requiredAttributes(Left, attrList)
        if Right: resultRight, rightAttr = self.requiredAttributes(Right, attrList)
        if(tree.getNodeType() == OpType.OR):
            # never return both attributes, basically the first one that matches from left to right
            if resultLeft: sendThis = leftAttr
            elif resultRight: sendThis = rightAttr
            else: sendThis = None
            result = (resultLeft or resultRight)
            if result == False: return (False, sendThis)
            return (True, sendThis)
        if(tree.getNodeType() == OpType.AND):
            if resultLeft and resultRight: sendThis = leftAttr + rightAttr
            elif resultLeft: sendThis = leftAttr
            elif resultRight: sendThis = rightAttr
            else: sendThis = None
            result = (resultLeft and resultRight)
            if result == False: return (False, sendThis)
            return (True, sendThis)
        elif(tree.getNodeType() == OpType.ATTR):
            if(tree.getAttribute() in attrList):
                return (True, [tree])
            else:
                return (False, None)
        # Unknown node type: fall through with an implicit None.
        return
if __name__ == "__main__":
# policy parser test cases
parser = PolicyParser()
attrs = ['1', '3']
print("Attrs in user set: ", attrs)
tree1 = parser.parse("(1 or 2) and (2 and 3))")
print("case 1: ", tree1, ", pruned: ", parser.prune(tree1, attrs))
tree2 = parser.parse("1 or (2 and 3)")
print("case 2: ", tree2, ", pruned: ", parser.prune(tree2, attrs))
tree3 = parser.parse("(1 or 2) and (4 or 3)")
print("case 3: ", tree3, ", pruned: ", parser.prune(tree3, attrs))
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "network vhub route-map delete",
    confirmation="Are you sure you want to perform this operation?",
)
class Delete(AAZCommand):
    """Delete a route map.

    :example: Delete route map
        az network vhub route-map delete -n route-map-name -g rg --vhub-name vhub-name
    """

    # Resource/API-version metadata consumed by the aaz framework.
    _aaz_info = {
        "version": "2022-05-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/virtualhubs/{}/routemaps/{}", "2022-05-01"],
        ]
    }

    AZ_SUPPORT_NO_WAIT = True

    def _handler(self, command_args):
        super()._handler(command_args)
        # Long-running operation: return a poller with no result builder.
        return self.build_lro_poller(self._execute_operations, None)

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        """Build (once) and cache the CLI argument schema for this command."""
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.route_map_name = AAZStrArg(
            options=["-n", "--name", "--route-map-name"],
            help="The name of the RouteMap.",
            required=True,
            id_part="child_name_1",
        )
        _args_schema.vhub_name = AAZStrArg(
            options=["--vhub-name"],
            help="The name of the VirtualHub containing the RouteMap.",
            required=True,
            id_part="name",
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        yield self.RouteMapsDelete(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    class RouteMapsDelete(AAZHttpOperation):
        """Issues the DELETE request and polls the long-running operation."""

        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [202]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [200]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [204]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    # Renamed from the placeholder METHOD_NAME: the 204
                    # callback, parallel to on_200 above.
                    self.on_204,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeMaps/{routeMapName}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "DELETE"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "routeMapName", self.ctx.args.route_map_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "virtualHubName", self.ctx.args.vhub_name,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2022-05-01",
                    required=True,
                ),
            }
            return parameters

        # DELETE yields no body on success; both callbacks are no-ops.
        def on_200(self, session):
            pass

        def on_204(self, session):
            pass
class _DeleteHelper:
"""Helper class for Delete"""
__all__ = ["Delete"] |
# Copyright (c) 2009-2014 Upi Tamminen <desaster@gmail.com>
# See the COPYRIGHT file for more information
"""
This module contains ...
"""
from __future__ import annotations
import os
from zope.interface import implementer
import twisted
import twisted.conch.ls
from twisted.conch.interfaces import ISFTPFile, ISFTPServer
from twisted.conch.ssh import filetransfer
from twisted.conch.ssh.filetransfer import (
FXF_APPEND,
FXF_CREAT,
FXF_EXCL,
FXF_READ,
FXF_TRUNC,
FXF_WRITE,
)
from twisted.python import log
from twisted.python.compat import nativeString
from cowrie.shell import pwd
from cowrie.core.config import CowrieConfig
@implementer(ISFTPFile)
class CowrieSFTPFile:
    """
    An open file on the honeypot's fake filesystem (ISFTPFile).

    Uploaded byte counts are tracked so transfers can be capped at the
    configured ``download_limit_size`` (0 means no limit).
    """

    contents: bytes
    bytesReceived: int = 0
    # Read once at class-creation time from the [honeypot] config section.
    bytesReceivedLimit: int = CowrieConfig.getint(
        "honeypot", "download_limit_size", fallback=0
    )

    def __init__(self, sftpserver, filename, flags, attrs):
        self.sftpserver = sftpserver
        self.filename = filename
        # Translate the SFTP FXF_* flag bits into os.O_* open flags.
        openFlags = 0
        if flags & FXF_READ == FXF_READ and flags & FXF_WRITE == 0:
            openFlags = os.O_RDONLY
        if flags & FXF_WRITE == FXF_WRITE and flags & FXF_READ == 0:
            openFlags = os.O_WRONLY
        if flags & FXF_WRITE == FXF_WRITE and flags & FXF_READ == FXF_READ:
            openFlags = os.O_RDWR
        if flags & FXF_APPEND == FXF_APPEND:
            openFlags |= os.O_APPEND
        if flags & FXF_CREAT == FXF_CREAT:
            openFlags |= os.O_CREAT
        if flags & FXF_TRUNC == FXF_TRUNC:
            openFlags |= os.O_TRUNC
        if flags & FXF_EXCL == FXF_EXCL:
            openFlags |= os.O_EXCL
        if "permissions" in attrs:
            filemode = attrs["permissions"]
            del attrs["permissions"]
        else:
            filemode = 0o777
        fd = sftpserver.fs.open(filename, openFlags, filemode)
        if attrs:
            self.sftpserver.setAttrs(filename, attrs)
        self.fd = fd
        # Cache a copy of file in memory to read from in readChunk
        if flags & FXF_READ == FXF_READ:
            self.contents = self.sftpserver.fs.file_contents(self.filename)

    def close(self):
        # Record the final uploaded size before closing the fake fd.
        if self.bytesReceived > 0:
            self.sftpserver.fs.update_size(self.filename, self.bytesReceived)
        return self.sftpserver.fs.close(self.fd)

    def readChunk(self, offset: int, length: int) -> bytes:
        # Served from the in-memory copy cached in __init__.
        return self.contents[offset : offset + length]

    def writeChunk(self, offset: int, data: bytes) -> None:
        self.bytesReceived += len(data)
        if self.bytesReceivedLimit and self.bytesReceived > self.bytesReceivedLimit:
            raise filetransfer.SFTPError(filetransfer.FX_FAILURE, "Quota exceeded")
        self.sftpserver.fs.lseek(self.fd, offset, os.SEEK_SET)
        self.sftpserver.fs.write(self.fd, data)

    def getAttrs(self):
        s = self.sftpserver.fs.stat(self.filename)
        # BUG FIX: the stat-dict builder on the server is the private
        # _getAttrs(s); the public getAttrs(path, followLinks) takes a
        # path and would raise TypeError here.
        return self.sftpserver._getAttrs(s)

    def setAttrs(self, attrs):
        raise NotImplementedError
class CowrieSFTPDirectory:
    """
    Iterator over a directory of the honeypot filesystem, yielding
    (name, longname, attrs) tuples as required by ISFTPServer.openDirectory.
    """

    def __init__(self, server, directory):
        self.server = server
        self.files = server.fs.listdir(directory)
        self.files = [".", "..", *self.files]
        self.dir = directory

    def __iter__(self):
        return self

    def __next__(self):
        try:
            name = self.files.pop(0)
        except IndexError:
            raise StopIteration from None
        # The former three copy-pasted branches ("..", ".", regular entry)
        # only differed in which path gets stat'ed — compute it once.
        if name == "..":
            parts = self.dir.strip().split("/")
            path = "/" + "/".join(parts[:-1])
        elif name == ".":
            path = self.dir
        else:
            path = os.path.join(self.dir, name)
        s = self.server.fs.lstat(path)
        # A second stat gets its numeric ids replaced by user/group names
        # for the `ls -l` style longname, while `s` keeps numeric values
        # for the attrs dict.
        pretty = self.server.fs.lstat(path)
        pretty.st_uid = pwd.Passwd().getpwuid(s.st_uid)["pw_name"]
        pretty.st_gid = pwd.Group().getgrgid(s.st_gid)["gr_name"]
        longname = twisted.conch.ls.lsLine(name, pretty)
        attrs = self.server._getAttrs(s)
        return (name, longname, attrs)

    def close(self):
        self.files = []
@implementer(ISFTPServer)
class SFTPServerForCowrieUser:
    """
    ISFTPServer implementation that maps SFTP requests onto the
    honeypot's fake filesystem and logs every operation.
    """

    def __init__(self, avatar):
        self.avatar = avatar
        self.avatar.server.initFileSystem(self.avatar.home)
        self.fs = self.avatar.server.fs

    def _absPath(self, path):
        # Resolve a client-supplied path relative to the avatar's home.
        home = self.avatar.home
        return os.path.abspath(os.path.join(nativeString(home), nativeString(path)))

    def _setAttrs(self, path, attrs):
        # Apply only the attribute pairs the client actually supplied.
        if "uid" in attrs and "gid" in attrs:
            self.fs.chown(path, attrs["uid"], attrs["gid"])
        if "permissions" in attrs:
            self.fs.chmod(path, attrs["permissions"])
        if "atime" in attrs and "mtime" in attrs:
            self.fs.utime(path, attrs["atime"], attrs["mtime"])

    def _getAttrs(self, s):
        # Convert a stat-like object into the SFTP attribute dict.
        return {
            "size": s.st_size,
            "uid": s.st_uid,
            "gid": s.st_gid,
            "permissions": s.st_mode,
            "atime": int(s.st_atime),
            "mtime": int(s.st_mtime),
        }

    def gotVersion(self, otherVersion, extData):
        return {}

    def openFile(self, filename, flags, attrs):
        # BUG FIX: the f-string had lost its placeholder.
        log.msg(f"SFTP openFile: {filename}")
        return CowrieSFTPFile(self, self._absPath(filename), flags, attrs)

    def removeFile(self, filename):
        # BUG FIX: the f-string had lost its placeholder.
        log.msg(f"SFTP removeFile: {filename}")
        return self.fs.remove(self._absPath(filename))

    def renameFile(self, oldpath, newpath):
        log.msg(f"SFTP renameFile: {oldpath} {newpath}")
        return self.fs.rename(self._absPath(oldpath), self._absPath(newpath))

    def makeDirectory(self, path, attrs):
        log.msg(f"SFTP makeDirectory: {path}")
        path = self._absPath(path)
        self.fs.mkdir2(path)
        self._setAttrs(path, attrs)

    def removeDirectory(self, path):
        log.msg(f"SFTP removeDirectory: {path}")
        return self.fs.rmdir(self._absPath(path))

    def openDirectory(self, path):
        log.msg(f"SFTP OpenDirectory: {path}")
        return CowrieSFTPDirectory(self, self._absPath(path))

    def getAttrs(self, path, followLinks):
        log.msg(f"SFTP getAttrs: {path}")
        path = self._absPath(path)
        if followLinks:
            s = self.fs.stat(path)
        else:
            s = self.fs.lstat(path)
        return self._getAttrs(s)

    def setAttrs(self, path, attrs):
        log.msg(f"SFTP setAttrs: {path}")
        path = self._absPath(path)
        return self._setAttrs(path, attrs)

    def readLink(self, path):
        log.msg(f"SFTP readLink: {path}")
        path = self._absPath(path)
        return self.fs.readlink(path)

    def makeLink(self, linkPath, targetPath):
        # Restored the masked ISFTPServer-mandated name (the log message
        # below already identified it as makeLink).
        log.msg(f"SFTP makeLink: {linkPath} {targetPath}")
        linkPath = self._absPath(linkPath)
        targetPath = self._absPath(targetPath)
        return self.fs.symlink(targetPath, linkPath)

    def realPath(self, path):
        return self.fs.realpath(self._absPath(path))

    def extendedRequest(self, extName, extData):
        raise NotImplementedError
import glob
import os
import os.path as osp
import shutil
from typing import Callable, Dict, List, Optional, Tuple
import torch
from torch import Tensor
from torch_geometric.data import (
Data,
InMemoryDataset,
download_url,
extract_zip,
)
from torch_geometric.io import read_off
class ModelNet(InMemoryDataset):
    r"""The ModelNet10/40 datasets from the `"3D ShapeNets: A Deep
    Representation for Volumetric Shapes"
    <https://people.csail.mit.edu/khosla/papers/cvpr2015_wu.pdf>`_ paper,
    containing CAD models of 10 and 40 categories, respectively.

    .. note::
        Data objects hold mesh faces instead of edge indices.
        To convert the mesh to a graph, use the
        :obj:`torch_geometric.transforms.FaceToEdge` as :obj:`pre_transform`.
        To convert the mesh to a point cloud, use the
        :obj:`torch_geometric.transforms.SamplePoints` as :obj:`transform` to
        sample a fixed number of points on the mesh faces according to their
        face area.

    Args:
        root (str): Root directory where the dataset should be saved.
        name (str, optional): The name of the dataset (:obj:`"10"` for
            ModelNet10, :obj:`"40"` for ModelNet40). (default: :obj:`"10"`)
        train (bool, optional): If :obj:`True`, loads the training dataset,
            otherwise the test dataset. (default: :obj:`True`)
        transform (callable, optional): A function/transform that takes in an
            :obj:`torch_geometric.data.Data` object and returns a transformed
            version. The data object will be transformed before every access.
            (default: :obj:`None`)
        pre_transform (callable, optional): A function/transform that takes in
            an :obj:`torch_geometric.data.Data` object and returns a
            transformed version. The data object will be transformed before
            being saved to disk. (default: :obj:`None`)
        pre_filter (callable, optional): A function that takes in an
            :obj:`torch_geometric.data.Data` object and returns a boolean
            value, indicating whether the data object should be included in the
            final dataset. (default: :obj:`None`)

    **STATS:**

    .. list-table::
        :widths: 20 10 10 10 10 10
        :header-rows: 1

        * - Name
          - #graphs
          - #nodes
          - #edges
          - #features
          - #classes
        * - ModelNet10
          - 4,899
          - ~9,508.2
          - ~37,450.5
          - 3
          - 10
        * - ModelNet40
          - 12,311
          - ~17,744.4
          - ~66,060.9
          - 3
          - 40
    """

    urls = {
        '10':
        'http://vision.princeton.edu/projects/2014/3DShapeNets/ModelNet10.zip',
        '40': 'http://modelnet.cs.princeton.edu/ModelNet40.zip'
    }

    def __init__(
        self,
        root: str,
        name: str = '10',
        train: bool = True,
        transform: Optional[Callable] = None,
        pre_transform: Optional[Callable] = None,
        pre_filter: Optional[Callable] = None,
    ):
        assert name in ['10', '40']
        self.name = name
        super().__init__(root, transform, pre_transform, pre_filter)
        path = self.processed_paths[0] if train else self.processed_paths[1]
        self.data, self.slices = torch.load(path)

    @property
    def raw_file_names(self) -> List[str]:
        # NOTE(review): these are the ModelNet10 categories; for name='40'
        # the raw-dir presence check is only partial — confirm upstream intent.
        return [
            'bathtub', 'bed', 'chair', 'desk', 'dresser', 'monitor',
            'night_stand', 'sofa', 'table', 'toilet'
        ]

    @property
    def processed_file_names(self) -> List[str]:
        return ['training.pt', 'test.pt']

    def download(self):
        # InMemoryDataset hook: fetch the zip, unpack it into `raw_dir`.
        path = download_url(self.urls[self.name], self.root)
        extract_zip(path, self.root)
        os.unlink(path)
        folder = osp.join(self.root, f'ModelNet{self.name}')
        shutil.rmtree(self.raw_dir)
        os.rename(folder, self.raw_dir)

        # Delete osx metadata generated during compression of ModelNet10
        metadata_folder = osp.join(self.root, '__MACOSX')
        if osp.exists(metadata_folder):
            shutil.rmtree(metadata_folder)

    def process(self):
        torch.save(self.process_set('train'), self.processed_paths[0])
        torch.save(self.process_set('test'), self.processed_paths[1])

    def process_set(self, dataset: str) -> Tuple[Data, Dict[str, Tensor]]:
        # One sub-directory per category; the directory name is the label.
        categories = glob.glob(osp.join(self.raw_dir, '*', ''))
        categories = sorted([x.split(os.sep)[-2] for x in categories])

        data_list = []
        for target, category in enumerate(categories):
            folder = osp.join(self.raw_dir, category, dataset)
            paths = glob.glob(f'{folder}/{category}_*.off')
            for path in paths:
                data = read_off(path)
                data.y = torch.tensor([target])
                data_list.append(data)

        if self.pre_filter is not None:
            data_list = [d for d in data_list if self.pre_filter(d)]

        if self.pre_transform is not None:
            data_list = [self.pre_transform(d) for d in data_list]

        return self.collate(data_list)

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}{self.name}({len(self)})'
import strax
import straxen
import tarfile
import io
import os
from warnings import warn
from os import environ as os_environ
from immutabledict import immutabledict
from importlib import import_module
import numpy as np
# Strax exporter: the @export decorator adds names to this module's __all__.
export, __all__ = strax.exporter()

# Run id of the nT test dataset downloaded by nt_test_context().
nt_test_run_id = '012882'
@export
def download_test_data(test_data='https://raw.githubusercontent.com/XENONnT/strax_auxiliary_files/353b2c60a01e96f67e4ba544ce284bd91241964d/strax_files/strax_test_data_straxv1.1.0.tar', # noqa
                       ):
    """Downloads strax test data to strax_test_data in the current directory"""
    blob = straxen.common.get_resource(test_data, fmt='binary')
    f = io.BytesIO(blob)
    tf = tarfile.open(fileobj=f)
    # NOTE(review): extractall() trusts the archive's member paths. The URL is
    # pinned to a commit, but consider a member/path filter if it ever changes.
    tf.extractall()
@export
def _overwrite_testing_function_file(function_file):
    """For testing purposes allow this function file to be loaded from HOME/testing_folder"""
    # Never use a local override outside of a pytest run.
    if not _is_on_pytest():
        return function_file

    home = os.environ.get('HOME')
    if home is None:
        # Impossible to load from a non-existent folder.
        return function_file

    local_copy = os.path.join(home, function_file)
    if not os.path.exists(local_copy):
        return function_file

    # For testing purposes allow loading from 'home/testing_folder'.
    warn(f'Using local function: {function_file} from {local_copy}! '
         f'If you are not integrated testing on github you should '
         f'absolutely remove this file. (See #559)')
    return local_copy
def is_installed(module):
    """Return True when *module* can be imported, False otherwise."""
    try:
        import_module(module)
    except ModuleNotFoundError:
        return False
    return True
@export
def _is_on_pytest():
    """Check if we are on a pytest"""
    # pytest sets PYTEST_CURRENT_TEST for the duration of each test.
    return os_environ.get('PYTEST_CURRENT_TEST') is not None
def _get_fake_daq_reader():
    """Return a DAQReader subclass with a fake version string.

    The changed __version__ guarantees test data written with it gets a
    lineage distinct from any real production data.
    """
    class DAQReader(straxen.DAQReader):
        """
        Dummy version of the DAQ reader to make sure that all the testing
        data produced here will have a different lineage
        """
        __version__ = "MOCKTESTDATA"
    return DAQReader
def nt_test_context(target_context='xenonnt_online',
                    deregister=(),
                    keep_default_storage=False,
                    **kwargs) -> strax.Context:
    """
    Get a dummy context with full nt-like data simulated data (except aqmon)
    to allow testing plugins

    :param target_context: Which contexts from straxen.contexts to test
    :param deregister: a list of plugins from the context
    :param keep_default_storage: if to True, keep the default context
        storage. Usually, you don't need this since all the data will be
        stored in a separate test data folder.
    :param kwargs: Any kwargs are passed to the target-context
    :return: a context
    """
    # Without utilix credentials, skip the database initialization.
    if not straxen.utilix_is_configured(warning_message=False):
        kwargs.setdefault('_database_init', False)

    st = getattr(straxen.contexts, target_context)(**kwargs)
    st.set_config({
        'diagnose_sorting': True, 'diagnose_overlapping': True, 'store_per_channel': True})
    # Fake DAQ reader => distinct lineage for all test data.
    st.register(_get_fake_daq_reader())
    # Raw records for run `nt_test_run_id`, pinned to a fixed commit.
    download_test_data('https://raw.githubusercontent.com/XENONnT/'
                       'strax_auxiliary_files/'
                       'f0d177401e11408b273564f0e29df77528e83d26/'
                       'strax_files/'
                       '012882-raw_records-z7q2d2ye2t.tar')
    if keep_default_storage:
        st.storage += [strax.DataDirectory('./strax_test_data')]
    else:
        st.storage = [strax.DataDirectory('./strax_test_data')]
    assert st.is_stored(nt_test_run_id, 'raw_records'), os.listdir(st.storage[-1].path)

    # Remove the requested plugins from the registry.
    to_remove = list(deregister)
    for plugin in to_remove:
        del st._plugin_class_registry[plugin]
    return st
def create_unique_intervals(size, time_range=(0, 40), allow_zero_length=True):
    """
    Hypothesis strategy which creates unique time intervals.

    :param size: Number of intervals desired. Can be less if non-unique
        intervals are found.
    :param time_range: Time range in which intervals should be.
    :param allow_zero_length: If true allow zero length intervals.
    """
    # Imported lazily so hypothesis is only needed when this is used.
    from hypothesis import strategies
    # Draw 2*size timestamps; pairs of sorted values become intervals.
    strat = strategies.lists(elements=strategies.integers(*time_range),
                             min_size=size * 2,
                             max_size=size * 2
                             ).map(lambda x: _convert_to_interval(x, allow_zero_length))
    return strat
def _convert_to_interval(time_stamps, allow_zero_length):
    """Pair up sorted timestamps into strax time intervals.

    Consecutive (even, odd) entries of the sorted input become the
    (start, end) of each interval; duplicates are dropped via np.unique.
    """
    time_stamps = np.sort(time_stamps)
    intervals = np.zeros(len(time_stamps) // 2, strax.time_dt_fields)
    intervals['dt'] = 1
    intervals['time'] = time_stamps[::2]
    intervals['length'] = time_stamps[1::2] - time_stamps[::2]
    if not allow_zero_length:
        intervals = intervals[intervals['length'] > 0]
    return np.unique(intervals)
@strax.takes_config(
    strax.Option('secret_time_offset', default=0, track=False),
    strax.Option('recs_per_chunk', default=10, track=False),
    strax.Option('n_chunks', default=2, track=False,
                 help='Number of chunks for the dummy raw records we are writing here'),
    strax.Option('channel_map', track=False, type=immutabledict,
                 help="frozendict mapping subdetector to (min, max) "
                      "channel number.")
)
class DummyRawRecords(strax.Plugin):
    """
    Provide dummy raw records for the major raw_record types
    """
    provides = straxen.daqreader.DAQReader.provides
    parallel = 'process'
    depends_on = tuple()
    data_kind = immutabledict(zip(provides, provides))
    rechunk_on_save = False
    dtype = {p: strax.raw_record_dtype() for p in provides}

    def setup(self):
        # Map provides-suffix -> channel_map key; 's_mv' is listed
        # separately because it would otherwise match 'aux_mv' endswith.
        self.channel_map_keys = {'he': 'he',
                                 'nv': 'nveto',
                                 'aqmon': 'aqmon',
                                 'aux_mv': 'aux_mv',
                                 's_mv': 'mv',
                                 }  # s_mv otherwise same as aux in endswith

    def source_finished(self):
        return True

    def is_ready(self, chunk_i):
        # Strax hook (restored masked name): a chunk is ready as long as
        # we have not yet produced all configured chunks.
        return chunk_i < self.config['n_chunks']

    def compute(self, chunk_i):
        t0 = chunk_i + self.config['secret_time_offset']
        if chunk_i < self.config['n_chunks'] - 1:
            # One filled chunk
            r = np.zeros(self.config['recs_per_chunk'], self.dtype['raw_records'])
            r['time'] = t0
            r['length'] = r['dt'] = 1
            r['channel'] = np.arange(len(r))
        else:
            # One empty chunk
            r = np.zeros(0, self.dtype['raw_records'])

        res = {}
        for p in self.provides:
            rr = np.copy(r)
            # Add detector specific channel offset:
            for key, channel_key in self.channel_map_keys.items():
                if channel_key not in self.config['channel_map']:
                    # Channel map for 1T is different.
                    continue
                if p.endswith(key):
                    first_channel, last_channel = self.config['channel_map'][channel_key]
                    rr['channel'] += first_channel
                    if key == 'aqmon':
                        # explicitly clip these channels as we have an
                        # additional check higher in the chain
                        first_channel = int(min(straxen.AqmonChannels))
                        last_channel = int(max(straxen.AqmonChannels))
                        rr = rr[(rr['channel'] >= first_channel) & (rr['channel'] < last_channel)]
            res[p] = self.chunk(start=t0, end=t0 + 1, data=rr, data_type=p)
        return res
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Base ExtensionActionCollection for extensions
"""
from PyQt5.QtWidgets import QMenu
import actioncollection
class ExtensionActionCollection(actioncollection.ActionCollection):
    """An ActionCollection descendant for use with extensions.

    Apart from the added functionality it's noteworthy that
    the 'name' variable is emptied and that the collection is local
    to the extension (i.e. not used in the global action collection
    manager).
    """

    name = ""

    def __init__(self, extension):
        self._extension = extension
        super().__init__(extension.mainwindow())
        # Initialize menu actions.
        # By default the Tools menu entry is set to None
        # (causing all actions to be used) while those for
        # the editor and music viewer are empty lists.
        # By overriding configure_menu_actions() any of the menus
        # can be configured manually.
        self._action_lists = {
            'tools': None,
            'editor': [],
            'musicview': [],
            'manuscriptview': []
        }
        # Interface for subclasses to override menus
        self.configure_menu_actions()
        # Interface for subclasses to handle actions
        self.connect_actions()
        # Interface for initializing actions
        self.load_settings()

    def by_text(self):
        """Returns a list with all actions, sorted by the display text,
        ignoring the & mnemonic character."""
        result = list(self._actions.values())
        result.sort(
            key=lambda action: ''.join(c for c in action.text() if c != "&"))
        return result

    def configure_menu_actions(self):
        """Can be used in subclasses to configure the menu action behaviour.
        Calling self.set_menu_action_list(target, actions) each of the menus
        can be configured separately (see comments there)."""
        pass

    def connect_actions(self):
        """Should be implemented when actions have to be connected
        globally during the extension's lifetime. Alternatively
        actions can be connected on demand by the extension itself."""
        pass

    def extension(self):
        return self._extension

    def load_settings(self):
        """Should be implemented to initialize the actions."""
        pass

    def menu_actions(self, target):
        """Returns a list of actions for use in the extension's entry
        in a menu. By default this is a list of all available
        actions, sorted by the display text. For custom sorting or
        a custom selection this method can be overridden."""
        action_list = self._action_lists.get(target, None)
        if action_list is None:
            # Lazily fall back to all actions sorted by display text.
            action_list = self._action_lists[target] = self.by_text()
        return action_list

    def set_menu_action_list(self, target, actions):
        """Specify the contents of a menu.

        Each entry in the _action_lists dictionary can be None or a list.
        If it is None then the self.by_text() method is used to create a flat
        list of all actions, sorted by display name (default for Tools menu).
        Otherwise a list of actions or submenus can be stored. The context
        menus are initialized to empty lists, meaning by default an extension
        does *not* get an entry in any context menu.
        """
        self._action_lists[target] = actions

    def settings(self):
        """Reference to the extension's settings() object."""
        return self.extension().settings()
import asyncio
import tempfile
import time
from threading import Thread
import dagster_pandas as dagster_pd
import pytest
from dagster import (
VersionStrategy,
file_relative_path,
job,
op,
reconstructable,
)
from dagster._core.definitions.input import In
from dagster._core.definitions.job_definition import JobDefinition
from dagster._core.events import DagsterEventType
from dagster._core.execution.api import execute_job, execute_run_iterator
from dagster._core.test_utils import instance_for_test, nesting_graph
from dagster._utils import send_interrupt
from dagster_dask import DataFrame, dask_executor
from dask.distributed import Scheduler, Worker
@op
def simple(_):
    """Op returning a constant; minimal payload for executor tests."""
    return 1
def dask_engine_job() -> JobDefinition:
    """Build a one-op job wired to the dask executor."""
    @job(
        executor_def=dask_executor,
    )
    def job_def():
        simple()
    return job_def
def test_execute_on_dask_local():
    """Execute a trivial job on an ephemeral local dask cluster."""
    with tempfile.TemporaryDirectory() as tempdir:
        with instance_for_test(temp_dir=tempdir) as instance:
            with execute_job(
                reconstructable(dask_engine_job),
                run_config={
                    "resources": {"io_manager": {"config": {"base_dir": tempdir}}},
                    "execution": {"config": {"cluster": {"local": {"timeout": 30}}}},
                },
                instance=instance,
            ) as result:
                assert result.output_for_node("simple") == 1
def dask_nested_graph_job():
    """Build a nested graph job (depth 6, fan-out 2) on the dask executor."""
    return nesting_graph(
        6,
        2,
    ).to_job(executor_def=dask_executor)
def test_composite_execute():
    """Nested graphs should execute successfully under the dask executor."""
    with instance_for_test() as instance:
        with execute_job(
            reconstructable(dask_nested_graph_job),
            run_config={
                "execution": {"config": {"cluster": {"local": {"timeout": 30}}}},
            },
            instance=instance,
        ) as result:
            assert result.success
@op(ins={"df": In(dagster_pd.DataFrame)})
def pandas_op(_, df):
pass
def pandas_job() -> JobDefinition:
    """Build a job around pandas_op, wired to the dask executor."""
    @job(
        executor_def=dask_executor,
    )
    def job_def():
        pandas_op()
    return job_def
def test_pandas_dask():
    """Run pandas_job on a local dask cluster, feeding the op a CSV input.

    (Restored the masked test name; it mirrors test_dask below.)
    """
    run_config = {
        "ops": {
            "pandas_op": {
                "inputs": {"df": {"csv": {"path": file_relative_path(__file__, "ex.csv")}}}
            }
        }
    }

    with instance_for_test() as instance:
        with execute_job(
            reconstructable(pandas_job),
            run_config={
                "execution": {"config": {"cluster": {"local": {"timeout": 30}}}},
                **run_config,
            },
            instance=instance,
        ) as result:
            assert result.success
@op(ins={"df": In(DataFrame)})
def dask_op(_, df):
pass
def dask_job() -> JobDefinition:
    """Build a job around dask_op, wired to the dask executor."""
    @job(
        executor_def=dask_executor,
    )
    def job_def():
        dask_op()
    return job_def
def test_dask():
    """Run dask_job on a local dask cluster with a dask-DataFrame CSV input."""
    run_config = {
        "ops": {
            "dask_op": {
                "inputs": {
                    "df": {"read": {"csv": {"path": file_relative_path(__file__, "ex*.csv")}}}
                }
            }
        }
    }
    with instance_for_test() as instance:
        with execute_job(
            reconstructable(dask_job),
            run_config={
                "execution": {"config": {"cluster": {"local": {"timeout": 30}}}},
                **run_config,
            },
            instance=instance,
        ) as result:
            assert result.success
@op(ins={"df": In(DataFrame)})
def sleepy_dask_op(_, df):
start_time = time.time()
while True:
time.sleep(0.1)
if time.time() - start_time > 120:
raise Exception("Timed out")
def sleepy_dask_job() -> JobDefinition:
    """Build a job around sleepy_dask_op for termination testing."""
    @job(
        executor_def=dask_executor,
    )
    def job_def():
        sleepy_dask_op()
    return job_def
@pytest.mark.skip("""
Fails because 'DagsterExecutionInterruptedError' is not actually raised-- there's a timeout
instead. It's not clear that the test ever was working-- prior to conversion to op/job/graph
APIs, it appears to have been mistakenly not using the dask executor.
""")
def test_dask_terminate():
run_config = {
"execution": {"config": {"cluster": {"local": {"timeout": 30}}}},
"ops": {
"sleepy_dask_op": {
"inputs": {
"df": {"read": {"csv": {"path": file_relative_path(__file__, "ex*.csv")}}}
}
}
},
}
interrupt_thread = None
result_types = []
with instance_for_test() as instance:
dagster_run = instance.create_run_for_job(
sleepy_dask_job(),
run_config=run_config,
)
for event in execute_run_iterator(
i_job=reconstructable(sleepy_dask_job),
dagster_run=dagster_run,
instance=instance,
):
# Interrupt once the first step starts
if event.event_type == DagsterEventType.STEP_START and not interrupt_thread:
interrupt_thread = Thread(target=send_interrupt, args=())
interrupt_thread.start()
if event.event_type == DagsterEventType.STEP_FAILURE:
assert "DagsterExecutionInterruptedError" in event.event_specific_data.error.message
result_types.append(event.event_type)
assert interrupt_thread
interrupt_thread.join()
assert DagsterEventType.STEP_FAILURE in result_types
assert DagsterEventType.PIPELINE_FAILURE in result_types
@pytest.mark.skip(
    "Failing with RuntimeError: This event loop is already running since distributed==2022.1.0"
)
def test_existing_scheduler():
    """Run a job against an externally created dask scheduler/worker pair."""
    def _execute(scheduler_address, instance):
        with execute_job(
            reconstructable(dask_engine_job),
            run_config={
                "execution": {"config": {"cluster": {"existing": {"address": scheduler_address}}}},
            },
            instance=instance,
        ) as result:
            return result

    async def _run_test():
        with instance_for_test() as instance:
            async with Scheduler() as scheduler:
                async with Worker(scheduler.address) as _:
                    # Run the blocking execute in a thread so the event
                    # loop keeps serving the scheduler/worker.
                    result = await asyncio.get_event_loop().run_in_executor(
                        None, _execute, scheduler.address, instance
                    )
                    assert result.success
                    assert result.output_for_node("simple") == 1

    asyncio.get_event_loop().run_until_complete(_run_test())
@op
def foo_op():
    """Op returning the constant string used by the memoization test."""
    return "foo"
class BasicVersionStrategy(VersionStrategy):
    """Version strategy assigning every op the same fixed version."""
    def get_op_version(self, _):
        return "foo"
def foo_job() -> JobDefinition:
    """Build a versioned (memoizable) one-op job on the dask executor."""
    @job(
        executor_def=dask_executor,
        version_strategy=BasicVersionStrategy(),
    )
    def job_def():
        foo_op()
    return job_def
def test_dask_executor_memoization():
    """A second identical run must be fully memoized (no step events).

    (Also removes the stray trailing "|" that made the final assert a
    syntax error.)
    """
    with instance_for_test() as instance:
        with execute_job(
            reconstructable(foo_job),
            instance=instance,
            run_config={"execution": {"config": {"cluster": {"local": {"timeout": 30}}}}},
        ) as result:
            assert result.success
            assert result.output_for_node("foo_op") == "foo"

        with execute_job(
            reconstructable(foo_job),
            instance=instance,
            run_config={"execution": {"config": {"cluster": {"local": {"timeout": 30}}}}},
        ) as result:
            assert result.success
            # Memoization means nothing re-executes: no node events at all.
            assert len(result.all_node_events) == 0
"""
Manage Django sites
"""
import os
import salt.exceptions
import salt.utils.path
# Define the module's virtual name
__virtualname__ = "django"
def __virtual__():
return __virtualname__
def _get_django_admin(bin_env):
"""
Return the django admin
"""
if not bin_env:
if salt.utils.path.which("django-admin.py"):
return "django-admin.py"
elif salt.utils.path.which("django-admin"):
return "django-admin"
else:
raise salt.exceptions.CommandExecutionError(
"django-admin or django-admin.py not found on PATH"
)
# try to get django-admin.py bin from env
if os.path.exists(os.path.join(bin_env, "bin", "django-admin.py")):
return os.path.join(bin_env, "bin", "django-admin.py")
return bin_env
def command(
settings_module,
command,
bin_env=None,
pythonpath=None,
env=None,
runas=None,
*args,
**kwargs
):
"""
Run arbitrary django management command
CLI Example:
.. code-block:: bash
salt '*' django.command <settings_module> <command>
"""
dja = _get_django_admin(bin_env)
cmd = "{} {} --settings={}".format(dja, command, settings_module)
if pythonpath:
cmd = "{} --pythonpath={}".format(cmd, pythonpath)
for arg in args:
cmd = "{} --{}".format(cmd, arg)
for key, value in kwargs.items():
if not key.startswith("__"):
cmd = "{} --{}={}".format(cmd, key, value)
return __salt__["cmd.run"](cmd, env=env, runas=runas, python_shell=False)
def syncdb(
    settings_module,
    bin_env=None,
    migrate=False,
    database=None,
    pythonpath=None,
    env=None,
    noinput=True,
    runas=None,
):
    """
    Run syncdb

    Execute the Django-Admin syncdb command. If South is available on the
    minion the ``migrate`` option can be passed as ``True``, calling the
    migrations to run after the syncdb completes.

    NOTE: The syncdb command was deprecated in Django 1.7 and removed in
    Django 1.9. For Django versions 1.9 or higher use the `migrate`
    command instead.

    CLI Example:

    .. code-block:: bash

        salt '*' django.syncdb <settings_module>
    """
    # Boolean options become bare --flags, in the original order.
    flags = [
        flag for flag, wanted in (("migrate", migrate), ("noinput", noinput)) if wanted
    ]
    extra = {"database": database} if database else {}
    return command(
        settings_module, "syncdb", bin_env, pythonpath, env, runas, *flags, **extra
    )
def migrate(
    settings_module,
    app_label=None,
    migration_name=None,
    bin_env=None,
    database=None,
    pythonpath=None,
    env=None,
    noinput=True,
    runas=None,
):
    """Execute the django-admin ``migrate`` command (requires Django >= 1.7).

    .. versionadded:: 3000

    settings_module
        Settings module to use, in Python package syntax
        (e.g. ``mysite.settings``).
    app_label
        Run migrations for a single app instead of all apps; dependencies
        may still pull in other apps' migrations.
    migration_name
        Named migration to bring the given app's schema to; earlier
        migrations may be unapplied.  Use ``zero`` to unapply all of the
        app's migrations.  Ignored when *app_label* is not given.
    bin_env
        Path to pip or to a virtualenv (a directory path is assumed to be
        a virtualenv).
    database
        Database to migrate.  Defaults to 'default'.
    pythonpath
        Extra filesystem path to add to the Python import search path.
    env
        A list of environment variables to be set prior to execution.
    noinput
        Suppresses all user prompts.  Defaults to True.
    runas
        The user name to run the command as.

    CLI Example:

    .. code-block:: bash

        salt '*' django.migrate <settings_module>
        salt '*' django.migrate <settings_module> <app_label>
        salt '*' django.migrate <settings_module> <app_label> <migration_name>
    """
    flags = ["noinput"] if noinput else []
    options = {}
    if database:
        options["database"] = database
    # Build "migrate [<app_label> [<migration_name>]]" from whichever
    # selectors were supplied.
    target = ["migrate"]
    if app_label:
        target.append(app_label)
        if migration_name:
            target.append(migration_name)
    return command(
        settings_module, " ".join(target), bin_env, pythonpath, env, runas, *flags, **options
    )
def createsuperuser(
    settings_module,
    username,
    email,
    bin_env=None,
    database=None,
    pythonpath=None,
    env=None,
    runas=None,
):
    """Create a superuser account for the project's database.

    The ``--noinput`` flag is always passed, which prevents the interactive
    prompt for the superuser's password.

    CLI Example:

    .. code-block:: bash

        salt '*' django.createsuperuser <settings_module> user user@example.com
    """
    # Keep the email-before-username ordering so the rendered command line
    # is stable.
    options = {"email": email, "username": username}
    if database:
        options["database"] = database
    return command(
        settings_module,
        "createsuperuser",
        bin_env,
        pythonpath,
        env,
        runas,
        "noinput",
        **options
    )
def METHOD_NAME(
    settings_module,
    fixtures,
    bin_env=None,
    database=None,
    pythonpath=None,
    env=None,
    runas=None,
):
    """Load fixture data via the django-admin ``loaddata`` command.

    NOTE(review): METHOD_NAME is a masked identifier in this snippet; the
    CLI example shows this is ``loaddata``.

    fixtures
        Comma separated list of fixtures to load.
    database
        Database to load the fixtures into; omitted from the command when
        not given.
    runas
        The user name to run the command as.  Added (defaulting to ``None``)
        so this wrapper supports ``runas`` like every other command wrapper
        in this module; existing callers are unaffected.

    CLI Example:

    .. code-block:: bash

        salt '*' django.loaddata <settings_module> <comma delimited list of fixtures>
    """
    options = {}
    if database:
        options["database"] = database
    # django-admin expects the fixture names space-separated.
    cmd = "{} {}".format("loaddata", " ".join(fixtures.split(",")))
    return command(settings_module, cmd, bin_env, pythonpath, env, runas, **options)
def collectstatic(
    settings_module,
    bin_env=None,
    no_post_process=False,
    ignore=None,
    dry_run=False,
    clear=False,
    link=False,
    no_default_ignore=False,
    pythonpath=None,
    env=None,
    runas=None,
):
    """Collect static files from each application into a single location
    that can easily be served in production.

    CLI Example:

    .. code-block:: bash

        salt '*' django.collectstatic <settings_module>
    """
    # Map the boolean parameters onto their --<flag> spellings, preserving
    # the order the flags are emitted in on the command line.
    flag_map = (
        (no_post_process, "no-post-process"),
        (dry_run, "dry-run"),
        (clear, "clear"),
        (link, "link"),
        (no_default_ignore, "no-default-ignore"),
    )
    flags = ["noinput"]
    flags.extend(flag for enabled, flag in flag_map if enabled)
    options = {"ignore": ignore} if ignore else {}
    return command(
        settings_module,
        "collectstatic",
        bin_env,
        pythonpath,
        env,
        runas,
        *flags,
        **options
    )
298,864 | test is dir | import os.path
import sys
import pathlib
import unittest
from importlib import import_module
from importlib.readers import MultiplexedPath, NamespaceReader
class MultiplexedPathTest(unittest.TestCase):
    """Exercises MultiplexedPath, which presents several directories as a
    single combined traversable path."""

    @classmethod
    def setUpClass(cls):
        # All tests read the on-disk fixture directory next to this file.
        path = pathlib.Path(__file__).parent / 'namespacedata01'
        cls.folder = str(path)

    def test_init_no_paths(self):
        # At least one path is required.
        with self.assertRaises(FileNotFoundError):
            MultiplexedPath()

    def test_init_file(self):
        # Regular files are rejected; only directories may be multiplexed.
        with self.assertRaises(NotADirectoryError):
            MultiplexedPath(os.path.join(self.folder, 'binary.file'))

    def test_iterdir(self):
        contents = {path.name for path in MultiplexedPath(self.folder).iterdir()}
        try:
            # __pycache__ may or may not exist depending on prior runs.
            contents.remove('__pycache__')
        except (KeyError, ValueError):
            pass
        self.assertEqual(contents, {'binary.file', 'utf-16.file', 'utf-8.file'})

    def test_iterdir_duplicate(self):
        # Names present in both directories must appear only once.
        data01 = os.path.abspath(os.path.join(__file__, '..', 'data01'))
        contents = {
            path.name for path in MultiplexedPath(self.folder, data01).iterdir()
        }
        for remove in ('__pycache__', '__init__.pyc'):
            try:
                contents.remove(remove)
            except (KeyError, ValueError):
                pass
        self.assertEqual(
            contents,
            {'__init__.py', 'binary.file', 'subdirectory', 'utf-16.file', 'utf-8.file'},
        )

    def METHOD_NAME(self):
        # NOTE(review): METHOD_NAME is a masked identifier in this snippet;
        # from the assertion this is evidently test_is_dir.
        self.assertEqual(MultiplexedPath(self.folder).is_dir(), True)

    def test_is_file(self):
        self.assertEqual(MultiplexedPath(self.folder).is_file(), False)

    def test_open_file(self):
        # The multiplexed path is directory-like, so direct reads fail.
        path = MultiplexedPath(self.folder)
        with self.assertRaises(FileNotFoundError):
            path.read_bytes()
        with self.assertRaises(FileNotFoundError):
            path.read_text()
        with self.assertRaises(FileNotFoundError):
            path.open()

    def test_join_path(self):
        prefix = os.path.abspath(os.path.join(__file__, '..'))
        data01 = os.path.join(prefix, 'data01')
        path = MultiplexedPath(self.folder, data01)
        # joinpath resolves against the first directory containing the name.
        self.assertEqual(
            str(path.joinpath('binary.file'))[len(prefix) + 1 :],
            os.path.join('namespacedata01', 'binary.file'),
        )
        self.assertEqual(
            str(path.joinpath('subdirectory'))[len(prefix) + 1 :],
            os.path.join('data01', 'subdirectory'),
        )
        # Unknown names fall back to the first directory.
        self.assertEqual(
            str(path.joinpath('imaginary'))[len(prefix) + 1 :],
            os.path.join('namespacedata01', 'imaginary'),
        )

    def test_repr(self):
        self.assertEqual(
            repr(MultiplexedPath(self.folder)),
            f"MultiplexedPath('{self.folder}')",
        )

    def test_name(self):
        self.assertEqual(
            MultiplexedPath(self.folder).name,
            os.path.basename(self.folder),
        )
class NamespaceReaderTest(unittest.TestCase):
    """Exercises NamespaceReader against the on-disk namespace package
    fixture 'namespacedata01'."""

    # Directory containing the fixture packages; appended to sys.path so the
    # namespace package can be imported by name.
    site_dir = str(pathlib.Path(__file__).parent)

    @classmethod
    def setUpClass(cls):
        sys.path.append(cls.site_dir)

    @classmethod
    def tearDownClass(cls):
        sys.path.remove(cls.site_dir)

    def test_init_error(self):
        # A plain list is rejected; presumably a namespace path object is
        # required — confirm against importlib.readers.
        with self.assertRaises(ValueError):
            NamespaceReader(['path1', 'path2'])

    def test_resource_path(self):
        namespacedata01 = import_module('namespacedata01')
        reader = NamespaceReader(namespacedata01.__spec__.submodule_search_locations)
        root = os.path.abspath(os.path.join(__file__, '..', 'namespacedata01'))
        self.assertEqual(
            reader.resource_path('binary.file'), os.path.join(root, 'binary.file')
        )
        # resource_path also answers for resources that do not exist.
        self.assertEqual(
            reader.resource_path('imaginary'), os.path.join(root, 'imaginary')
        )

    def test_files(self):
        namespacedata01 = import_module('namespacedata01')
        reader = NamespaceReader(namespacedata01.__spec__.submodule_search_locations)
        root = os.path.abspath(os.path.join(__file__, '..', 'namespacedata01'))
        self.assertIsInstance(reader.files(), MultiplexedPath)
        self.assertEqual(repr(reader.files()), f"MultiplexedPath('{root}')")
if __name__ == '__main__':
unittest.main() |
298,865 | name | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetClusterResult',
'AwaitableGetClusterResult',
'get_cluster',
'get_cluster_output',
]
@pulumi.output_type
class GetClusterResult:
    """
    A cluster resource
    """
    # NOTE(review): pulumi-generated code ("do not edit by hand").
    # METHOD_NAME is a masked identifier; the getter/setter strings show it
    # is `name`.
    def __init__(__self__, cluster_id=None, cluster_size=None, hosts=None, id=None, METHOD_NAME=None, provisioning_state=None, sku=None, type=None):
        # Each argument is type-checked before being stored via pulumi.set.
        if cluster_id and not isinstance(cluster_id, int):
            raise TypeError("Expected argument 'cluster_id' to be a int")
        pulumi.set(__self__, "cluster_id", cluster_id)
        if cluster_size and not isinstance(cluster_size, int):
            raise TypeError("Expected argument 'cluster_size' to be a int")
        pulumi.set(__self__, "cluster_size", cluster_size)
        if hosts and not isinstance(hosts, list):
            raise TypeError("Expected argument 'hosts' to be a list")
        pulumi.set(__self__, "hosts", hosts)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", METHOD_NAME)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if sku and not isinstance(sku, dict):
            raise TypeError("Expected argument 'sku' to be a dict")
        pulumi.set(__self__, "sku", sku)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(METHOD_NAME="clusterId")
    def cluster_id(self) -> int:
        """
        The identity
        """
        return pulumi.get(self, "cluster_id")

    @property
    @pulumi.getter(METHOD_NAME="clusterSize")
    def cluster_size(self) -> Optional[int]:
        """
        The cluster size
        """
        return pulumi.get(self, "cluster_size")

    @property
    @pulumi.getter
    def hosts(self) -> Optional[Sequence[str]]:
        """
        The hosts
        """
        return pulumi.get(self, "hosts")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(METHOD_NAME="provisioningState")
    def provisioning_state(self) -> str:
        """
        The state of the cluster provisioning
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def sku(self) -> 'outputs.SkuResponse':
        """
        The cluster SKU
        """
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetClusterResult(GetClusterResult):
    # Awaitable wrapper: the `if False: yield` makes __await__ a generator
    # that never suspends and immediately returns the plain result.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetClusterResult(
            cluster_id=self.cluster_id,
            cluster_size=self.cluster_size,
            hosts=self.hosts,
            id=self.id,
            METHOD_NAME=self.METHOD_NAME,
            provisioning_state=self.provisioning_state,
            sku=self.sku,
            type=self.type)
def get_cluster(cluster_name: Optional[str] = None,
                private_cloud_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClusterResult:
    """
    A cluster resource


    :param str cluster_name: Name of the cluster in the private cloud
    :param str private_cloud_name: Name of the private cloud
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    __args__ = dict()
    __args__['clusterName'] = cluster_name
    __args__['privateCloudName'] = private_cloud_name
    __args__['resourceGroupName'] = resource_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous provider invoke; the raw result is re-wrapped field by
    # field into the awaitable result type.
    __ret__ = pulumi.runtime.invoke('azure-native:avs/v20220501:getCluster', __args__, opts=opts, typ=GetClusterResult).value
    return AwaitableGetClusterResult(
        cluster_id=pulumi.get(__ret__, 'cluster_id'),
        cluster_size=pulumi.get(__ret__, 'cluster_size'),
        hosts=pulumi.get(__ret__, 'hosts'),
        id=pulumi.get(__ret__, 'id'),
        METHOD_NAME=pulumi.get(__ret__, 'name'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        sku=pulumi.get(__ret__, 'sku'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_cluster)
def get_cluster_output(cluster_name: Optional[pulumi.Input[str]] = None,
                       private_cloud_name: Optional[pulumi.Input[str]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetClusterResult]:
    """
    A cluster resource


    :param str cluster_name: Name of the cluster in the private cloud
    :param str private_cloud_name: Name of the private cloud
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Output-lifted variant of get_cluster; the decorator supplies the body.
    ...
298,866 | thrower | # Copyright The Caikit Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Standard
import multiprocessing
import time
# Third Party
import pytest
# Local
from caikit.core.toolkit.destroyable_process import DestroyableProcess
## Helpers #####################################################################
# Sentinels shared by the DestroyableProcess tests below.
EXPECTED_THROW = ValueError("test-any-error")
EXPECTED_SUCCESS = "test-any-result"


def infinite_wait():
    # Never returns; used to verify a running process can be destroyed.
    while True:
        time.sleep(0.1)


def long_sleep():
    # Sleeps far longer than any test runs; only used where the process is
    # destroyed before it starts.
    time.sleep(1000)


def METHOD_NAME():
    # NOTE(review): METHOD_NAME is a masked identifier (row label "thrower"):
    # a target that always raises the shared sentinel exception.
    raise EXPECTED_THROW


def succeeder():
    # Target that returns the shared sentinel value.
    return EXPECTED_SUCCESS


@pytest.fixture(
    params=["fork", "forkserver", "spawn"],
)
def process_type(request):
    # Parametrizes every test across the multiprocessing start methods.
    yield request.param
## Tests #######################################################################
def test_processes_can_be_interrupted(process_type):
    proc = DestroyableProcess(process_type, infinite_wait)
    proc.start()
    # Nothing has happened to the freshly started process yet.
    assert not proc.destroyed
    assert not proc.canceled
    assert not proc.ran
    assert not proc.threw
    proc.destroy()
    proc.join(60)
    assert not proc.is_alive()
    # Destroying a started process marks it destroyed, canceled and ran.
    assert proc.destroyed
    assert proc.canceled
    assert proc.ran
    assert not proc.threw


def test_processes_can_return_results(process_type):
    proc = DestroyableProcess(process_type, succeeder, return_result=True)
    proc.start()
    proc.join()
    # The child's return value crosses the process boundary.
    assert EXPECTED_SUCCESS == proc.get_or_throw()
    assert not proc.destroyed
    assert not proc.canceled
    assert proc.ran
    assert not proc.threw


def test_process_not_canceled_after_success(process_type):
    proc = DestroyableProcess(process_type, succeeder)
    proc.start()
    proc.join()
    assert not proc.canceled
    # Destroying after completion must not retroactively mark it canceled.
    proc.destroy()
    assert not proc.canceled


def test_processes_can_be_set_to_not_return_results(process_type):
    proc = DestroyableProcess(process_type, succeeder, return_result=False)
    proc.start()
    proc.join()
    # With return_result=False the result is dropped, not treated as error.
    assert proc.get_or_throw() is None
    assert not proc.destroyed
    assert not proc.canceled
    assert proc.ran
    assert not proc.threw


def test_processes_can_throw(process_type):
    proc = DestroyableProcess(process_type, METHOD_NAME)
    proc.start()
    proc.join()
    assert not proc.destroyed
    assert not proc.canceled
    assert proc.ran
    assert proc.threw
    # The child's exception is re-raised in the parent by get_or_throw().
    with pytest.raises(ValueError) as ctx:
        proc.get_or_throw()
    assert str(EXPECTED_THROW) == str(ctx.value)


def test_processes_will_not_execute_if_destroyed_before_starting(process_type):
    proc = DestroyableProcess(process_type, long_sleep)
    proc.destroy()
    proc.start()
    # start() on an already-destroyed process never runs the target.
    assert not proc.is_alive()
    proc.join()
    with pytest.raises(RuntimeError):
        proc.get_or_throw()
    assert proc.destroyed
    assert proc.canceled
    assert not proc.ran
    assert proc.threw


def test_event_is_set_on_completion(process_type):
    event = multiprocessing.get_context(process_type).Event()
    proc = DestroyableProcess(process_type, succeeder, completion_event=event)
    assert not event.is_set()
    proc.start()
    proc.join()
    assert event.is_set()
    assert not proc.destroyed
    assert not proc.canceled
    assert proc.ran
    assert not proc.threw


def test_event_is_set_on_exception(process_type):
    event = multiprocessing.get_context(process_type).Event()
    proc = DestroyableProcess(process_type, METHOD_NAME, completion_event=event)
    assert not event.is_set()
    proc.start()
    proc.join()
    # The completion event fires even when the target raised.
    assert event.is_set()
    assert not proc.destroyed
    assert not proc.canceled
    assert proc.ran
    assert proc.threw
def test_default_event_is_set_on_completion(process_type):
    """When no completion_event is supplied, the process provides its own
    event and sets it once the target finishes."""
    worker = DestroyableProcess(process_type, succeeder)
    assert not worker.completion_event.is_set()
    worker.start()
    worker.join()
    assert worker.completion_event.is_set()
298,867 | stop build | from moto.core.responses import BaseResponse
from .models import codebuild_backends, CodeBuildBackend
from .exceptions import (
InvalidInputException,
ResourceAlreadyExistsException,
ResourceNotFoundException,
)
import json
import re
from typing import Any, Dict, List
def _validate_required_params_source(source: Dict[str, Any]) -> None:
if source["type"] not in [
"BITBUCKET",
"CODECOMMIT",
"CODEPIPELINE",
"GITHUB",
"GITHUB_ENTERPRISE",
"NO_SOURCE",
"S3",
]:
raise InvalidInputException("Invalid type provided: Project source type")
if "location" not in source:
raise InvalidInputException("Project source location is required")
if source["location"] == "":
raise InvalidInputException("Project source location is required")
def _validate_required_params_service_role(account_id: str, service_role: str) -> None:
if not service_role.startswith(f"arn:aws:iam::{account_id}:role/"):
raise InvalidInputException(
"Invalid service role: Service role account ID does not match caller's account"
)
def _validate_required_params_artifacts(artifacts: Dict[str, Any]) -> None:
if artifacts["type"] not in ["CODEPIPELINE", "S3", "NO_ARTIFACTS"]:
raise InvalidInputException("Invalid type provided: Artifact type")
if artifacts["type"] == "NO_ARTIFACTS":
if "location" in artifacts:
raise InvalidInputException(
"Invalid artifacts: artifact type NO_ARTIFACTS should have null location"
)
elif "location" not in artifacts or artifacts["location"] == "":
raise InvalidInputException("Project source location is required")
def _validate_required_params_environment(environment: Dict[str, Any]) -> None:
if environment["type"] not in [
"WINDOWS_CONTAINER",
"LINUX_CONTAINER",
"LINUX_GPU_CONTAINER",
"ARM_CONTAINER",
]:
raise InvalidInputException(f"Invalid type provided: {environment['type']}")
if environment["computeType"] not in [
"BUILD_GENERAL1_SMALL",
"BUILD_GENERAL1_MEDIUM",
"BUILD_GENERAL1_LARGE",
"BUILD_GENERAL1_2XLARGE",
]:
raise InvalidInputException(
f"Invalid compute type provided: {environment['computeType']}"
)
def _validate_required_params_project_name(name: str) -> None:
if len(name) >= 150:
raise InvalidInputException(
"Only alphanumeric characters, dash, and underscore are supported"
)
if not re.match(r"^[A-Za-z]{1}.*[^!£$%^&*()+=|?`¬{}@~#:;<>\\/\[\]]$", name):
raise InvalidInputException(
"Only alphanumeric characters, dash, and underscore are supported"
)
def _validate_required_params_id(build_id: str, build_ids: List[str]) -> None:
if ":" not in build_id:
raise InvalidInputException("Invalid build ID provided")
if build_id not in build_ids:
raise ResourceNotFoundException(f"Build {build_id} does not exist")
class CodeBuildResponse(BaseResponse):
    """Maps mocked AWS CodeBuild API requests onto the in-memory backend."""

    @property
    def codebuild_backend(self) -> CodeBuildBackend:
        # Backend instance for the account/region of the current request.
        return codebuild_backends[self.current_account][self.region]

    def list_builds_for_project(self) -> str:
        _validate_required_params_project_name(self._get_param("projectName"))
        if (
            self._get_param("projectName")
            not in self.codebuild_backend.codebuild_projects.keys()
        ):
            name = self._get_param("projectName")
            raise ResourceNotFoundException(
                f"The provided project arn:aws:codebuild:{self.region}:{self.current_account}:project/{name} does not exist"
            )
        ids = self.codebuild_backend.list_builds_for_project(
            self._get_param("projectName")
        )
        return json.dumps({"ids": ids})

    def create_project(self) -> str:
        # Validate every required section of the request before touching
        # backend state.
        _validate_required_params_source(self._get_param("source"))
        service_role = self._get_param("serviceRole")
        _validate_required_params_service_role(self.current_account, service_role)
        _validate_required_params_artifacts(self._get_param("artifacts"))
        _validate_required_params_environment(self._get_param("environment"))
        _validate_required_params_project_name(self._get_param("name"))
        if self._get_param("name") in self.codebuild_backend.codebuild_projects.keys():
            name = self._get_param("name")
            raise ResourceAlreadyExistsException(
                f"Project already exists: arn:aws:codebuild:{self.region}:{self.current_account}:project/{name}"
            )
        project_metadata = self.codebuild_backend.create_project(
            self._get_param("name"),
            self._get_param("source"),
            self._get_param("artifacts"),
            self._get_param("environment"),
            service_role=service_role,
        )
        return json.dumps({"project": project_metadata})

    def list_projects(self) -> str:
        project_metadata = self.codebuild_backend.list_projects()
        return json.dumps({"projects": project_metadata})

    def start_build(self) -> str:
        _validate_required_params_project_name(self._get_param("projectName"))
        if (
            self._get_param("projectName")
            not in self.codebuild_backend.codebuild_projects.keys()
        ):
            name = self._get_param("projectName")
            raise ResourceNotFoundException(
                f"Project cannot be found: arn:aws:codebuild:{self.region}:{self.current_account}:project/{name}"
            )
        metadata = self.codebuild_backend.start_build(
            self._get_param("projectName"),
            self._get_param("sourceVersion"),
            self._get_param("artifactsOverride"),
        )
        return json.dumps({"build": metadata})

    def batch_get_builds(self) -> str:
        # Only the id format is validated here; unknown ids are left for the
        # backend to handle.
        for build_id in self._get_param("ids"):
            if ":" not in build_id:
                raise InvalidInputException("Invalid build ID provided")
        metadata = self.codebuild_backend.batch_get_builds(self._get_param("ids"))
        return json.dumps({"builds": metadata})

    def list_builds(self) -> str:
        ids = self.codebuild_backend.list_builds()
        return json.dumps({"ids": ids})

    def delete_project(self) -> str:
        _validate_required_params_project_name(self._get_param("name"))
        self.codebuild_backend.delete_project(self._get_param("name"))
        return "{}"
def METHOD_NAME(self) -> str:
_validate_required_params_id(
self._get_param("id"), self.codebuild_backend.list_builds()
)
metadata = self.codebuild_backend.METHOD_NAME(self._get_param("id"))
return json.dumps({"build": metadata}) |
298,868 | settings build | from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.apple import fix_apple_shared_install_name
from conan.tools.build import cross_building
from conan.tools.env import VirtualBuildEnv
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rename, rm, rmdir
from conan.tools.gnu import Autotools, AutotoolsToolchain, PkgConfigDeps
from conan.tools.layout import basic_layout
from conan.tools.microsoft import check_min_vs, is_msvc, msvc_runtime_flag
import os
required_conan_version = ">=1.57.0"
class CoinOsiConan(ConanFile):
    """Conan recipe for COIN-OR Osi, built with autotools."""
    name = "coin-osi"
    description = "COIN-OR Linear Programming Solver"
    topics = ("clp", "simplex", "solver", "linear", "programming")
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/coin-or/Osi"
    license = "EPL-2.0"
    package_type = "library"
    settings = "os", "arch", "build_type", "compiler"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
    }

    @property
    def METHOD_NAME(self):
        # NOTE(review): METHOD_NAME is a masked identifier; by convention in
        # conan-center recipes this helper is `_settings_build`.  Falls back
        # to the host settings on clients without two-profile support.
        return getattr(self, "settings_build", self.settings)

    def export_sources(self):
        export_conandata_patches(self)

    def config_options(self):
        # fPIC is meaningless on Windows.
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        # Shared builds imply position-independent code; drop the option.
        if self.options.shared:
            self.options.rm_safe("fPIC")

    def layout(self):
        basic_layout(self, src_folder="src")

    def requirements(self):
        self.requires("coin-utils/2.11.6")

    def validate(self):
        if self.settings.os == "Windows" and self.options.shared:
            raise ConanInvalidConfiguration("coin-osi does not support shared builds on Windows")
        # FIXME: This issue likely comes from very old autotools versions used to produce configure.
        if hasattr(self, "settings_build") and cross_building(self) and self.options.shared:
            raise ConanInvalidConfiguration("coin-osi shared not supported yet when cross-building")

    def build_requirements(self):
        self.tool_requires("gnu-config/cci.20210814")
        if not self.conf.get("tools.gnu:pkg_config", check_type=str):
            self.tool_requires("pkgconf/1.9.3")
        if self.METHOD_NAME.os == "Windows":
            # Autotools needs a POSIX shell on Windows build machines.
            self.win_bash = True
            if not self.conf.get("tools.microsoft.bash:path", check_type=str):
                self.tool_requires("msys2/cci.latest")

    def source(self):
        get(self, **self.conan_data["sources"][self.version], strip_root=True)

    def generate(self):
        env = VirtualBuildEnv(self)
        env.generate()
        tc = AutotoolsToolchain(self)
        tc.configure_args.extend([
            "--without-blas",
            "--without-lapack",
        ])
        if is_msvc(self):
            tc.extra_cxxflags.append("-EHsc")
            tc.configure_args.append(f"--enable-msvc={msvc_runtime_flag(self)}")
            if check_min_vs(self, "180", raise_invalid=False):
                tc.extra_cflags.append("-FS")
                tc.extra_cxxflags.append("-FS")
        env = tc.environment()
        if is_msvc(self):
            # Drive autotools with cl/link/lib when building with MSVC.
            env.define("CC", "cl -nologo")
            env.define("CXX", "cl -nologo")
            env.define("LD", "link -nologo")
            env.define("AR", "lib -nologo")
        if self.METHOD_NAME.os == "Windows":
            # TODO: Something to fix in conan client or pkgconf recipe?
            # This is a weird workaround when build machine is Windows. Here we have to inject regular
            # Windows path to pc files folder instead of unix path flavor injected by AutotoolsToolchain...
            env.define("PKG_CONFIG_PATH", self.generators_folder)
        tc.generate(env)
        deps = PkgConfigDeps(self)
        deps.generate()

    def build(self):
        apply_conandata_patches(self)
        # Refresh config.guess/config.sub so newer platforms are recognized.
        for gnu_config in [
            self.conf.get("user.gnu-config:config_guess", check_type=str),
            self.conf.get("user.gnu-config:config_sub", check_type=str),
        ]:
            if gnu_config:
                copy(self, os.path.basename(gnu_config), src=os.path.dirname(gnu_config), dst=self.source_folder)
        autotools = Autotools(self)
        autotools.configure()
        autotools.make()

    def package(self):
        copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
        autotools = Autotools(self)
        # -j1: serial install (presumably to avoid an install race) — TODO confirm.
        autotools.install(args=["-j1"])
        rm(self, "*.la", os.path.join(self.package_folder, "lib"))
        rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
        rmdir(self, os.path.join(self.package_folder, "share"))
        fix_apple_shared_install_name(self)
        if is_msvc(self):
            # Normalize MSVC library names (libOsi.lib -> Osi.lib).
            for l in ("Osi", "OsiCommonTests"):
                rename(self, os.path.join(self.package_folder, "lib", f"lib{l}.lib"),
                       os.path.join(self.package_folder, "lib", f"{l}.lib"))
def package_info(self):
self.cpp_info.components["libosi"].set_property("pkg_config_name", "osi")
self.cpp_info.components["libosi"].libs = ["Osi"]
self.cpp_info.components["libosi"].includedirs = [os.path.join("include", "coin")]
self.cpp_info.components["libosi"].requires = ["coin-utils::coin-utils"]
self.cpp_info.components["osi-unittests"].set_property("pkg_config_name", "osi-unittests")
self.cpp_info.components["osi-unittests"].libs = ["OsiCommonTests"]
self.cpp_info.components["osi-unittests"].requires = ["libosi"] |
298,869 | retry | from __future__ import annotations
import resource
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
from typing import Any, Callable, Iterable, Sequence, Type
# XXX(mdtro): backwards compatible imports for celery 4.4.7, remove after upgrade to 5.2.7
import celery
from sentry.silo.base import SiloLimit, SiloMode
if celery.version_info >= (5, 2):
from celery import current_task
else:
from celery.task import current as current_task
from sentry.celery import app
from sentry.utils import metrics
from sentry.utils.sdk import capture_exception, configure_scope
class TaskSiloLimit(SiloLimit):
    """
    Silo limiter for celery tasks

    We don't want tasks to be spawned in the incorrect silo.
    We can't reliably cause tasks to fail as not all tasks use
    the ORM (which also has silo bound safety).
    """

    def handle_when_unavailable(
        self,
        original_method: Callable[..., Any],
        current_mode: SiloMode,
        available_modes: Iterable[SiloMode],
    ) -> Callable[..., Any]:
        # Return a stand-in that raises instead of running or scheduling
        # the task in a silo where it is not available.
        def handle(*args: Any, **kwargs: Any) -> Any:
            name = original_method.__name__
            message = f"Cannot call or spawn {name} in {current_mode},"
            raise self.AvailabilityError(message)

        return handle

    def __call__(self, decorated_task: Any) -> Any:
        # Replace the celery.Task interface we use.
        # Every entry point that can invoke or schedule the task is wrapped
        # so none of them can escape the silo restriction.
        replacements = {"delay", "apply_async", "s", "signature", "retry", "apply", "run"}
        for attr_name in replacements:
            task_attr = getattr(decorated_task, attr_name)
            if callable(task_attr):
                limited_attr = self.create_override(task_attr)
                setattr(decorated_task, attr_name, limited_attr)
        limited_func = self.create_override(decorated_task)
        if hasattr(decorated_task, "name"):
            # Preserve the celery task name so registration still works.
            limited_func.name = decorated_task.name
        return limited_func
def get_rss_usage():
    """Return this process's peak resident set size as reported by getrusage."""
    usage = resource.getrusage(resource.RUSAGE_SELF)
    return usage.ru_maxrss
@contextmanager
def track_memory_usage(metric, **kwargs):
    """Context manager recording the RSS delta across the wrapped block
    under *metric* (extra kwargs are forwarded to metrics.timing)."""
    start = get_rss_usage()
    try:
        yield
    finally:
        # Emit even when the block raised, so failures are still measured.
        metrics.timing(metric, get_rss_usage() - start, **kwargs)
def load_model_from_db(cls, instance_or_id, allow_cache=True):
    """Resolve *instance_or_id* to a model instance.

    Lets a task accept a primary key instead of a model instance; anything
    that is not an ``int`` is assumed to already be an instance and is
    returned unchanged.
    """
    if not isinstance(instance_or_id, int):
        return instance_or_id
    if allow_cache and hasattr(cls.objects, "get_from_cache"):
        return cls.objects.get_from_cache(pk=instance_or_id)
    return cls.objects.get(pk=instance_or_id)
def instrumented_task(name, stat_suffix=None, silo_mode=None, record_timing=False, **kwargs):
    """
    Decorator for defining celery tasks.

    Includes a few application specific batteries like:

    - statsd metrics for duration and memory usage.
    - sentry sdk tagging.
    - hybrid cloud silo restrictions
    - disabling of result collection.
    """

    def wrapped(func):
        @wraps(func)
        def _wrapped(*args, **kwargs):
            # TODO(dcramer): we want to tag a transaction ID, but overriding
            # the base on app.task seems to cause problems w/ Celery internals
            transaction_id = kwargs.pop("__transaction_id", None)
            start_time = kwargs.pop("__start_time", None)
            key = "jobs.duration"
            if stat_suffix:
                # stat_suffix derives a per-call metric instance from the args.
                instance = f"{name}.{stat_suffix(*args, **kwargs)}"
            else:
                instance = name
            if start_time and record_timing:
                # Time spent waiting in the queue, in milliseconds.
                curr_time = datetime.now().timestamp()
                duration = (curr_time - start_time) * 1000
                metrics.timing(
                    "jobs.queue_time",
                    duration,
                    instance=instance,
                )
            with configure_scope() as scope:
                scope.set_tag("task_name", name)
                scope.set_tag("transaction_id", transaction_id)
                with metrics.timer(key, instance=instance), track_memory_usage(
                    "jobs.memory_change", instance=instance
                ):
                    result = func(*args, **kwargs)
                return result

        # We never use result backends in Celery. Leaving `trail=True` means that if we schedule
        # many tasks from a parent task, each task leaks memory. This can lead to the scheduler
        # being OOM killed.
        kwargs["trail"] = False
        task = app.task(name=name, **kwargs)(_wrapped)

        if silo_mode:
            # Restrict the task's entry points to the configured silo.
            silo_limiter = TaskSiloLimit(silo_mode)
            return silo_limiter(task)
        return task

    return wrapped
def METHOD_NAME(
    func: Callable[..., Any] | None = None,
    on: Sequence[Type[Exception]] = (Exception,),
    exclude: Sequence[Type[Exception]] = (),
    ignore: Sequence[Type[Exception]] = (),
) -> Callable[..., Callable[..., Any]]:
    """
    >>> @retry(on=(Exception,), exclude=(AnotherException,), ignore=(IgnorableException,))
    >>> def my_task():
    >>>     ...
    """
    # NOTE(review): METHOD_NAME is a masked identifier; the doctest shows it
    # is `retry`.  Supports both bare (@retry) and parameterised (@retry(...))
    # usage: a bare callable is re-dispatched through the parameterised form.
    if func:
        return METHOD_NAME()(func)

    def inner(func):
        @wraps(func)
        def wrapped(*args, **kwargs):
            # Clause order matters: ignore wins over exclude, which wins
            # over the retryable `on` types.
            try:
                return func(*args, **kwargs)
            except ignore:
                # Swallow entirely: neither reported nor retried.
                return
            except exclude:
                # Propagate without scheduling a celery retry.
                raise
            except on as exc:
                # Report to Sentry, then ask celery to retry the task.
                capture_exception()
                current_task.METHOD_NAME(exc=exc)

        return wrapped

    return inner
def track_group_async_operation(function):
    """Decorator emitting a ``group.update.async_response`` counter whose
    ``status`` tag reflects the wrapped call's outcome (200/429/500)."""
    metric_name = "group.update.async_response"

    def wrapper(*args, **kwargs):
        from sentry.utils import snuba

        try:
            response = function(*args, **kwargs)
            # A literal False response is treated as a server-side failure.
            status = 500 if response is False else 200
            metrics.incr(metric_name, sample_rate=1.0, tags={"status": status})
            return response
        except snuba.RateLimitExceeded:
            metrics.incr(metric_name, sample_rate=1.0, tags={"status": 429})
            raise
        except Exception:
            # Continue raising the error now that we've incr the metric
            metrics.incr(metric_name, sample_rate=1.0, tags={"status": 500})
            raise

    return wrapper
298,870 | destination | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetWorkloadNetworkPortMirroringResult',
'AwaitableGetWorkloadNetworkPortMirroringResult',
'get_workload_network_port_mirroring',
'get_workload_network_port_mirroring_output',
]
@pulumi.output_type
class GetWorkloadNetworkPortMirroringResult:
    """
    NSX Port Mirroring
    """
    # NOTE: pulumi-generated output type. Each constructor argument is
    # validated as a plain primitive, stored via pulumi.set, and read back
    # through the matching @property below.
    def __init__(__self__, METHOD_NAME=None, direction=None, display_name=None, id=None, name=None, provisioning_state=None, revision=None, source=None, status=None, type=None):
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'destination' to be a str")
        pulumi.set(__self__, "destination", METHOD_NAME)
        if direction and not isinstance(direction, str):
            raise TypeError("Expected argument 'direction' to be a str")
        pulumi.set(__self__, "direction", direction)
        if display_name and not isinstance(display_name, str):
            raise TypeError("Expected argument 'display_name' to be a str")
        pulumi.set(__self__, "display_name", display_name)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if revision and not isinstance(revision, float):
            raise TypeError("Expected argument 'revision' to be a float")
        pulumi.set(__self__, "revision", revision)
        if source and not isinstance(source, str):
            raise TypeError("Expected argument 'source' to be a str")
        pulumi.set(__self__, "source", source)
        if status and not isinstance(status, str):
            raise TypeError("Expected argument 'status' to be a str")
        pulumi.set(__self__, "status", status)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> Optional[str]:
        """
        Destination VM Group.
        """
        return pulumi.get(self, "destination")

    @property
    @pulumi.getter
    def direction(self) -> Optional[str]:
        """
        Direction of port mirroring profile.
        """
        return pulumi.get(self, "direction")

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[str]:
        """
        Display name of the port mirroring profile.
        """
        return pulumi.get(self, "display_name")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def revision(self) -> Optional[float]:
        """
        NSX revision number.
        """
        return pulumi.get(self, "revision")

    @property
    @pulumi.getter
    def source(self) -> Optional[str]:
        """
        Source VM Group.
        """
        return pulumi.get(self, "source")

    @property
    @pulumi.getter
    def status(self) -> str:
        """
        Port Mirroring Status.
        """
        return pulumi.get(self, "status")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetWorkloadNetworkPortMirroringResult(GetWorkloadNetworkPortMirroringResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Standard pulumi shim to make the result awaitable without ever
        # suspending: the dead `if False: yield` turns this into a generator
        # function, and the return value becomes the awaited result.
        if False:
            yield self
        return GetWorkloadNetworkPortMirroringResult(
            METHOD_NAME=self.METHOD_NAME,
            direction=self.direction,
            display_name=self.display_name,
            id=self.id,
            name=self.name,
            provisioning_state=self.provisioning_state,
            revision=self.revision,
            source=self.source,
            status=self.status,
            type=self.type)
def get_workload_network_port_mirroring(port_mirroring_id: Optional[str] = None,
                                        private_cloud_name: Optional[str] = None,
                                        resource_group_name: Optional[str] = None,
                                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkloadNetworkPortMirroringResult:
    """
    NSX Port Mirroring

    :param str port_mirroring_id: NSX Port Mirroring identifier. Generally the same as the Port Mirroring display name
    :param str private_cloud_name: Name of the private cloud
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    __args__ = dict()
    __args__['portMirroringId'] = port_mirroring_id
    __args__['privateCloudName'] = private_cloud_name
    __args__['resourceGroupName'] = resource_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous invoke against the azure-native provider; `.value` carries
    # the typed result populated by the engine.
    __ret__ = pulumi.runtime.invoke('azure-native:avs/v20220501:getWorkloadNetworkPortMirroring', __args__, opts=opts, typ=GetWorkloadNetworkPortMirroringResult).value

    return AwaitableGetWorkloadNetworkPortMirroringResult(
        METHOD_NAME=pulumi.get(__ret__, 'destination'),
        direction=pulumi.get(__ret__, 'direction'),
        display_name=pulumi.get(__ret__, 'display_name'),
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        revision=pulumi.get(__ret__, 'revision'),
        source=pulumi.get(__ret__, 'source'),
        status=pulumi.get(__ret__, 'status'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_workload_network_port_mirroring)
def get_workload_network_port_mirroring_output(port_mirroring_id: Optional[pulumi.Input[str]] = None,
                                               private_cloud_name: Optional[pulumi.Input[str]] = None,
                                               resource_group_name: Optional[pulumi.Input[str]] = None,
                                               opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWorkloadNetworkPortMirroringResult]:
    """
    NSX Port Mirroring

    :param str port_mirroring_id: NSX Port Mirroring identifier. Generally the same as the Port Mirroring display name
    :param str private_cloud_name: Name of the private cloud
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Body intentionally empty: lift_output_func forwards the call to the
    # plain function above and wraps the result in a pulumi Output.
    ...
# Copyright 2008-2020 pydicom authors. See LICENSE file for details.
"""Unit tests for the pydicom.sequence module."""
import copy
import weakref
import pytest
from pydicom.dataset import Dataset
from pydicom.sequence import Sequence
class TestSequence:
    """Unit tests for pydicom.sequence.Sequence."""

    def testDefaultInitialization(self):
        """Sequence: Ensure a valid Sequence is created"""
        empty = Sequence()
        assert 0 == len(empty)

    def testValidInitialization(self):
        """Sequence: Ensure valid creation of Sequences using Dataset inputs"""
        inputs = {
            "PatientPosition": "HFS",
            "PatientSetupNumber": "1",
            "SetupTechniqueDescription": "",
        }
        patientSetups = Dataset()
        patientSetups.update(inputs)
        # Construct the sequence
        seq = Sequence((patientSetups,))
        assert isinstance(seq[0], Dataset)

    def METHOD_NAME(self):
        """Sequence: Raise error if inputs are not iterables or Datasets"""
        # Error on construction with single Dataset
        with pytest.raises(TypeError):
            Sequence(Dataset())
        # Test for non-iterable
        with pytest.raises(TypeError):
            Sequence(1)
        # Test for invalid iterable contents
        with pytest.raises(TypeError):
            Sequence([1, 2])

    def testInvalidAssignment(self):
        """Sequence: validate exception for invalid assignment"""
        seq = Sequence(
            [
                Dataset(),
            ]
        )
        # Attempt to assign an integer to the first element
        with pytest.raises(TypeError):
            seq.__setitem__(0, 1)

    def testValidAssignment(self):
        """Sequence: ensure ability to assign a Dataset to a Sequence item"""
        ds = Dataset()
        ds.add_new((1, 1), "IS", 1)
        # Create a single element Sequence first
        seq = Sequence(
            [
                Dataset(),
            ]
        )
        seq[0] = ds
        assert ds == seq[0]

    def test_str(self):
        """Test string output of the sequence"""
        ds = Dataset()
        ds.BeamSequence = [Dataset()]
        ds.BeamSequence[0].PatientName = "TEST"
        ds.BeamSequence[0].PatientID = "12345"
        out = str(ds.BeamSequence)
        assert "[(0010, 0010) Patient's Name" in out
        assert "PN: 'TEST'" in out
        assert "(0010, 0020) Patient ID" in out
        assert "LO: '12345']" in out

    def test_adding_datasets(self):
        """Tests for adding datasets to the Sequence"""
        ds_a = Dataset()
        ds_a.Rows = 1
        ds_b = Dataset()
        ds_b.Rows = 2
        ds_c = Dataset()
        ds_c.Rows = 3
        ds_d = Dataset()
        ds_d.Rows = 4
        ds_e = Dataset()
        ds_e.Rows = 5
        parent = Dataset()
        parent.PatientName = "Parent"
        seq = Sequence()
        seq.parent_dataset = parent
        # The sequence holds its parent only weakly (avoids reference cycles).
        assert isinstance(seq.parent_dataset, weakref.ReferenceType)
        seq.append(ds_a)
        seq.append(ds_c)
        seq.insert(1, ds_b)
        assert 3 == len(seq)
        # Every contained dataset gets a weak back-reference to the sequence.
        for ds in seq:
            assert isinstance(ds.parent_seq, weakref.ReferenceType)
        seq[1] = ds_e
        assert ds_e == seq[1]
        assert [ds_a, ds_e, ds_c] == seq
        # Slice assignment: inserting and replacing ranges of datasets.
        seq[1:1] = [ds_d]
        assert [ds_a, ds_d, ds_e, ds_c] == seq
        seq[1:2] = [ds_c, ds_e]
        assert [ds_a, ds_c, ds_e, ds_e, ds_c] == seq
        for ds in seq:
            assert isinstance(ds.parent_seq, weakref.ReferenceType)
        # Slice assignment with a bare Dataset (not an iterable) must fail.
        msg = r"Can only assign an iterable of 'Dataset'"
        with pytest.raises(TypeError, match=msg):
            seq[1:1] = ds_d

    def test_extending(self):
        """Test Sequence.extend()."""
        ds_a = Dataset()
        ds_a.Rows = 1
        ds_b = Dataset()
        ds_b.Rows = 2
        ds_c = Dataset()
        ds_c.Rows = 3
        ds_d = Dataset()
        ds_d.Rows = 4
        ds_e = Dataset()
        ds_e.Rows = 5
        parent = Dataset()
        parent.PatientName = "Parent"
        seq = Sequence()
        seq.parent_dataset = parent
        assert isinstance(seq.parent_dataset, weakref.ReferenceType)
        seq.extend([ds_a, ds_b, ds_c])
        assert [ds_a, ds_b, ds_c] == seq
        # extend() with a bare Dataset raises and leaves the sequence intact.
        msg = r"An iterable of 'Dataset' is required"
        with pytest.raises(TypeError, match=msg):
            seq.extend(ds_d)
        assert [ds_a, ds_b, ds_c] == seq
        seq.extend([ds_d, ds_e])
        assert [ds_a, ds_b, ds_c, ds_d, ds_e] == seq
        for ds in seq:
            assert isinstance(ds.parent_seq, weakref.ReferenceType)

    def test_iadd(self):
        """Test Sequence() += [Dataset()]."""
        ds_a = Dataset()
        ds_a.Rows = 1
        ds_b = Dataset()
        ds_b.Rows = 2
        ds_c = Dataset()
        ds_c.Rows = 3
        ds_d = Dataset()
        ds_d.Rows = 4
        ds_e = Dataset()
        ds_e.Rows = 5
        parent = Dataset()
        parent.PatientName = "Parent"
        seq = Sequence()
        seq.parent_dataset = parent
        assert isinstance(seq.parent_dataset, weakref.ReferenceType)
        seq += [ds_a, ds_b, ds_c]
        assert [ds_a, ds_b, ds_c] == seq
        # += with a bare Dataset raises and leaves the sequence intact.
        msg = r"An iterable of 'Dataset' is required"
        with pytest.raises(TypeError, match=msg):
            seq += ds_d
        assert [ds_a, ds_b, ds_c] == seq
        seq += [ds_d, ds_e]
        assert [ds_a, ds_b, ds_c, ds_d, ds_e] == seq
        for ds in seq:
            assert isinstance(ds.parent_seq, weakref.ReferenceType)

    def test_deepcopy_sequence_subclass(self):
        """Regression test for #1813."""

        # deepcopy of a Sequence subclass must preserve the subclass type.
        class MySequenceSubclass(Sequence):
            pass

        my_sequence_subclass = MySequenceSubclass()
        seq2 = copy.deepcopy(my_sequence_subclass)
        assert seq2.__class__ is MySequenceSubclass
"""
Salt returner to return highstate stats to Librato
To enable this returner the minion will need the Librato
client importable on the Python path and the following
values configured in the minion or master config.
The Librato python client can be found at:
https://github.com/librato/python-librato
.. code-block:: yaml
librato.email: example@librato.com
librato.api_token: abc12345def
This returner supports multi-dimension metrics for Librato. To enable
support for more metrics, the tags JSON object can be modified to include
other tags.
Adding EC2 Tags example:
If ec2_tags:region were desired within the tags for multi-dimension. The tags
could be modified to include the ec2 tags. Multiple dimensions are added simply
by adding more tags to the submission.
.. code-block:: python
pillar_data = __salt__['pillar.raw']()
q.add(metric.name, value, tags={'Name': ret['id'],'Region': pillar_data['ec2_tags']['Name']})
"""
import logging
import salt.returners
import salt.utils.jid
try:
import librato
HAS_LIBRATO = True
except ImportError:
HAS_LIBRATO = False
# Define the module's Virtual Name
__virtualname__ = "librato"
log = logging.getLogger(__name__)
def __virtual__():
    """Only load this returner when the librato client library is importable."""
    if HAS_LIBRATO:
        return __virtualname__
    return (
        False,
        "Could not import librato module; librato python client is not installed.",
    )
def METHOD_NAME(ret=None):
    """
    Fetch the Librato connection options (email, api_token, api_url) from the
    salt returner configuration, defaulting the API host when not configured.
    """
    option_attrs = {key: key for key in ("email", "api_token", "api_url")}
    options = salt.returners.get_returner_options(
        __virtualname__, ret, option_attrs, __salt__=__salt__, __opts__=__opts__
    )
    if "api_url" not in options:
        options["api_url"] = "metrics-api.librato.com"
    log.debug("Retrieved Librato options: %s", options)
    return options
def _get_librato(ret=None):
    """
    Build and return an authenticated Librato API connection.
    """
    opts = METHOD_NAME(ret)
    connection = librato.connect(
        opts.get("email"),
        opts.get("api_token"),
        sanitizer=librato.sanitize_metric_name,
        hostname=opts.get("api_url"),
    )
    log.info("Connected to librato.")
    return connection
def _calculate_runtimes(states):
    """Aggregate highstate results into total runtime and pass/fail counts.

    :param states: mapping of state IDs to result dicts; entries that are not
        dicts or lack a ``duration`` key are skipped.
    :return: dict with ``runtime`` (ms), ``num_failed_states`` and
        ``num_passed_states``.
    """
    results = {"runtime": 0.00, "num_failed_states": 0, "num_passed_states": 0}

    # The state IDs themselves are unused; only the result dicts matter.
    for resultset in states.values():
        if isinstance(resultset, dict) and "duration" in resultset:
            # Count the pass vs failures
            if resultset["result"]:
                results["num_passed_states"] += 1
            else:
                results["num_failed_states"] += 1

            # Count durations
            results["runtime"] += resultset["duration"]

    log.debug("Parsed state metrics: %s", results)
    return results
def returner(ret):
    """
    Parse the return data and submit highstate metrics to Librato.

    Only ``state.highstate`` returns are reported; other functions submit an
    empty queue.
    """
    librato_conn = _get_librato(ret)
    q = librato_conn.new_queue()

    if ret["fun"] == "state.highstate":
        log.debug("Found returned Highstate data.")
        # Calculate the runtimes and number of failed states.
        stats = _calculate_runtimes(ret["return"])

        log.debug("Batching Metric retcode with %s", ret["retcode"])
        q.add("saltstack.highstate.retcode", ret["retcode"], tags={"Name": ret["id"]})

        # Fixed log message: metric counts failed *states*, not jobs.
        log.debug("Batching Metric num_failed_states with %s", stats["num_failed_states"])
        q.add(
            "saltstack.highstate.failed_states",
            stats["num_failed_states"],
            tags={"Name": ret["id"]},
        )

        log.debug(
            "Batching Metric num_passed_states with %s", stats["num_passed_states"]
        )
        q.add(
            "saltstack.highstate.passed_states",
            stats["num_passed_states"],
            tags={"Name": ret["id"]},
        )

        log.debug("Batching Metric runtime with %s", stats["runtime"])
        q.add("saltstack.highstate.runtime", stats["runtime"], tags={"Name": ret["id"]})

        # Fixed copy-paste: this metric is total_states, not runtime.
        log.debug(
            "Batching Metric total_states with %s",
            stats["num_failed_states"] + stats["num_passed_states"],
        )
        q.add(
            "saltstack.highstate.total_states",
            stats["num_failed_states"] + stats["num_passed_states"],
            tags={"Name": ret["id"]},
        )

    log.info("Sending metrics to Librato.")
    q.submit()
from sphinx.domains import Domain, ObjType
from sphinx.roles import XRefRole
from sphinx.domains.std import GenericObject, StandardDomain
from sphinx.directives import ObjectDescription
from sphinx.util.nodes import clean_astext, make_refnode
from sphinx.util import ws_re
from sphinx import addnodes
from sphinx.util.docfields import Field
from docutils import nodes
def get_id_from_cfg(text):
    """
    Turn a ``$cfg['...']['...']`` option name into its anchor ID parts.
    """
    prefix = "$cfg['"
    suffix = "']"
    if text.startswith(prefix):
        text = text[len(prefix):]
    if text.endswith(suffix):
        text = text[:-len(suffix)]
    # Array markers carry no anchor information; drop them before splitting.
    return text.replace('[$i]', '').split("']['")
class ConfigOption(ObjectDescription):
    """Directive describing a single ``$cfg[...]`` configuration option."""

    indextemplate = 'configuration option; %s'
    parse_node = None
    has_arguments = True
    doc_field_types = [
        Field('default', label='Default value', has_arg=False,
              names=('default', )),
        Field('type', label='Type', has_arg=False,
              names=('type',)),
    ]

    def handle_signature(self, sig, signode):
        # Render the raw option name as the signature.
        signode.clear()
        signode += addnodes.desc_name(sig, sig)
        # normalize whitespace like XRefRole does
        name = ws_re.sub('', sig)
        return name

    def add_target_and_index(self, name, sig, signode):
        # Anchor IDs look like cfg_Servers_host for $cfg['Servers'][$i]['host'].
        targetparts = get_id_from_cfg(name)
        targetname = 'cfg_%s' % '_'.join(targetparts)
        signode['ids'].append(targetname)
        self.state.document.note_explicit_target(signode)
        indextype = 'single'
        # Generic index entries
        indexentry = self.indextemplate % (name,)
        self.indexnode['entries'].append((indextype, indexentry,
                                          targetname, targetname, None))
        self.indexnode['entries'].append((indextype, name,
                                          targetname, targetname, None))
        # Server section
        if targetparts[0] == 'Servers' and len(targetparts) > 1:
            # Server options get extra entries without the 'Servers' prefix.
            indexname = ', '.join(targetparts[1:])
            self.indexnode['entries'].append((indextype, 'server configuration; %s' % indexname,
                                              targetname, targetname, None))
            self.indexnode['entries'].append((indextype, indexname,
                                              targetname, targetname, None))
        else:
            indexname = ', '.join(targetparts)
            self.indexnode['entries'].append((indextype, indexname,
                                              targetname, targetname, None))
        # Register the object so config:option cross-references can resolve.
        self.env.domaindata['config']['objects'][self.objtype, name] = \
            self.env.docname, targetname
class ConfigSectionXRefRole(XRefRole):
    """
    Cross-referencing role for configuration sections (adds an index entry).
    """

    def result_nodes(self, document, env, node, is_ref):
        if not is_ref:
            return [node], []
        varname = node['reftarget']
        # Create a unique anchor next to the reference so the index entries
        # can point back at this use site.
        tgtid = 'index-%s' % env.new_serialno('index')
        indexnode = addnodes.index()
        indexnode['entries'] = [
            ('single', varname, tgtid, varname, None),
            ('single', 'configuration section; %s' % varname, tgtid, varname, None)
        ]
        targetnode = nodes.target('', '', ids=[tgtid])
        document.note_explicit_target(targetnode)
        return [indexnode, targetnode, node], []
class ConfigSection(ObjectDescription):
    """Directive describing a named configuration section."""

    indextemplate = 'configuration section; %s'
    parse_node = None

    def handle_signature(self, sig, signode):
        if self.parse_node:
            name = self.parse_node(self.env, sig, signode)
        else:
            signode.clear()
            signode += addnodes.desc_name(sig, sig)
            # normalize whitespace like XRefRole does
            name = ws_re.sub('', sig)
        return name

    def add_target_and_index(self, name, sig, signode):
        targetname = '%s-%s' % (self.objtype, name)
        signode['ids'].append(targetname)
        self.state.document.note_explicit_target(signode)
        if self.indextemplate:
            # An optional "type: template" prefix selects the index entry type.
            colon = self.indextemplate.find(':')
            if colon != -1:
                indextype = self.indextemplate[:colon].strip()
                indexentry = self.indextemplate[colon+1:].strip() % (name,)
            else:
                indextype = 'single'
                indexentry = self.indextemplate % (name,)
            self.indexnode['entries'].append((indextype, indexentry,
                                              targetname, targetname, None))
        # Register the object so config:section cross-references can resolve.
        self.env.domaindata['config']['objects'][self.objtype, name] = \
            self.env.docname, targetname
class ConfigOptionXRefRole(XRefRole):
    """
    Cross-referencing role for configuration options (adds an index entry).
    """

    def result_nodes(self, document, env, node, is_ref):
        if not is_ref:
            return [node], []
        varname = node['reftarget']
        # Same pattern as ConfigSectionXRefRole, but with option index entries.
        tgtid = 'index-%s' % env.new_serialno('index')
        indexnode = addnodes.index()
        indexnode['entries'] = [
            ('single', varname, tgtid, varname, None),
            ('single', 'configuration option; %s' % varname, tgtid, varname, None)
        ]
        targetnode = nodes.target('', '', ids=[tgtid])
        document.note_explicit_target(targetnode)
        return [indexnode, targetnode, node], []
class ConfigFileDomain(Domain):
    """Sphinx domain collecting configuration options and sections."""

    name = 'config'
    label = 'Config'
    object_types = {
        'option': ObjType('config option', 'option'),
        'section': ObjType('config section', 'section'),
    }
    directives = {
        'option': ConfigOption,
        'section': ConfigSection,
    }
    roles = {
        'option': ConfigOptionXRefRole(),
        'section': ConfigSectionXRefRole(),
    }
    initial_data = {
        'objects': {},  # (type, name) -> docname, labelid
    }

    def clear_doc(self, docname):
        # Drop all objects registered by a document that is being re-read.
        toremove = []
        for key, (fn, _) in self.data['objects'].items():
            if fn == docname:
                toremove.append(key)
        for key in toremove:
            del self.data['objects'][key]

    def resolve_xref(self, env, fromdocname, builder,
                     typ, target, node, contnode):
        docname, labelid = self.data['objects'].get((typ, target), ('', ''))
        if not docname:
            return None
        else:
            return make_refnode(builder, fromdocname, docname,
                                labelid, contnode)

    def get_objects(self):
        # Feed the search index; priority comes from the object type.
        for (type, name), info in self.data['objects'].items():
            yield (name, name, type, info[0], info[1],
                   self.object_types[type].attrs['searchprio'])
def METHOD_NAME(app):
    # Sphinx extension entry point: register the custom "config" domain.
    # NOTE(review): current Sphinx convention is to return a metadata dict
    # (version, parallel-safety flags) from setup() — confirm before adding,
    # since the domain stores shared data and may not be parallel-read safe.
    app.add_domain(ConfigFileDomain)
from syscore.constants import arg_not_supplied
from sysdata.mongodb.mongo_connection import mongoDb
from sysdata.mongodb.mongo_generic import mongoDataWithSingleKey
from syscore.dateutils import long_to_datetime, datetime_to_long
from syslogdiag.log_to_screen import logtoscreen
from syslogdiag.log_entry import (
LEVEL_ID,
TIMESTAMP_ID,
TEXT_ID,
LOG_RECORD_ID,
logEntry,
)
from syslogdiag._DEPRECATED.database_log import logData
from copy import copy
import datetime
LOG_COLLECTION_NAME = "Logs"
class mongoLogData(logData):
    """Log storage backed by a MongoDB collection, keyed by log record ID."""

    def __init__(
        self, mongo_db: mongoDb = arg_not_supplied, log=logtoscreen("mongoLogData")
    ):
        self._mongo_data = mongoDataWithSingleKey(
            collection_name=LOG_COLLECTION_NAME,
            key_name=LOG_RECORD_ID,
            mongo_db=mongo_db,
        )
        super().__init__(log=log)

    @property
    def mongo_data(self):
        # Underlying mongo accessor; exposed read-only.
        return self._mongo_data

    def get_log_items_as_entries(
        self, attribute_dict: dict = arg_not_supplied, lookback_days: int = 1
    ):
        """
        Return log items not as text, good for diagnostics

        :param attribute_dict: dictionary of attributes to return logs for
        :param lookback_days: only return records newer than this many days
        :return: list of mongoLogEntry objects, sorted by log ID
        """
        if attribute_dict is arg_not_supplied:
            attribute_dict = {}
        # Restrict the query to records newer than the lookback cutoff.
        attribute_dict = add_after_n_days_to_attribute_dict(
            attribute_dict, lookback_days=lookback_days
        )
        results_list = self.mongo_data.get_list_of_result_dict_for_custom_dict(
            attribute_dict
        )
        # ... to list of log entries
        list_of_log_items = [
            mongoLogEntry.log_entry_from_dict(single_log_dict)
            for single_log_dict in results_list
        ]
        # sort by log ID
        list_of_log_items.sort(key=lambda x: x._log_id)
        return list_of_log_items

    def delete_log_items_from_before_n_days(self, lookback_days=365):
        # need something to delete old log records, eg more than x months ago
        attribute_dict = add_before_n_days_to_attribute_dict(
            {}, lookback_days=lookback_days
        )
        self.mongo_data.delete_data_with_any_warning_for_custom_dict(attribute_dict)
def add_before_n_days_to_attribute_dict(
    attribute_dict: dict, lookback_days: int
) -> dict:
    """Restrict the query dict to records *older* than the cutoff."""
    return add_timestamp_cutoff_to_attribute_dict(
        attribute_dict=attribute_dict,
        lookback_days=lookback_days,
        greater_or_less_than="$lt",
    )
def add_after_n_days_to_attribute_dict(
    attribute_dict: dict, lookback_days: int
) -> dict:
    """Restrict the query dict to records *newer* than the cutoff."""
    return add_timestamp_cutoff_to_attribute_dict(
        attribute_dict=attribute_dict,
        lookback_days=lookback_days,
        greater_or_less_than="$gt",
    )
def add_timestamp_cutoff_to_attribute_dict(
    attribute_dict: dict, lookback_days: int, greater_or_less_than: str = "$gt"
) -> dict:
    """Attach a mongo timestamp comparison against a cutoff ``lookback_days``
    ago to the query dict, returning the (mutated) dict."""
    assert greater_or_less_than in ["$gt", "$lt"]
    cutoff_as_long = METHOD_NAME(lookback_days)
    attribute_dict[TIMESTAMP_ID] = {greater_or_less_than: cutoff_as_long}
    return attribute_dict
def METHOD_NAME(lookback_days: int) -> int:
    """Return the long-encoded datetime exactly ``lookback_days`` days ago."""
    now = datetime.datetime.now()
    return datetime_to_long(now - datetime.timedelta(days=lookback_days))
class mongoLogEntry(logEntry):
    """logEntry variant that can rebuild itself from a mongo result dict."""

    @classmethod
    def log_entry_from_dict(mongoLogEntry, log_dict_input: dict):
        """
        Starting with the dictionary representation, recover the original logEntry

        :param log_dict_input: dict, as per logEntry.log_dict()
        :return: logEntry object
        """
        # Work on a copy so the caller's dict is not consumed.
        log_dict = copy(log_dict_input)
        # Pop the well-known fields; whatever remains is treated as the
        # free-form attributes of the entry.
        log_timestamp_aslong = log_dict.pop(TIMESTAMP_ID)
        msg_level = log_dict.pop(LEVEL_ID)
        text = log_dict.pop(TEXT_ID)
        log_id = log_dict.pop(LOG_RECORD_ID)
        attributes = log_dict
        log_timestamp = long_to_datetime(log_timestamp_aslong)
        log_entry = mongoLogEntry(
            text,
            log_timestamp=log_timestamp,
            msglevel=msg_level,
            attributes=attributes,
            log_id=log_id,
        )
        return log_entry
from typing import Callable, Dict, List, Tuple, Union
import sqlalchemy as sa
from szurubooru import db, errors, model, rest
from szurubooru.func import cache
from szurubooru.search import parser, tokens
from szurubooru.search.configs.base_search_config import BaseSearchConfig
from szurubooru.search.query import SearchQuery
from szurubooru.search.typing import SaQuery
def _format_dict_keys(source: Dict) -> List[str]:
return list(sorted(source.keys()))
def _get_order(order: str, default_order: str) -> Union[bool, str]:
    """Resolve a sort-token order marker into a concrete sort direction.

    ``SORT_DEFAULT`` maps to the column's default (ascending when unset),
    ``SORT_NEGATED_DEFAULT`` flips the default, and any other value is
    returned unchanged.
    """
    if order == tokens.SortToken.SORT_DEFAULT:
        return default_order or tokens.SortToken.SORT_ASC
    if order == tokens.SortToken.SORT_NEGATED_DEFAULT:
        if default_order == tokens.SortToken.SORT_ASC:
            return tokens.SortToken.SORT_DESC
        elif default_order == tokens.SortToken.SORT_DESC:
            return tokens.SortToken.SORT_ASC
        # A negated default only makes sense for asc/desc defaults. A bare
        # `assert False` would be stripped under `python -O` and silently
        # fall through to `return order`, so raise explicitly instead.
        raise AssertionError(
            "SORT_NEGATED_DEFAULT with unsupported default order: %r"
            % (default_order,)
        )
    return order
class Executor:
    """
    Class for search parsing and execution. Handles plaintext parsing and
    delegates sqlalchemy filter decoration to SearchConfig instances.
    """

    def __init__(self, search_config: BaseSearchConfig) -> None:
        self.config = search_config
        self.parser = parser.Parser()

    def get_around(
        self, query_text: str, entity_id: int
    ) -> Tuple[model.Base, model.Base]:
        """Return the (previous, next) entities adjacent to *entity_id*
        within the filtered result set of *query_text*; either may be None
        at the edges."""
        search_query = self.parser.parse(query_text)
        self.config.on_search_query_parsed(search_query)
        filter_query = self.config.create_around_query().options(
            sa.orm.lazyload("*")
        )
        filter_query = self._prepare_db_query(
            filter_query, search_query, False
        )
        # Pick the nearest matching row on each side, by ID distance.
        prev_filter_query = (
            filter_query.filter(self.config.id_column > entity_id)
            .order_by(None)
            .order_by(sa.func.abs(self.config.id_column - entity_id).asc())
            .limit(1)
        )
        next_filter_query = (
            filter_query.filter(self.config.id_column < entity_id)
            .order_by(None)
            .order_by(sa.func.abs(self.config.id_column - entity_id).asc())
            .limit(1)
        )
        return (
            prev_filter_query.one_or_none(),
            next_filter_query.one_or_none(),
        )

    def get_around_and_serialize(
        self,
        ctx: rest.Context,
        entity_id: int,
        serializer: Callable[[model.Base], rest.Response],
    ) -> rest.Response:
        """Serialize the neighbours of *entity_id* for a REST response."""
        entities = self.get_around(
            ctx.get_param_as_string("query", default=""), entity_id
        )
        return {
            "prev": serializer(entities[0]),
            "next": serializer(entities[1]),
        }

    def execute(
        self, query_text: str, offset: int, limit: int
    ) -> Tuple[int, List[model.Base]]:
        """Run *query_text* and return ``(total_count, page_of_entities)``.

        Results are cached per (config, query, offset, limit).
        """
        search_query = self.parser.parse(query_text)
        self.config.on_search_query_parsed(search_query)
        if offset < 0:
            # Clamp a negative offset to zero, shrinking the page to match.
            limit = max(0, limit + offset)
            offset = 0
        disable_eager_loads = False
        for token in search_query.sort_tokens:
            if token.name == "random":
                disable_eager_loads = True
        key = (id(self.config), hash(search_query), offset, limit)
        if cache.has(key):
            return cache.get(key)
        filter_query = self.config.create_filter_query(disable_eager_loads)
        filter_query = filter_query.options(sa.orm.lazyload("*"))
        filter_query = self._prepare_db_query(filter_query, search_query, True)
        entities = filter_query.offset(offset).limit(limit).all()
        # Separate count query: same filters, no sorting, single COUNT(*).
        count_query = self.config.create_count_query(disable_eager_loads)
        count_query = count_query.options(sa.orm.lazyload("*"))
        count_query = self._prepare_db_query(count_query, search_query, False)
        count_statement = count_query.statement.with_only_columns(
            [sa.func.count()]
        ).order_by(None)
        count = db.session.execute(count_statement).scalar()
        ret = (count, entities)
        cache.put(key, ret)
        return ret

    def METHOD_NAME(
        self,
        ctx: rest.Context,
        serializer: Callable[[model.Base], rest.Response],
    ) -> rest.Response:
        """Execute the query described by the request context and wrap the
        page in the standard paged-search response envelope."""
        query = ctx.get_param_as_string("query", default="")
        offset = ctx.get_param_as_int("offset", default=0, min=0)
        limit = ctx.get_param_as_int("limit", default=100, min=1, max=100)
        count, entities = self.execute(query, offset, limit)
        return {
            "query": query,
            "offset": offset,
            "limit": limit,
            "total": count,
            "results": list([serializer(entity) for entity in entities]),
        }

    def _prepare_db_query(
        self, db_query: SaQuery, search_query: SearchQuery, use_sort: bool
    ) -> SaQuery:
        """Apply every token of *search_query* to *db_query* using the
        filters declared by the SearchConfig; raises SearchError for tokens
        the config does not support."""
        for anon_token in search_query.anonymous_tokens:
            if not self.config.anonymous_filter:
                raise errors.SearchError(
                    "Anonymous tokens are not valid in this context."
                )
            db_query = self.config.anonymous_filter(
                db_query, anon_token.criterion, anon_token.negated
            )
        for named_token in search_query.named_tokens:
            if named_token.name not in self.config.named_filters:
                raise errors.SearchError(
                    "Unknown named token: %r. Available named tokens: %r."
                    % (
                        named_token.name,
                        _format_dict_keys(self.config.named_filters),
                    )
                )
            db_query = self.config.named_filters[named_token.name](
                db_query, named_token.criterion, named_token.negated
            )
        for sp_token in search_query.special_tokens:
            if sp_token.value not in self.config.special_filters:
                raise errors.SearchError(
                    "Unknown special token: %r. "
                    "Available special tokens: %r."
                    % (
                        sp_token.value,
                        _format_dict_keys(self.config.special_filters),
                    )
                )
            db_query = self.config.special_filters[sp_token.value](
                db_query, None, sp_token.negated
            )
        if use_sort:
            for sort_token in search_query.sort_tokens:
                if sort_token.name not in self.config.sort_columns:
                    raise errors.SearchError(
                        "Unknown sort token: %r. "
                        "Available sort tokens: %r."
                        % (
                            sort_token.name,
                            _format_dict_keys(self.config.sort_columns),
                        )
                    )
                column, default_order = self.config.sort_columns[
                    sort_token.name
                ]
                order = _get_order(sort_token.order, default_order)
                if order == sort_token.SORT_ASC:
                    db_query = db_query.order_by(column.asc())
                elif order == sort_token.SORT_DESC:
                    db_query = db_query.order_by(column.desc())
        db_query = self.config.finalize_query(db_query)
        return db_query
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
def METHOD_NAME():
    """Train a hetero logistic regression model between a guest and a host
    party, then reuse the fitted pipeline to predict and evaluate on a
    second dataset."""
    # parties config
    guest = 9999
    host = 10000
    arbiter = 10000
    # specify input data name & namespace in database
    guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
    host_train_data = {"name": "breast_hetero_host", "namespace": "experiment"}
    guest_eval_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
    host_eval_data = {"name": "breast_hetero_host", "namespace": "experiment"}
    # initialize pipeline
    pipeline = PipeLine()
    # set job initiator
    pipeline.set_initiator(role="guest", party_id=guest)
    # set participants information
    pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)
    # define Reader components to read in data
    reader_0 = Reader(name="reader_0")
    # configure Reader for guest
    reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
    # configure Reader for host
    reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
    # define DataTransform component
    data_transform_0 = DataTransform(name="data_transform_0")
    # get DataTransform party instance of guest
    data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role="guest", party_id=guest)
    # configure DataTransform for guest (only guest data carries the label)
    data_transform_0_guest_party_instance.component_param(with_label=True, output_format="dense")
    # get and configure DataTransform party instance of host
    data_transform_0.get_party_instance(role="host", party_id=host).component_param(with_label=False)
    # define Intersection components
    intersection_0 = Intersection(name="intersection_0")
    # define HeteroLR component
    hetero_lr_0 = HeteroLR(name="hetero_lr_0",
                           early_stop="diff",
                           learning_rate=0.15,
                           optimizer="rmsprop",
                           max_iter=10)
    # add components to pipeline, in order of task execution
    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    # set data input sources of intersection components
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
    # set train data of hetero_lr_0 component
    pipeline.add_component(hetero_lr_0, data=Data(train_data=intersection_0.output.data))
    # compile pipeline once finished adding modules, this step will form conf and dsl files for running job
    pipeline.compile()
    # fit model
    pipeline.fit()
    # query component summary
    import json
    print(json.dumps(pipeline.get_component("hetero_lr_0").get_summary(), indent=4))
    # predict
    # deploy required components
    pipeline.deploy_component([data_transform_0, intersection_0, hetero_lr_0])
    # initiate predict pipeline
    predict_pipeline = PipeLine()
    # define new data reader
    reader_1 = Reader(name="reader_1")
    reader_1.get_party_instance(role="guest", party_id=guest).component_param(table=guest_eval_data)
    reader_1.get_party_instance(role="host", party_id=host).component_param(table=host_eval_data)
    # define evaluation component
    evaluation_0 = Evaluation(name="evaluation_0")
    evaluation_0.get_party_instance(role="guest", party_id=guest).component_param(need_run=True, eval_type="binary")
    evaluation_0.get_party_instance(role="host", party_id=host).component_param(need_run=False)
    # add data reader onto predict pipeline
    predict_pipeline.add_component(reader_1)
    # add selected components from train pipeline onto predict pipeline
    # specify data source
    predict_pipeline.add_component(
        pipeline, data=Data(
            predict_input={
                pipeline.data_transform_0.input.data: reader_1.output.data}))
    # add evaluation component to predict pipeline
    predict_pipeline.add_component(evaluation_0, data=Data(data=pipeline.hetero_lr_0.output.data))
    # run predict model
    predict_pipeline.predict()
if __name__ == "__main__":
METHOD_NAME() |
298,877 | set vars | import json
import os
import pathlib
import time
from typing import Any, Dict, List
import hcl2
from python_terraform import IsFlagged, Terraform, TerraformCommandError, Tfstate
from retry import retry
from consts import consts, env_defaults
from service_client import log
class _Terraform(Terraform):
    """python_terraform.Terraform always sets the force flag (even to false),
    causing destroy failures in some cases. This class overrides the destroy
    method and sets the force flag only if it is actually true."""

    def destroy(self, dir_or_plan=None, force=False, **kwargs):
        # NOTE(review): apply() disables output capture when DEBUG_TERRAFORM is
        # set, but here capture is *enabled* in that case — confirm which
        # polarity is intended.
        capture_output = os.getenv("DEBUG_TERRAFORM") is not None
        default = kwargs
        if force:
            # Only forward the force flag when explicitly requested.
            default["force"] = force
        options = self._generate_default_options(default)
        args = self._generate_default_args(dir_or_plan)
        return self.cmd("destroy", *args, **options, capture_output=capture_output)
class TerraformUtils:
    """Wrapper around a terraform working directory.

    Keeps the tfvars JSON file and terraform state in sync and exposes
    init/apply/destroy helpers with retries.
    """

    def __init__(self, working_dir: str, terraform_init: bool = True):
        """Bind to *working_dir* and optionally run ``terraform init``."""
        log.info("TF FOLDER %s ", working_dir)
        self.working_dir = working_dir
        self.var_file_path = os.path.join(working_dir, consts.TFVARS_JSON_NAME)
        self.tf = _Terraform(
            working_dir=working_dir,
            state=consts.TFSTATE_FILE,
            var_file=consts.TFVARS_JSON_NAME,
            is_env_vars_included=True,
        )
        if terraform_init:
            self.init_tf()

    @retry(exceptions=TerraformCommandError, tries=10, delay=10)
    def init_tf(self) -> None:
        """Run ``terraform init``; retried because init can fail transiently."""
        self.tf.cmd("init", raise_on_error=True, capture_output=True)

    def select_defined_variables(self, **kwargs):
        """Return only the kwargs that are non-None and declared in the *.tf files."""
        supported_variables = self.get_variable_list()
        return {k: v for k, v in kwargs.items() if v is not None and k in supported_variables}

    def get_variable_list(self):
        """Collect names of all variables declared across the directory's *.tf files."""
        results = list()
        for tf_file in pathlib.Path(self.working_dir).glob("*.tf"):
            with open(tf_file, "r") as fp:
                terraform_file_dict = hcl2.load(fp)
                # hcl2 yields "variable" as a list of single-key dicts {name: {...}}.
                results += terraform_file_dict["variable"] if "variable" in terraform_file_dict else list()
        return list(map(lambda d: next(iter(d)), results))

    def apply(
        self,
        refresh: bool = True,
        capture_output: bool = True,
        attempts: int = env_defaults.TF_APPLY_ATTEMPTS,
        interval: int = consts.TF_APPLY_ATTEMPTS_INTERVAL,
    ) -> None:
        """Run ``terraform apply``; retries recursively with doubling interval."""
        if os.getenv("DEBUG_TERRAFORM") is not None:
            capture_output = False
        return_value, output, err = self.tf.apply(
            no_color=IsFlagged, refresh=refresh, input=False, skip_plan=True, capture_output=capture_output
        )
        if return_value == 0:
            return
        message = f"Terraform apply failed with return value {return_value}, output {output} , error {err}"
        if attempts == 1:
            # Last attempt exhausted — surface the failure.
            log.error(message)
            raise Exception(message)
        log.warning(message)
        log.info(f"Attempting to re-apply terraform target (left attempts: {attempts})...")
        time.sleep(interval)
        return self.apply(refresh, capture_output, attempts - 1, interval * 2)

    def METHOD_NAME(self, **kwargs) -> None:
        """Persist the supported, non-None kwargs into the tfvars JSON file."""
        defined_variables = self.select_defined_variables(**kwargs)
        self.update_variables_file(defined_variables)

    def set_and_apply(self, refresh: bool = True, **kwargs) -> None:
        """Set variables, re-init, then apply."""
        self.METHOD_NAME(**kwargs)
        self.init_tf()
        self.apply(refresh=refresh)

    def update_variables_file(self, variables: Dict[str, str]):
        """Merge *variables* into the existing tfvars JSON file in place."""
        with open(self.var_file_path, "r+") as _file:
            tfvars = json.load(_file)
            tfvars.update(variables)
            # Rewrite the file from the start with the merged content.
            _file.seek(0)
            _file.truncate()
            json.dump(tfvars, _file)

    def change_variables(self, variables: Dict[str, str], refresh: bool = True) -> None:
        """Update the tfvars file and immediately apply the change."""
        self.update_variables_file(variables=variables)
        self.apply(refresh=refresh)

    def get_state(self) -> Tfstate:
        """Re-read the terraform state file and return the parsed state."""
        self.tf.read_state_file(consts.TFSTATE_FILE)
        return self.tf.tfstate

    def get_resources(self, resource_type: str = None) -> List[Dict[str, Any]]:
        """Return state resources, optionally filtered by terraform resource type."""
        state = self.get_state()
        resources = [resource for resource in getattr(state, "resources", {})]
        return [resource for resource in resources if resource_type is None or resource["type"] == resource_type]

    def set_new_vips(self, api_vip: str, ingress_vip: str) -> None:
        """Point the deployment at new API/ingress VIPs and apply."""
        self.change_variables(variables={"api_vip": api_vip, "ingress_vip": ingress_vip}, refresh=True)

    def destroy(self, force: bool = True) -> None:
        """Run ``terraform destroy`` non-interactively (auto-approved)."""
        self.tf.destroy(force=force, input=False, auto_approve=True)
298,878 | latest report info dir | # Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cron job to get the latest code coverage stats and HTML reports."""
import datetime
import json
import os
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.datastore import ndb_utils
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.metrics import logs
def METHOD_NAME(bucket):
  """Return the GCS URL of the latest_report_info directory for *bucket*."""
  return f'gs://{bucket}/latest_report_info/'
def _basename(gcs_path):
"""Returns the basename for the given path without file extension."""
return os.path.splitext(os.path.basename(gcs_path))[0]
def _read_json(url):
  """Returns a JSON object loaded from the given GCS url, or None on failure."""
  data = storage.read_data(url)
  result = None
  try:
    result = json.loads(data)
  except Exception as e:
    # Broken fuzz targets can leave empty/partial JSON behind; callers treat
    # None as "skip", so a warning is sufficient here.
    logs.log_warn(
        'Empty or malformed code coverage JSON (%s): %s.' % (url, str(e)))
  return result
def _coverage_information(summary_path, name, report_info):
  """Returns a CoverageInformation entity with coverage stats populated.

  Returns the entity without stats when the summary JSON is empty, and None
  when the summary exists but is malformed.
  """
  date = datetime.datetime.strptime(
      report_info['report_date'],
      data_types.COVERAGE_INFORMATION_DATE_FORMAT).date()
  # |name| can be either a project qualified fuzz target name or a project name.
  cov_info = data_handler.get_coverage_information(
      name, date, create_if_needed=True)
  cov_info.fuzzer = name
  cov_info.date = date
  # Link to a per project report as long as we don't have per fuzzer reports.
  cov_info.html_report_url = report_info['html_report_url']
  summary = _read_json(summary_path)
  if not summary:
    # We can encounter empty JSON files for broken fuzz targets.
    return cov_info
  try:
    # Don't rely on the coverage data being well-formatted. Otherwise new
    # languages can break everything else.
    total_stats = summary['data'][0]['totals']
    cov_info.functions_covered = total_stats['functions']['covered']
    cov_info.functions_total = total_stats['functions']['count']
    cov_info.edges_covered = total_stats['regions']['covered']
    cov_info.edges_total = total_stats['regions']['count']
    return cov_info
  except KeyError:
    logs.log_error('Malformed code coverage for %s.' % name)
    return None
def _process_fuzzer_stats(fuzzer, project_info, project_name, bucket):
  """Processes coverage stats for a single fuzz target."""
  # Qualify the target's basename with its project name.
  fuzzer_name = data_types.fuzz_target_project_qualified_name(
      project_name, _basename(fuzzer))
  fuzzer_info_path = storage.get_cloud_storage_file_path(bucket, fuzzer)
  logs.log(
      'Processing fuzzer stats for %s (%s).' % (fuzzer_name, fuzzer_info_path))
  return _coverage_information(fuzzer_info_path, fuzzer_name, project_info)
def _process_project_stats(project_info, project_name):
  """Processes project-wide total coverage stats for a single project."""
  summary_path = project_info['report_summary_path']
  logs.log('Processing total stats for %s project (%s).' % (project_name,
                                                            summary_path))
  return _coverage_information(summary_path, project_name, project_info)
def _process_project(project_name, latest_project_info_url, bucket):
  """Collects coverage information for all fuzz targets in the given project and
  the total stats for the project."""
  logs.log('Processing coverage for %s project.' % project_name)
  report_info = _read_json(latest_project_info_url)
  if not report_info:
    logs.log_warn('Skipping code coverage for %s project.' % project_name)
    return

  # Iterate through report_info['fuzzer_stats_dir'] and prepare
  # CoverageInformation entities for individual fuzz targets.
  entities = []
  for fuzzer in storage.list_blobs(
      report_info['fuzzer_stats_dir'], recursive=False):
    fuzzer_stats = _process_fuzzer_stats(fuzzer, report_info, project_name,
                                         bucket)
    if fuzzer_stats:
      entities.append(fuzzer_stats)

  logs.log('Processed coverage for %d targets in %s project.' % (len(entities),
                                                                 project_name))

  # Prepare CoverageInformation entity for the total project stats.
  project_stats = _process_project_stats(report_info, project_name)
  if project_stats:
    entities.append(project_stats)

  # One batched datastore write for all collected entities.
  ndb_utils.put_multi(entities)
def collect_fuzzer_coverage(bucket):
  """Actual implementation of the fuzzer coverage task."""
  url = METHOD_NAME(bucket)
  # One latest-report-info blob per project lives under this directory.
  for latest_project_report_info_path in storage.list_blobs(
      url, recursive=False):
    project = _basename(latest_project_report_info_path)
    latest_project_info_url = storage.get_cloud_storage_file_path(
        bucket,
        latest_project_report_info_path)  # Path is relative to the bucket.
    _process_project(project, latest_project_info_url, bucket)
def main():
  """Collects the latest code coverage stats and links to reports.

  Returns False when the coverage bucket is not configured, True on success.
  """
  # The task is supposed to be super reliable and never fail. If anything goes
  # wrong, we just fail with the exception going straight into StackDriver.
  logs.log('FuzzerCoverage task started.')
  bucket = local_config.ProjectConfig().get('coverage.reports.bucket')
  if not bucket:
    logs.log_error(
        'Coverage bucket is not specified. Skipping FuzzerCoverage task.')
    return False
  collect_fuzzer_coverage(bucket)
  logs.log('FuzzerCoverage task finished successfully.')
  return True
298,879 | evaluate graph | # /usr/bin/env python2.7
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2017-2018, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
"""
Load a trained network and modify it to enable BFS. Add in beta switches after
the relu layers. Create a new solver which includes an architecture loss.
"""
from itertools import compress
import tensorflow as tf
from aimet_common.utils import AimetLogger
log = AimetLogger.get_area_logger(AimetLogger.LogAreas.Utils)
def initialize_uninitialized_vars(sess):
    """
    Initialize only the not-yet-initialized variables in *sess*'s graph.

    Some graphs have variables created after training that need to be
    initialized (eg SVD/Quantization). However, in pre-trained graphs we don't
    want to reinitialize variables that are already initialized, which would
    overwrite the values obtained during training. Therefore search for all
    uninitialized variables and initialize ONLY those variables.
    :param sess: tf.compat.v1.Session
    :return: None
    """
    with sess.graph.as_default():
        global_vars = tf.compat.v1.global_variables()
        # One sess.run produces a bool per variable: True => still uninitialized.
        is_not_initialized = sess.run([~(tf.compat.v1.is_variable_initialized(var)) for var in global_vars])
        uninitialized_vars = list(compress(global_vars, is_not_initialized))
        if uninitialized_vars:
            log.info('Initializing uninitialized variables')
            sess.run(tf.compat.v1.variables_initializer(uninitialized_vars))
def default_eval_func(data):
    """
    Default performance metric: return the accuracy from evaluated outputs.

    ``data`` is a list of ``(eval_name, value)`` pairs; the first entry is
    assumed to be the accuracy metric and its value is returned.
    :param data: list of (name, value) pairs from graph evaluation
    :return: the value of the first pair
    """
    if len(data) > 1:
        print('default evaluation function only expected 1 output, accuracy. Using first datum')
    # The accuracy pair is (name, value); return the value.
    first_pair = data[0]
    return first_pair[1]
def METHOD_NAME(session, generator, eval_names, eval_func, iterations):
    """
    Evaluates the graph's performance by running data through the network
    and calling an evaluation function to generate the performance metric.
    :param session: The tensorflow session that contains the graph
    :param generator: The data generator providing the network with batch data
    :param eval_names: The names providing the nodes on which the network's performance should be judged
    :param eval_func: The customized function to evaluate the performance of the network
    :param iterations: The number of iterations (batches) to run through the network
    :return: the metric averaged over the evaluated iterations
    """
    # Ensure any uninitialized variables are initialized
    initialize_uninitialized_vars(session)
    # Map the generator's input names to graph tensors so batches can be fed by name
    t_map = _create_map_of_input_tensors(generator, session)
    eval_outputs = []
    for name in eval_names:
        op = session.graph.get_operation_by_name(name)
        # The first output tensor of each named op is what gets evaluated
        eval_outputs.append(op.outputs[0])
    # Run the graph, accumulating the metric over `iterations` batches
    avg_metric = 0
    log.info("Evaluating graph for %i iterations", iterations)
    for _, batch in zip(range(iterations), generator):
        # Setup the feed dictionary
        feed_dict = {}
        for name, data in batch.items():
            feed_dict[t_map[name]] = data
        output_data = session.run(eval_outputs, feed_dict=feed_dict)
        avg_metric += eval_func(list(zip(eval_names, output_data)))
    log.info("Completed graph evaluation for %i iterations", iterations)
    return avg_metric / iterations
def _create_map_of_input_tensors(generator, session):
t_map = {}
inputs = generator.get_data_inputs() + generator.get_validation_inputs()
for name in inputs:
t_map[name] = session.graph.get_tensor_by_name(name + ':0')
return t_map |
298,880 | assert identical | # SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2023 Scipp contributors (https://github.com/scipp)
# @author Jan-Lukas Wynen
"""Custom assertions for pytest-based tests.
To get the best error messages, tell pytest to rewrite assertions in this module.
Place the following code in your ``conftest.py``:
.. code-block:: python
pytest.register_assert_rewrite('scipp.testing.assertions')
"""
from contextlib import contextmanager
from typing import Any, Iterator, Mapping, TypeVar
import numpy as np
from ..core import DataArray, DataGroup, Dataset, Variable
from ..core.comparison import identical
# Exception notes are formatted as 'PREPOSITION {loc}',
# where 'loc' is set by the concrete assertion functions to indicate coords, attrs, etc.
# 'PREPOSITION' is replaced at the top level to produce exception messages like:
#
# [...]
# in coord 'x'
# of data group item 'b'
# of data group item 'a'
T = TypeVar('T')
def METHOD_NAME(a: T, b: T) -> None:
    """Raise an AssertionError if two objects are not identical.

    For Scipp objects, ``assert_identical(a, b)`` is equivalent to
    ``assert sc.identical(a, b, equal_nan=True)`` but produces a more precise
    error message in pytest.

    If this function is called with arguments that are not supported by
    :func:`scipp.identical`, it calls ``assert a == b``.

    This function requires exact equality including equal types.
    For example, ``assert_identical(1, 1.0)`` will raise.

    NaN elements of Scipp variables are treated as equal.

    Parameters
    ----------
    a:
        The actual object to check.
    b:
        The desired, expected object.

    Raises
    ------
    AssertionError
        If the objects are not identical.
    """
    try:
        _assert_identical_impl(a, b)
    except AssertionError as exc:
        if hasattr(exc, '__notes__'):
            # See comment above: rewrite the 'PREPOSITION' placeholders so the
            # innermost location reads 'in ...' and all outer ones read 'of ...'.
            notes = []
            rest = -1
            for i, note in enumerate(exc.__notes__):
                if 'PREPOSITION' in note:
                    notes.append(note.replace('PREPOSITION', 'in'))
                    rest = i
                    break
            # Notes before the first placeholder are dropped; the remainder are
            # kept in order with the 'of' preposition.
            notes.extend(
                note.replace('PREPOSITION', 'of') for note in exc.__notes__[rest + 1 :]
            )
            exc.__notes__ = notes
        raise
def _assert_identical_impl(a: T, b: T) -> None:
    """Dispatch the identity check based on the (exact) type of *a*."""
    # Exact type equality is intentional: isinstance would accept subclasses
    # and e.g. let int/bool compare equal.
    assert type(a) == type(b)
    if isinstance(a, Variable):
        _assert_identical_variable(a, b)
    elif isinstance(a, DataArray):
        _assert_identical_data_array(a, b)
    elif isinstance(a, Dataset):
        _assert_identical_dataset(a, b)
    elif isinstance(a, DataGroup):
        _assert_identical_datagroup(a, b)
    else:
        # Fallback for plain Python / NumPy objects.
        assert a == b
def _assert_identical_variable(a: Variable, b: Variable) -> None:
    """Compare variable metadata, then dispatch on dense vs binned data."""
    assert a.sizes == b.sizes
    assert a.unit == b.unit
    assert a.dtype == b.dtype
    # Both must be binned, or both dense.
    assert (a.bins is None) == (b.bins is None)
    if a.bins is None:
        _assert_identical_dense_variable_data(a, b)
    else:
        _assert_identical_binned_variable_data(a, b)
def _assert_identical_binned_variable_data(a: Variable, b: Variable) -> None:
    """Compare binned variable contents."""
    # Support for iterating over bin contents is limited in Python.
    # So, simply use `identical` even though it does not produce good error messages.
    assert a.bins.unit == b.bins.unit
    assert identical(a, b)
def _assert_identical_dense_variable_data(a: Variable, b: Variable) -> None:
    """Element-wise comparison of values and (optional) variances."""
    with _add_note('values'):
        np.testing.assert_array_equal(
            a.values, b.values, err_msg='when comparing values'
        )
    if a.variances is not None:
        assert b.variances is not None, 'a has variances but b does not'
        with _add_note('variances'):
            np.testing.assert_array_equal(
                a.variances, b.variances, err_msg='when comparing variances'
            )
    else:
        assert b.variances is None, 'a has no variances but b does'
def _assert_identical_data_array(a: DataArray, b: DataArray) -> None:
    """Compare the data variable plus the coords/attrs/masks mappings."""
    _assert_identical_variable(a.data, b.data)
    _assert_mapping_eq(a.coords, b.coords, 'coord')
    _assert_mapping_eq(a.attrs, b.attrs, 'attr')
    _assert_mapping_eq(a.masks, b.masks, 'mask')
def _assert_identical_dataset(a: Dataset, b: Dataset) -> None:
    """Compare two datasets item by item."""
    _assert_mapping_eq(a, b, 'dataset item')
def _assert_identical_datagroup(a: DataGroup, b: DataGroup) -> None:
    """Compare two data groups item by item."""
    _assert_mapping_eq(a, b, 'data group item')
def _assert_mapping_eq(
    a: Mapping[str, Any], b: Mapping[str, Any], map_name: str
) -> None:
    """Compare two mappings entry-wise, labelling failures with *map_name*."""
    # Key-set mismatch is reported against the plural name (e.g. "coords").
    with _add_note(map_name + 's'):
        assert a.keys() == b.keys()
    for name, var_a in a.items():
        # Per-entry failures are tagged e.g. "coord 'x'".
        with _add_note("{} '{}'", map_name, name):
            _assert_identical_impl(var_a, b[name])
@contextmanager
def _add_note(loc: str, *args: str) -> Iterator[None]:
try:
yield
except AssertionError as exc:
if hasattr(exc, 'add_note'):
# Needs Python >= 3.11
exc.add_note(f'PREPOSITION {loc.format(*args)}')
raise
__all__ = ['assert_identical'] |
298,881 | logic app resource id | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetActionResult',
'AwaitableGetActionResult',
'get_action',
'get_action_output',
]
@pulumi.output_type
class GetActionResult:
"""
Action for alert rule.
"""
def __init__(__self__, etag=None, id=None, METHOD_NAME=None, name=None, system_data=None, type=None, workflow_id=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'logic_app_resource_id' to be a str")
pulumi.set(__self__, "logic_app_resource_id", METHOD_NAME)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if workflow_id and not isinstance(workflow_id, str):
raise TypeError("Expected argument 'workflow_id' to be a str")
pulumi.set(__self__, "workflow_id", workflow_id)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="logicAppResourceId")
def METHOD_NAME(self) -> str:
"""
Logic App Resource Id, /subscriptions/{my-subscription}/resourceGroups/{my-resource-group}/providers/Microsoft.Logic/workflows/{my-workflow-id}.
"""
return pulumi.get(self, "logic_app_resource_id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="workflowId")
def workflow_id(self) -> Optional[str]:
"""
The name of the logic app's workflow.
"""
return pulumi.get(self, "workflow_id")
class AwaitableGetActionResult(GetActionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetActionResult(
etag=self.etag,
id=self.id,
METHOD_NAME=self.METHOD_NAME,
name=self.name,
system_data=self.system_data,
type=self.type,
workflow_id=self.workflow_id)
def get_action(action_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
rule_id: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetActionResult:
"""
Gets the action of alert rule.
:param str action_id: Action ID
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str rule_id: Alert rule ID
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['actionId'] = action_id
__args__['resourceGroupName'] = resource_group_name
__args__['ruleId'] = rule_id
__args__['workspaceName'] = workspace_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:securityinsights/v20230801preview:getAction', __args__, opts=opts, typ=GetActionResult).value
return AwaitableGetActionResult(
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
METHOD_NAME=pulumi.get(__ret__, 'logic_app_resource_id'),
name=pulumi.get(__ret__, 'name'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'),
workflow_id=pulumi.get(__ret__, 'workflow_id'))
@_utilities.lift_output_func(get_action)
def get_action_output(action_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
rule_id: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetActionResult]:
"""
Gets the action of alert rule.
:param str action_id: Action ID
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str rule_id: Alert rule ID
:param str workspace_name: The name of the workspace.
"""
... |
298,882 | db metrics | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import json
import pprint
import re
import time
import boto3
import click
import tabulate
import yaml
from c7n_sphere11.cli import BASE_URL
from c7n_sphere11.client import Client
from c7n.utils import local_session
# Top-level click group; subcommands attach via @admin.command().
@click.group()
def admin():
    """Sphere11, resource locks"""
@admin.command()
@click.option('--config')
def format_json(config):
    """format config for lambda exec
    """
    # Convert the YAML config file to the JSON form expected at lambda exec.
    with open(config) as fh:
        print(json.dumps(yaml.safe_load(fh.read()), indent=2))
def render_metrics(header, values):
    # Print one metric row: padded name, min/max, and a unicode sparkline.
    # Empty series are skipped entirely.
    if not values:
        return
    click.echo(
        "".join((
            " ",
            header.ljust(20),
            ("min:%0.1f" % min(values)).ljust(12),
            ("max:%0.1f" % max(values)).ljust(12),
            raster_metrics(values))))
def raster_metrics(data):
    """Render *data* as a unicode bar sparkline.

    Each value is bucketed into one of eight equal-width bins spanning
    [min(data), max(data)] and drawn as the corresponding block character.
    """
    BARS = u'▁▂▃▄▅▆▇█'
    incr = min(data)
    width = (max(data) - incr) / (len(BARS) - 1)
    if width == 0:
        # All values identical: the half-open bin test below would match
        # nothing and render an empty graph, so draw a flat baseline instead.
        return BARS[0] * len(data)
    bins = [i * width + incr for i in range(len(BARS))]
    # Each value falls in exactly one half-open bin [thres, thres + width);
    # the maximum lands in the last bin since max < max + width.
    indexes = [i for n in data
               for i, thres in enumerate(bins)
               if thres <= n < thres + width]
    return ''.join(BARS[i] for i in indexes)
@admin.command()
def check():
    """Sanity check api deployment
    """
    # Time a simple version call against the endpoint and dump the full
    # response (headers + body) for inspection.
    t = time.time()
    results = Client(BASE_URL).version()
    print("Endpoint", BASE_URL)
    print("Response Time %0.2f" % (time.time() - t))
    print("Headers")
    for k, v in results.headers.items():
        print(" %s: %s" % (k, v))
    print("Body")
    print(results.text)
@admin.command()
@click.option('--function', help='function name', required=True)
@click.option('--api', help='api name')
@click.option(
'-s', '--start', help='relative time to start from', default="1h")
@click.option(
'-p', '--period', help='metrics period', default="1m")
def metrics(function, api, start, period):
"""lambda/api/db metrics"""
from c7n.mu import LambdaManager
manager = LambdaManager(boto3.Session)
start = parse_date(start)
period = int(abs(parse_timedelta(period).total_seconds()))
print("Lambda Metrics")
metrics = manager.metrics(
[{'FunctionName': function}],
start=start, end=datetime.utcnow(),
period=period)
for k in ('Invocations', 'Throttles', 'Errors'):
values = [n['Sum'] for n in metrics[0][k]]
render_metrics(k, values)
if not api:
return
print("Api Metrics")
metrics = gateway_metrics(
boto3.Session, api, "latest", start, datetime.utcnow(), period)
for k, data in metrics.items():
if "Count" in k:
values = [n['Sum'] for n in data]
else:
values = [n['Average'] for n in data]
render_metrics(k, values)
print("Db Metrics")
metrics = METHOD_NAME(
boto3.Session, "Sphere11.Dev.ResourceLocks",
start, datetime.utcnow(), period)
for k, data in metrics.items():
values = [n['Average'] for n in data]
render_metrics(k, values)
def METHOD_NAME(session_factory, table_name, start, end, period):
    """Fetch DynamoDB CloudWatch datapoints (Average) for *table_name*.

    Returns a dict keyed by metric name with the word 'Capacity' removed
    (e.g. 'ConsumedReadUnits'), mapping to lists of datapoints.
    """
    metrics = local_session(session_factory).client('cloudwatch')
    values = {}
    for m in (
            "ConsumedReadCapacityUnits",
            "ConsumedWriteCapacityUnits",
            "ThrottledRequests",
            "ReadThrottleEvents",
            "WriteThrottleEvents",
            "ReturnedItemCount",
            "SuccessfulRequestLatency"
            # "ReturnedRecordsCount"
    ):
        values[m.replace('Capacity', '')] = metrics.get_metric_statistics(
            Namespace="AWS/DynamoDB",
            Dimensions=[
                {'Name': 'TableName', 'Value': table_name}
            ],
            Statistics=["Average"],
            StartTime=start,
            EndTime=end,
            Period=period,
            MetricName=m)['Datapoints']
    return values
def gateway_metrics(session_factory, gateway_id, stage_name, start, end, period):
    """Fetch API Gateway CloudWatch datapoints for the given API and stage.

    Returns a dict mapping metric name to a list of datapoints carrying
    Average and Sum statistics.
    """
    metrics = local_session(session_factory).client('cloudwatch')
    values = {}
    # Metric names per the AWS/ApiGateway CloudWatch namespace.  The server
    # error metric was previously misspelled "5XError", which made CloudWatch
    # silently return no datapoints; the correct name is "5XXError".
    for m in ("4XXError", "5XXError",
              "CacheHitCount", "CacheMissCount",
              "Count",
              "IntegrationLatency", "Latency"):
        values[m] = metrics.get_metric_statistics(
            Namespace="AWS/ApiGateway",
            Dimensions=[
                {'Name': 'ApiName', 'Value': gateway_id},
                {'Name': 'Stage', 'Value': stage_name},
            ],
            Statistics=["Average", "Sum"],
            StartTime=start,
            EndTime=end,
            Period=period,
            MetricName=m)['Datapoints']
    return values
def parse_timedelta(datetime_text, default=timedelta(seconds=60 * 5 * -1)):
    """Parse a relative-time expression like '5m', '2 hours' or '3 days ago'.

    Returns a negative timedelta for matched expressions; unmatched text
    yields the negation of *default*.
    """
    # from awslogs script
    ago_regexp = r'(\d+)\s?(m|minute|minutes|h|hour|hours|d|day|days|w|weeks|weeks)(?: ago)?'
    ago_match = re.match(ago_regexp, datetime_text)
    if not ago_match:
        return -default
    amount_text, unit_text = ago_match.groups()
    # The first letter of the unit selects the multiplier in seconds.
    seconds_per_unit = {'m': 60, 'h': 3600, 'd': 86400, 'w': 604800}[unit_text[0]]
    return timedelta(seconds=seconds_per_unit * int(amount_text) * -1)


def parse_date(datetime_text):
    """Resolve *datetime_text* into an absolute UTC datetime relative to now."""
    return datetime.utcnow() + parse_timedelta(datetime_text)
@admin.command()
@click.option('--account-id', help='account id')
def records(account_id):
"""Fetch locks data
"""
s = boto3.Session()
table = s.resource('dynamodb').Table('Sphere11.Dev.ResourceLocks')
results = table.scan()
for r in results['Items']:
if 'LockDate' in r:
r['LockDate'] = datetime.fromtimestamp(r['LockDate'])
if 'RevisionDate' in r:
r['RevisionDate'] = datetime.fromtimestamp(r['RevisionDate'])
print(tabulate.tabulate(
results['Items'],
headers="keys",
tablefmt='fancy_grid'))
@admin.command()
@click.option('--function', help='function name', required=True)
def flush_pending(function):
"""Attempt to acquire any pending locks.
"""
s = boto3.Session()
client = s.client('lambda')
results = client.invoke(
FunctionName=function,
Payload=json.dumps({'detail-type': 'Scheduled Event'})
)
content = results.pop('Payload').read()
pprint.pprint(results)
pprint.pprint(json.loads(content))
@admin.command()
def config_status():
""" Check config status in an account.
"""
s = boto3.Session()
client = s.client('config')
channels = client.describe_delivery_channel_status()[
'DeliveryChannelsStatus']
for c in channels:
print(yaml.safe_dump({
c['name']: dict(
snapshot=str(
c['configSnapshotDeliveryInfo'].get('lastSuccessfulTime')),
history=str(
c['configHistoryDeliveryInfo'].get('lastSuccessfulTime')),
stream=str(
c['configStreamDeliveryInfo'].get('lastStatusChangeTime'))
),
}, default_flow_style=False))
@admin.command()
@click.option('--account-id', required=True)
@click.option('--region', required=True)
def delta(account_id, region):
    # Print the raw delta response for the account/region from the API.
    print(Client(BASE_URL).delta(account_id, region).text)
@admin.command()
@click.option('--reload/--no-reload', default=True)
@click.option('--port', default=8080)
def local(reload, port):
    """run local app server, assumes into the account
    """
    # Imports are deferred so the heavy app/bottle/c7n stack is only loaded
    # when this command actually runs.
    import logging
    from bottle import run
    from app import controller, app
    from c7n.resources import load_resources
    load_resources()
    print("Loaded resources definitions")
    logging.basicConfig(level=logging.DEBUG)
    # botocore is extremely chatty at DEBUG; keep it at WARNING.
    logging.getLogger('botocore').setLevel(logging.WARNING)
    # NOTE(review): presumably provision() reports whether the backing table
    # was freshly created — confirm against controller.db.
    if controller.db.provision():
        print("Table Created")
    run(app, reloader=reload, port=port)
if __name__ == '__main__':
admin() |
298,883 | parse args | import re
import sys
import os
class G:
    # Global state for this procedural script; G serves as its namespace.
    # Parsed command-line options; '-' marks "not provided".
    opts = {'jobname': '-', 'compiler': '-', 'jenkins_configure': '-', 'label': '-', 'netmod': '-'}
    # Tuple of (jobname, compiler, jenkins_configure, netmod, label) matched
    # against the five condition fields of each xfail.conf rule.
    states = []
    # Maps a testlist path to the list of {cond, pat, reason} rules to apply.
    xfails = {}
class RE:
    """Thin wrapper around the `re` module that remembers the last match.

    Mimics Perl-style "match then read $1" usage: call RE.match()/RE.search()
    and then read groups from RE.m.
    """
    # Last match object produced by match()/search(); None when the last
    # attempt failed.
    m = None

    @staticmethod
    def match(pat, text, flags=0):
        """Anchor-match `pat` at the start of `text`; remember and return the result."""
        # Fix: declared as @staticmethod (the originals were plain functions
        # that only worked when accessed via the class), and the `str` builtin
        # is no longer shadowed. All call sites use positional arguments, so
        # renaming the parameter is safe.
        RE.m = re.match(pat, text, flags)
        return RE.m

    @staticmethod
    def search(pat, text, flags=0):
        """Search `pat` anywhere in `text`; remember and return the result."""
        RE.m = re.search(pat, text, flags)
        return RE.m
def main():
    # Parse command-line options plus the -f xfail.conf file, then annotate
    # the matching testlists with " xfail=<reason>" markers.
    METHOD_NAME()
    load_xfail_conf()
    # Testlist paths in the config are relative to test/mpi.
    if os.path.exists('test/mpi'):
        os.chdir('test/mpi')
    apply_xfails()
# ---- subroutines --------------------------------------------
def METHOD_NAME():
    """Parse command-line arguments into G.opts and derive G.states.

    Three argument forms are supported:
      * short options (-j/-c/-o/-q/-m/-f) whose value is the next argument,
      * long options of the form --name=value,
      * a bare path to an existing file, taken as the xfail config.

    Raises:
        Exception: on an unrecognized argument or a missing -f XFAIL_CONF.
    """
    opt_names = {'j': "jobname", 'c': "compiler", 'o': "jenkins_configure", 'q': "label", 'm': "netmod", 'f': "XFAIL_CONF"}
    last_opt = ''
    for a in sys.argv[1:]:
        if last_opt:
            # Previous argument was a short option; this one is its value.
            G.opts[last_opt] = a
            last_opt = ""
        elif RE.match(r'-(\w)$', a):
            if RE.m.group(1) in opt_names:
                last_opt = opt_names[RE.m.group(1)]
            else:
                # Unknown short options are stored under their bare letter.
                last_opt = RE.m.group(1)
        elif RE.match(r'--(\w+)=(.*)', a):
            G.opts[RE.m.group(1)] = RE.m.group(2)
        elif os.path.exists(a):
            G.opts['XFAIL_CONF'] = a
        else:
            raise Exception("Unrecognized option [%s]\n" % a)
    if 'dir' in G.opts:
        os.chdir(G.opts['dir'])
    # Order must mirror the five condition fields of xfail.conf rules.
    G.states = (G.opts['jobname'], G.opts['compiler'], G.opts['jenkins_configure'], G.opts['netmod'], G.opts['label'])
    if 'XFAIL_CONF' not in G.opts:
        # Bug fix: the original interpolated the literal integer 0 (a
        # mistranslated Perl $0); report the program name instead.
        raise Exception("%s: missing -f XFAIL_CONF\n" % sys.argv[0])
    str_states = ' | '.join(G.states)
    print("set_xfail states: %s" % str_states)
def load_xfail_conf():
    """Parse G.opts['XFAIL_CONF'] into G.xfails.

    Two rule syntaxes are recognized (besides comments and blank lines):
      * legacy "sed" form:  <cond> sed -i "s+\\(<pat>\\)+\\1 xfail=<reason>+g" test/mpi/<testlist>
      * direct form:        <cond> /<pat>/ xfail=<reason> <testlist>
    A rule is kept only when its five-field <cond> matches G.states.
    """
    with open(G.opts['XFAIL_CONF'], "r") as In:
        for line in In:
            if RE.match(r'^\s*#', line):
                pass
            elif RE.match(r'^\s*$', line):
                pass
            elif RE.match(r'\s*(.*?)\s*sed\s+-i\s*"([^"]+)"\s+(test.mpi.*)', line):
                # -- old "sed" pattern
                cond, pat, testlist = RE.m.group(1, 2, 3)
                if match_states(cond, G.states):
                    # Collapse runs of whitespace in the condition for display.
                    cond = re.sub(r'\s\s+', ' ', cond)
                    if testlist not in G.xfails:
                        G.xfails[testlist] = []
                    # Extract search pattern and xfail reason out of the sed
                    # substitution; the delimiter may be '+' or '|'.
                    if RE.search(r's[\+\|]\\\((.*)\\\)[\+\|]\\1\s+xfail=(.*)[\+\|]g', pat):
                        G.xfails[testlist].append({'cond': cond, 'pat': RE.m.group(1), 'reason': RE.m.group(2)})
                    else:
                        raise Exception("Unrecognized xfail.conf rule: [%s]\n" % pat)
                else:
                    # print(" Unmatched state [%s] - [%s] - %s" % (cond, pat, testlist))
                    pass
            elif RE.match(r'\s*(.*?)\s*\/(.*)\/\s*xfail=(\w*)\s*(\S+)\s*$', line):
                # -- new direct pattern
                cond, pat, reason, testlist = RE.m.group(1, 2, 3, 4)
                # Testlist paths are stored relative to test/mpi.
                testlist = re.sub(r'^test\/mpi\/', '', testlist)
                if match_states(cond, G.states):
                    cond = re.sub(r'\s\s+', ' ', cond)
                    if testlist not in G.xfails:
                        G.xfails[testlist] = []
                    G.xfails[testlist].append({'cond': cond, 'pat': pat, 'reason': reason})
                else:
                    # print(" Unmatched state [%s] - [%s] - %s" % (cond, pat, testlist))
                    pass
            else:
                print("Not parsed: [%s]" % line.rstrip())
def apply_xfails():
    """Append " xfail=<reason>" to matching lines of each testlist file.

    For every testlist in G.xfails the file is rewritten in place (unless
    G.opts contains "dryrun"). Each line receives at most one marker, and
    lines already carrying an xfail= marker are left untouched.
    """
    for f in sorted (G.xfails.keys()):
        if not os.path.exists(f):
            print("! testlist: %s does not exist\n" % f)
            continue
        rules = G.xfails[f]
        lines = []
        n_applied = 0
        print("testlist: %s ..." % f)
        with open(f, "r") as In:
            for line in In:
                for r in rules:
                    # Only annotate lines that are not already marked.
                    if not RE.search(r'xfail=', line):
                        if RE.search(r['pat'], line):
                            print("  %s:\t%s\t-> xfail=%s" % (r['cond'], r['pat'], r['reason']))
                            if "dryrun" not in G.opts:
                                line = line.rstrip() + " xfail=%s\n" % (r['reason'])
                            n_applied+=1
                            break
                lines.append(line)
        if n_applied:
            with open(f, "w") as Out:
                for l in lines:
                    print(l, end='', file=Out)
        else:
            # A rule set that applied nothing usually means a stale pattern.
            print("  %s not changed. Is there a mistake? Rules list:" % f, file=sys.stderr)
            for r in rules:
                print("  %s \t:%s" % (r['pat'], r['reason']), file=sys.stderr)
def match_states(cond, states):
    """Return 1 when the five whitespace-separated fields of `cond` match `states`.

    Each field is a regex (or the '*' wildcard) matched against the
    corresponding entry of `states`:
    (jobname, compiler, jenkins_configure, netmod, label).
    """
    tlist = cond.split()
    for i in range(5):
        # Bug fix: compare against the `states` argument instead of always
        # reading the global G.states — the parameter was silently ignored.
        # (All current callers pass G.states, so behavior is unchanged.)
        if not match(tlist[i], states[i]):
            return 0
    return 1
def match(pat, var):
    """Return 1 when `pat` is the '*' wildcard or regex-matches the start of `var`, else 0."""
    if pat == '*':
        return 1
    return 1 if RE.match(pat, var) else 0
# ---------------------------------------------------------
if __name__ == "__main__":
main() |
298,884 | configure rules list | '''
copyright: Copyright (C) 2015-2022, Wazuh Inc.
Created by Wazuh, Inc. <info@wazuh.com>.
This program is free software; you can redistribute it and/or modify it under the terms of GPLv2
type: integration
brief: The 'wazuh-logtest' tool allows the testing and verification of rules and decoders against provided log examples
remotely inside a sandbox in 'wazuh-analysisd'. This functionality is provided by the manager, whose work
parameters are configured in the ossec.conf file in the XML rule_test section. Test logs can be evaluated through
the 'wazuh-logtest' tool or by making requests via RESTful API. These tests will check if the logtest
       configuration is valid. Also checks that rules, decoders, and alerts match the logs correctly.
components:
- logtest
suite: ruleset_refresh
targets:
- manager
daemons:
- wazuh-analysisd
os_platform:
- linux
os_version:
- Arch Linux
- Amazon Linux 2
- Amazon Linux 1
- CentOS 8
- CentOS 7
- Debian Buster
- Red Hat 8
- Ubuntu Focal
- Ubuntu Bionic
references:
- https://documentation.wazuh.com/current/user-manual/reference/tools/wazuh-logtest.html
- https://documentation.wazuh.com/current/user-manual/capabilities/wazuh-logtest/index.html
- https://documentation.wazuh.com/current/user-manual/ruleset/testing.html?highlight=logtest
- https://documentation.wazuh.com/current/user-manual/capabilities/wazuh-logtest/logtest-configuration.html
- https://documentation.wazuh.com/current/user-manual/reference/daemons/wazuh-analysisd.html
tags:
- logtest_configuration
'''
import os
import pytest
from wazuh_testing.tools import WAZUH_PATH, LOGTEST_SOCKET_PATH
from yaml import safe_load
from shutil import copy
from json import loads
# Marks
pytestmark = [pytest.mark.linux, pytest.mark.tier(level=0), pytest.mark.server]
# Configurations
test_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
messages_path = os.path.join(test_data_path, 'log_alert_level.yaml')
# Test cases are loaded once at import time and drive the parametrized fixture below.
with open(messages_path) as f:
    test_cases = safe_load(f)
# Variables
receiver_sockets_params = [(LOGTEST_SOCKET_PATH, 'AF_UNIX', 'TCP')]
# Populated by the connect_to_sockets_* fixtures before each test runs.
receiver_sockets = None
# Fixtures
@pytest.fixture(scope='function')
def METHOD_NAME(get_configuration, request):
    """Install the test case's custom rule file for the test, then clean it up.

    Restarting Wazuh is not needed for applying the configuration, it is optional.
    """
    # Install the rule file described by the current test case.
    rules_path = os.path.join(WAZUH_PATH, get_configuration['rule_dir'])
    if not os.path.exists(rules_path):
        os.makedirs(rules_path)
    source_rule = os.path.join(test_data_path, get_configuration['rule_file'])
    installed_rule = os.path.join(rules_path, get_configuration['rule_file'])
    copy(source_rule, installed_rule)
    yield
    # Restore the previous configuration: drop the rule, and the directory if
    # it is now empty.
    os.remove(installed_rule)
    if not os.listdir(rules_path):
        os.rmdir(rules_path)
@pytest.fixture(scope='module', params=test_cases, ids=[test_case['name'] for test_case in test_cases])
def get_configuration(request):
    """Get configurations from the module."""
    # Parametrized over every case loaded from log_alert_level.yaml.
    return request.param
# Tests
def test_rule_list(restart_required_logtest_daemons, get_configuration,
                   configure_environment, METHOD_NAME,
                   wait_for_logtest_startup, connect_to_sockets_function):
    '''
    description: Check that after modifying the alert level it takes effect when opening new logtest sessions, without
                 having to reset the manager. To do this, it sends a request to logtest socket and gets its response.
                 Then, it checks that the expected alert matches.
    wazuh_min_version: 4.2.0
    tier: 0
    parameters:
        - restart_required_logtest_daemons:
            type: fixture
            brief: Wazuh logtests daemons handler.
        - get_configuration:
            type: fixture
            brief: Get configurations from the module.
        - configure_environment:
            type: fixture
            brief: Configure a custom environment for testing. Restart Wazuh is needed for applying the configuration.
        - configure_rules_list:
            type: fixture
            brief: Configure custom rules for testing.
        - wait_for_logtest_startup:
            type: fixture
            brief: Wait until logtest has begun.
        - connect_to_sockets_function:
            type: fixture
            brief: Function scope version of 'connect_to_sockets' which connects to the specified sockets for the test.
    assertions:
        - Verify that the result does not contain errors.
        - Verify that the 'rule_id' sent matches with the result.
        - Verify that the alert sent matches with the result.
    input_description: Some test cases are defined in the module. These include some input configurations stored in
                       the 'log_alert_level.yaml'.
    expected_output:
        - result.error == 0
        - result.data.output.rule.id == test_case.rule_id
        - result.data.alert == test_case.alert
    tags:
        - rules
        - analysisd
    '''
    # send the logtest request
    receiver_sockets[0].send(get_configuration['input'], size=True)
    # receive logtest reply and parse it
    response = receiver_sockets[0].receive(size=True).rstrip(b'\x00').decode()
    result = loads(response)
    assert result['error'] == 0
    assert result['data']['output']['rule']['id'] == get_configuration['rule_id']
    # Bug fix: compare by equality, not identity. The expected value comes
    # from YAML and the actual from JSON; `is` only works by accident for
    # interned booleans and breaks for equivalent non-identical values.
    assert result['data']['alert'] == get_configuration['alert']
298,885 | writetsv | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from functools import partial
import os
import paddle
from paddle.io import DataLoader
import pandas as pd
from tqdm import tqdm
from paddlenlp.datasets import load_dataset
from paddlenlp.data import Tuple, Pad
from paddlenlp.transformers import MPNetForSequenceClassification, MPNetTokenizer
from run_glue import convert_example
# Maps a GLUE task name to the submission file name(s) expected by the GLUE
# leaderboard; MNLI produces two files (matched / mismatched test splits).
task2filename = {
    "cola": "CoLA.tsv",
    "sst-2": "SST-2.tsv",
    "mrpc": "MRPC.tsv",
    "sts-b": "STS-B.tsv",
    "qqp": "QQP.tsv",
    "mnli": ["MNLI-m.tsv", "MNLI-mm.tsv"],
    "rte": "RTE.tsv",
    "qnli": "QNLI.tsv",
    "wnli": "WNLI.tsv",
}
def get_args():
    """Build and parse the command-line arguments for GLUE test-set prediction.

    Returns the parsed namespace with `task_name` normalized to lower case.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--ckpt_path",
        default=None,
        type=str,
        required=True,
    )
    arg_parser.add_argument(
        "--task_name",
        type=str,
        choices=["cola", "sst-2", "mrpc", "sts-b", "qqp", "mnli", "rte", "qnli", "wnli"],
        default="cola",
        required=True,
        help="task_name.",
    )
    arg_parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    arg_parser.add_argument(
        "--batch_size",
        default=32,
        type=int,
        help="Batch size per GPU/CPU for training.",
    )
    parsed = arg_parser.parse_args()
    parsed.task_name = parsed.task_name.lower()
    return parsed
def predict(data_loader, model, id2label=None):
    """Run inference over `data_loader` and return predictions as a flat list.

    When `id2label` is given, argmax class indices are mapped to label values;
    otherwise the raw (regression) scores are returned.
    """
    predictions = []
    progress_bar = tqdm(
        range(len(data_loader)),
        desc="Predition Iteration",
    )
    with paddle.no_grad():
        for input_ids, segment_ids in data_loader:
            logits = model(input_ids)
            if id2label is None:
                predictions.extend(logits.squeeze(-1).cpu().tolist())
            else:
                class_ids = paddle.argmax(logits, axis=-1).cpu().tolist()
                predictions.extend(id2label[i] for i in class_ids)
            progress_bar.update(1)
    return predictions
def METHOD_NAME(outputs, file):
    """Write predictions to a GLUE-style TSV with `index` and `prediction` columns."""
    frame = pd.DataFrame({"index": range(len(outputs)), "prediction": outputs})
    frame.to_csv(file, sep="\t", index=False)
    print(f"Save to {file}.")
def predict2file(args):
    """Run test-set inference for `args.task_name` and write submission TSVs.

    MNLI is special-cased: it has matched and mismatched test splits, each
    written to its own file under the "template" directory.
    """
    if args.task_name == "mnli":
        test_ds_matched, test_ds_mismatched = load_dataset("glue", "mnli", splits=["test_matched", "test_mismatched"])
        id2label = dict(zip(range(len(test_ds_matched.label_list)), test_ds_matched.label_list))
    else:
        test_ds = load_dataset("glue", args.task_name, splits="test")
        # Regression tasks (no label list) keep numeric predictions.
        if test_ds.label_list is not None:
            id2label = dict(zip(range(len(test_ds.label_list)), test_ds.label_list))
        else:
            id2label = None
    model = MPNetForSequenceClassification.from_pretrained(args.ckpt_path)
    model.eval()
    tokenizer = MPNetTokenizer.from_pretrained(args.ckpt_path)
    # Collate: pad input ids and token-type ids independently per batch.
    batchify_fn = lambda samples, fn=Tuple(
        Pad(axis=0, pad_val=tokenizer.pad_token_id),
        Pad(axis=0, pad_val=tokenizer.pad_token_type_id),
    ): fn(samples)
    trans_func = partial(
        convert_example,
        tokenizer=tokenizer,
        label_list=None,
        max_seq_length=args.max_seq_length,
        is_test=True,
    )
    if args.task_name == "mnli":
        test_ds_matched = test_ds_matched.map(trans_func, lazy=True)
        test_ds_mismatched = test_ds_mismatched.map(trans_func, lazy=True)
        # shuffle=False keeps row order aligned with the submission index.
        test_batch_sampler_matched = paddle.io.BatchSampler(test_ds_matched, batch_size=args.batch_size, shuffle=False)
        test_data_loader_matched = DataLoader(
            dataset=test_ds_matched,
            batch_sampler=test_batch_sampler_matched,
            collate_fn=batchify_fn,
            num_workers=2,
            return_list=True,
        )
        test_batch_sampler_mismatched = paddle.io.BatchSampler(
            test_ds_mismatched, batch_size=args.batch_size, shuffle=False
        )
        test_data_loader_mismatched = DataLoader(
            dataset=test_ds_mismatched,
            batch_sampler=test_batch_sampler_mismatched,
            collate_fn=batchify_fn,
            num_workers=2,
            return_list=True,
        )
        file_m = os.path.join("template", task2filename[args.task_name][0])
        file_mm = os.path.join("template", task2filename[args.task_name][1])
        matched_outputs = predict(test_data_loader_matched, model, id2label)
        mismatched_outputs = predict(test_data_loader_mismatched, model, id2label)
        METHOD_NAME(matched_outputs, file_m)
        METHOD_NAME(mismatched_outputs, file_mm)
    else:
        test_ds = test_ds.map(trans_func, lazy=True)
        test_batch_sampler = paddle.io.BatchSampler(test_ds, batch_size=args.batch_size, shuffle=False)
        test_data_loader = DataLoader(
            dataset=test_ds,
            batch_sampler=test_batch_sampler,
            collate_fn=batchify_fn,
            num_workers=2,
            return_list=True,
        )
        predict_outputs = predict(test_data_loader, model, id2label)
        file = os.path.join("template", task2filename[args.task_name])
        METHOD_NAME(predict_outputs, file)
if __name__ == "__main__":
args = get_args()
os.makedirs("template", exist_ok=True)
predict2file(args) |
298,886 | embedded artifact | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test cases for :module:`artifact_service_client`."""
# pytype: skip-file
import contextlib
import io
import threading
import unittest
from urllib.parse import quote
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_artifact_api_pb2
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.runners.portability import artifact_service
from apache_beam.utils import proto_utils
class InMemoryFileManager(object):
  """Dict-backed stand-in for a file system, used by the artifact tests."""
  def __init__(self, contents=()):
    self._contents = dict(contents)

  def get(self, path):
    """Return the raw bytes stored under `path`."""
    return self._contents[path]

  def file_reader(self, path):
    """Open a readable in-memory byte stream over the stored contents."""
    return io.BytesIO(self._contents[path])

  def file_writer(self, name):
    """Return (context manager yielding a writable buffer, destination path).

    Whatever is written to the buffer is committed under the returned path
    when the context manager exits.
    """
    dest = 'prefix:' + name

    @contextlib.contextmanager
    def writable():
      sink = io.BytesIO()
      yield sink
      sink.seek(0)
      self._contents[dest] = sink.read()

    return writable(), dest
class ArtifactServiceTest(unittest.TestCase):
  """Tests for ArtifactRetrievalService and ArtifactStagingService."""
  def file_artifact(self, path):
    """Build a FILE-typed artifact proto pointing at `path`."""
    return beam_runner_api_pb2.ArtifactInformation(
        type_urn=common_urns.artifact_types.FILE.urn,
        type_payload=beam_runner_api_pb2.ArtifactFilePayload(
            path=path).SerializeToString())
  def METHOD_NAME(self, data, name=None):
    """Build an EMBEDDED-typed artifact proto carrying `data` inline.

    When `name` is given, a STAGING_TO role is attached so the artifact
    will be staged under that file name.
    """
    return beam_runner_api_pb2.ArtifactInformation(
        type_urn=common_urns.artifact_types.EMBEDDED.urn,
        type_payload=beam_runner_api_pb2.EmbeddedFilePayload(
            data=data).SerializeToString(),
        role_urn=common_urns.artifact_roles.STAGING_TO.urn if name else None,
        role_payload=beam_runner_api_pb2.ArtifactStagingToRolePayload(
            staged_name=name).SerializeToString() if name else None)
  def test_file_retrieval(self):
    """FILE artifacts resolve to themselves and stream back in chunk_size pieces."""
    file_manager = InMemoryFileManager({
        'path/to/a': b'a', 'path/to/b': b'b' * 37
    })
    retrieval_service = artifact_service.ArtifactRetrievalService(
        file_manager.file_reader, chunk_size=10)
    dep_a = self.file_artifact('path/to/a')
    self.assertEqual(
        retrieval_service.ResolveArtifacts(
            beam_artifact_api_pb2.ResolveArtifactsRequest(artifacts=[dep_a])),
        beam_artifact_api_pb2.ResolveArtifactsResponse(replacements=[dep_a]))
    self.assertEqual(
        list(
            retrieval_service.GetArtifact(
                beam_artifact_api_pb2.GetArtifactRequest(artifact=dep_a))),
        [beam_artifact_api_pb2.GetArtifactResponse(data=b'a')])
    # 37 bytes at chunk_size=10 -> three full chunks plus a 7-byte tail.
    self.assertEqual(
        list(
            retrieval_service.GetArtifact(
                beam_artifact_api_pb2.GetArtifactRequest(
                    artifact=self.file_artifact('path/to/b')))),
        [
            beam_artifact_api_pb2.GetArtifactResponse(data=b'b' * 10),
            beam_artifact_api_pb2.GetArtifactResponse(data=b'b' * 10),
            beam_artifact_api_pb2.GetArtifactResponse(data=b'b' * 10),
            beam_artifact_api_pb2.GetArtifactResponse(data=b'b' * 7)
        ])
  def test_embedded_retrieval(self):
    """EMBEDDED artifacts stream back their inline payload."""
    retrieval_service = artifact_service.ArtifactRetrievalService(None)
    embedded_dep = self.METHOD_NAME(b'some_data')
    self.assertEqual(
        list(
            retrieval_service.GetArtifact(
                beam_artifact_api_pb2.GetArtifactRequest(
                    artifact=embedded_dep))),
        [beam_artifact_api_pb2.GetArtifactResponse(data=b'some_data')])
  def test_url_retrieval(self):
    """URL artifacts are fetched by URL; here a file: URL to this very file."""
    retrieval_service = artifact_service.ArtifactRetrievalService(None)
    url_dep = beam_runner_api_pb2.ArtifactInformation(
        type_urn=common_urns.artifact_types.URL.urn,
        type_payload=beam_runner_api_pb2.ArtifactUrlPayload(
            url='file:' + quote(__file__)).SerializeToString())
    content = b''.join([
        r.data for r in retrieval_service.GetArtifact(
            beam_artifact_api_pb2.GetArtifactRequest(artifact=url_dep))
    ])
    with open(__file__, 'rb') as fin:
      self.assertEqual(content, fin.read())
  def test_push_artifacts(self):
    """Staging round-trip: client-resolved artifacts end up as staged FILEs."""
    unresolved = beam_runner_api_pb2.ArtifactInformation(type_urn='unresolved')
    resolved_a = self.METHOD_NAME(data=b'a', name='a.txt')
    resolved_b = self.METHOD_NAME(data=b'bb', name='b.txt')
    dep_big = self.METHOD_NAME(data=b'big ' * 100, name='big.txt')
    # Fake client-side artifact service: expands the 'unresolved' marker into
    # two embedded artifacts and streams embedded payloads in 13-byte chunks.
    class TestArtifacts(object):
      def ResolveArtifacts(self, request):
        replacements = []
        for artifact in request.artifacts:
          if artifact.type_urn == 'unresolved':
            replacements += [resolved_a, resolved_b]
          else:
            replacements.append(artifact)
        return beam_artifact_api_pb2.ResolveArtifactsResponse(
            replacements=replacements)
      def GetArtifact(self, request):
        if request.artifact.type_urn == common_urns.artifact_types.EMBEDDED.urn:
          content = proto_utils.parse_Bytes(
              request.artifact.type_payload,
              beam_runner_api_pb2.EmbeddedFilePayload).data
          for k in range(0, len(content), 13):
            yield beam_artifact_api_pb2.GetArtifactResponse(
                data=content[k:k + 13])
        else:
          raise NotImplementedError
    file_manager = InMemoryFileManager()
    server = artifact_service.ArtifactStagingService(file_manager.file_writer)
    server.register_job('staging_token', {'env': [unresolved, dep_big]})
    # "Push" artifacts as if from a client.
    t = threading.Thread(
        target=lambda: artifact_service.offer_artifacts(
            server, TestArtifacts(), 'staging_token'))
    t.daemon = True
    t.start()
    resolved_deps = server.resolved_deps('staging_token', timeout=5)['env']
    expected = {
        'a.txt': b'a',
        'b.txt': b'bb',
        'big.txt': b'big ' * 100,
    }
    for dep in resolved_deps:
      self.assertEqual(dep.type_urn, common_urns.artifact_types.FILE.urn)
      self.assertEqual(dep.role_urn, common_urns.artifact_roles.STAGING_TO.urn)
      type_payload = proto_utils.parse_Bytes(
          dep.type_payload, beam_runner_api_pb2.ArtifactFilePayload)
      role_payload = proto_utils.parse_Bytes(
          dep.role_payload, beam_runner_api_pb2.ArtifactStagingToRolePayload)
      self.assertTrue(
          type_payload.path.endswith(role_payload.staged_name),
          type_payload.path)
      self.assertEqual(
          file_manager.get(type_payload.path),
          expected.pop(role_payload.staged_name))
    # Every expected staged file must have been consumed exactly once.
    self.assertEqual(expected, {})
if __name__ == '__main__':
unittest.main() |
298,887 | test wsgi generator empty | import sys
import typing
import wsgiref.validate
from functools import partial
from io import StringIO
import pytest
import httpx
if typing.TYPE_CHECKING: # pragma: no cover
from _typeshed.wsgi import StartResponse, WSGIApplication, WSGIEnvironment
def application_factory(output: typing.Iterable[bytes]) -> "WSGIApplication":
    """Build a validated WSGI app that streams `output` as a text/plain 200 response."""
    def application(environ, start_response):
        start_response("200 OK", [("Content-type", "text/plain")])
        yield from output
    # Wrap with the wsgiref validator so protocol violations fail loudly.
    return wsgiref.validate.validator(application)
def echo_body(
    environ: "WSGIEnvironment", start_response: "StartResponse"
) -> typing.Iterable[bytes]:
    """WSGI app that echoes the request body back as a plain-text 200 response."""
    body = environ["wsgi.input"].read()
    start_response("200 OK", [("Content-type", "text/plain")])
    return [body]
def echo_body_with_response_stream(
    environ: "WSGIEnvironment", start_response: "StartResponse"
) -> typing.Iterable[bytes]:
    """WSGI app that echoes the request body back as a stream of 2-byte chunks."""
    start_response("200 OK", [("Content-Type", "text/plain")])

    def stream_chunks(source: typing.IO[bytes]) -> typing.Iterator[bytes]:
        # Tiny fixed-size reads so callers exercise streamed responses.
        while True:
            piece = source.read(2)
            if not piece:
                break
            yield piece

    return stream_chunks(environ["wsgi.input"])
def raise_exc(
    environ: "WSGIEnvironment",
    start_response: "StartResponse",
    exc: typing.Type[Exception] = ValueError,
) -> typing.Iterable[bytes]:
    """WSGI app that reports a 500, passing exc_info captured from a raised `exc`."""
    try:
        raise exc()
    except exc:
        captured = sys.exc_info()
        start_response("500 Server Error", [("Content-type", "text/plain")], captured)
    return [b"Nope!"]
def log_to_wsgi_log_buffer(environ, start_response):
    """Write two messages to wsgi.errors, then behave exactly like echo_body."""
    error_stream = environ["wsgi.errors"]
    print("test1", file=error_stream)
    error_stream.write("test2")
    return echo_body(environ, start_response)
def test_wsgi():
    """A plain GET against a WSGI app yields its streamed body."""
    app = application_factory([b"Hello, World!"])
    response = httpx.Client(app=app).get("http://www.example.org/")
    assert response.status_code == 200
    assert response.text == "Hello, World!"
def test_wsgi_upload():
    """A POST body is echoed back verbatim by the echo_body app."""
    response = httpx.Client(app=echo_body).post(
        "http://www.example.org/", content=b"example"
    )
    assert response.status_code == 200
    assert response.text == "example"
def test_wsgi_upload_with_response_stream():
    """A POST body is echoed back even when the app streams its response."""
    response = httpx.Client(app=echo_body_with_response_stream).post(
        "http://www.example.org/", content=b"example"
    )
    assert response.status_code == 200
    assert response.text == "example"
def test_wsgi_exc():
    """An exception raised inside the app propagates out of the client call."""
    with pytest.raises(ValueError):
        httpx.Client(app=raise_exc).get("http://www.example.org/")
def test_wsgi_http_error():
    """A customized exception type also propagates out of the client call."""
    with pytest.raises(RuntimeError):
        httpx.Client(app=partial(raise_exc, exc=RuntimeError)).get(
            "http://www.example.org/"
        )
def test_wsgi_generator():
    """Empty leading chunks are tolerated and the rest concatenated."""
    chunks = [b"", b"", b"Some content", b" and more content"]
    response = httpx.Client(app=application_factory(chunks)).get(
        "http://www.example.org/"
    )
    assert response.status_code == 200
    assert response.text == "Some content and more content"
def METHOD_NAME():
    """A body made entirely of empty chunks yields an empty 200 response."""
    response = httpx.Client(app=application_factory([b""] * 4)).get(
        "http://www.example.org/"
    )
    assert response.status_code == 200
    assert response.text == ""
def test_logging():
    """Messages written to wsgi.errors land in the configured error buffer."""
    log_sink = StringIO()
    transport = httpx.WSGITransport(app=log_to_wsgi_log_buffer, wsgi_errors=log_sink)
    response = httpx.Client(transport=transport).post(
        "http://www.example.org/", content=b"example"
    )
    assert response.status_code == 200  # no errors
    log_sink.seek(0)
    assert log_sink.read() == "test1\ntest2"
@pytest.mark.parametrize(
    "url, expected_server_port",
    [
        pytest.param("http://www.example.org", "80", id="auto-http"),
        pytest.param("https://www.example.org", "443", id="auto-https"),
        pytest.param("http://www.example.org:8000", "8000", id="explicit-port"),
    ],
)
def test_wsgi_server_port(url: str, expected_server_port: str) -> None:
    """
    SERVER_PORT is populated correctly from the requested URL.
    """
    inner_app = application_factory([b"Hello, World!"])
    seen_port: typing.Optional[str] = None

    def recording_app(environ, start_response):
        # Capture SERVER_PORT, then delegate to the real app.
        nonlocal seen_port
        seen_port = environ["SERVER_PORT"]
        return inner_app(environ, start_response)

    response = httpx.Client(app=recording_app).get(url)
    assert response.status_code == 200
    assert response.text == "Hello, World!"
    assert seen_port == expected_server_port
def test_wsgi_server_protocol():
    """The WSGI environ advertises HTTP/1.1 as the server protocol."""
    seen_protocol = None

    def app(environ, start_response):
        # Capture SERVER_PROTOCOL and reply with a fixed success body.
        nonlocal seen_protocol
        seen_protocol = environ["SERVER_PROTOCOL"]
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [b"success"]

    with httpx.Client(app=app, base_url="http://testserver") as client:
        response = client.get("/")
    assert response.status_code == 200
    assert response.text == "success"
    assert seen_protocol == "HTTP/1.1"
298,888 | build fmu | import argparse
import tempfile
from pathlib import Path
from typing import Union
from .fmi2slave import FMI2_MODEL_OPTIONS
from .builder import FmuBuilder
FilePath = Union[str, Path]
def create_csv_slave(csv_file: FilePath):
    """Generate the source of a PythonFMU slave that plays back `csv_file`.

    The returned string is a complete Python module defining a Fmi2Slave
    subclass named after the CSV file's stem. The generated slave reads the
    CSV from the FMU resources at construction time, exposes one output per
    column (type taken from an optional "[type]" suffix in the header), and
    optionally linearly interpolates real-valued columns between samples.
    """
    classname = csv_file.stem.capitalize()
    filename = csv_file.name
    # NOTE: {classname}/{filename} below are interpolated by this f-string;
    # doubled braces {{...}} are literal braces in the generated source.
    # Bug fix: the generated slave now opens "{filename}" — previously the
    # interpolation was missing, so `filename` was computed but unused and
    # the slave opened a bogus hard-coded file name.
    return f"""
import re
import csv
from math import isclose # requires >= python 3.5
from pythonfmu.fmi2slave import Fmi2Type, Fmi2Slave, Fmi2Causality, Fmi2Variability, Integer, Real, Boolean, String
def lerp(v0: float, v1: float, t: float) -> float:
    return (1 - t) * v0 + t * v1
def normalize(x: float, in_min: float, in_max: float, out_min: float, out_max: float) -> float:
    x = max(min(x, in_max), in_min)
    return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
def get_fmi2_type(s: str) -> Fmi2Type:
    s_lower = s.lower()
    for type in Fmi2Type:
        if type.name in s_lower:
            if type == Fmi2Type.enumeration:
                raise NotImplementedError(f"Unsupported type: {{Fmi2Type.enumeration.name}}")
            else:
                return type
    raise TypeError(f"Could not process type from input string: {{s}}")
TYPE2OBJ = {{
    Fmi2Type.integer: Integer,
    Fmi2Type.real: Real,
    Fmi2Type.boolean: Boolean,
    Fmi2Type.string: String
}}
class Header:
    def __init__(self, s):
        matches = re.findall(r"\\[(.*?)\\]", s)
        if len(matches) > 0:
            match = matches[-1]
            self.name = s.replace("[" + match + "]", "").rstrip()
            self.type = get_fmi2_type(match)
        else:
            self.name = s
            self.type = Fmi2Type.real
    def __repr__(self):
        return f"Header(name={{self.name}}, type={{self.type.name}})"
class {classname}(Fmi2Slave):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.current_index = 0
        self.next_index = None
        self.current_time = 0.0
        self.interpolate = True
        data = dict()
        def read_csv():
            with open(self.resources + '/' + "{filename}") as f:
                return list(csv.reader(f, skipinitialspace=True, delimiter=',', quotechar='"'))
        read = read_csv()
        header_row = read[0]
        headers = list(map(lambda h: Header(h.strip()), header_row[1:len(header_row)]))
        rows = read[1:len(read)]
        self.num_rows = len(rows)
        self.times = []
        for header in headers:
            data[header.name] = []
        def get_value(header):
            current_value = data[header.name][self.current_index]
            if self.next_index is None or header.type is not Fmi2Type.real:
                return current_value
            next_value = data[header.name][self.next_index]
            if current_value == next_value:
                return current_value
            current_value_t = self.times[self.current_index]
            next_value_t = self.times[self.next_index]
            t = normalize(self.current_time, current_value_t, next_value_t, 0, 1)
            return lerp(current_value, next_value, t)
        self.register_variable(
            TYPE2OBJ[header.type](header.name,
                                  causality=Fmi2Causality.output,
                                  variability=Fmi2Variability.constant,
                                  getter=lambda header=header: get_value(header)), nested=False)
        for i in range(0, self.num_rows):
            row = rows[i]
            self.times.append(float(row[0]))
            for j in range(1, len(row)):
                header = headers[j-1]
                if header.type == Fmi2Type.integer:
                    data[header.name].append(int(row[j]))
                elif header.type == Fmi2Type.real:
                    data[header.name].append(float(row[j]))
                elif header.type == Fmi2Type.boolean:
                    data[header.name].append(row[j] == 'true')
                elif header.type == Fmi2Type.string:
                    data[header.name].append(row[j])
        self.register_variable(Integer("num_rows",
                                       causality=Fmi2Causality.output,
                                       variability=Fmi2Variability.constant))
        self.register_variable(Real("end_time",
                                    causality=Fmi2Causality.output,
                                    variability=Fmi2Variability.constant,
                                    getter=lambda: self.times[-1]))
        self.register_variable(Boolean("interpolate",
                                       causality=Fmi2Causality.parameter,
                                       variability=Fmi2Variability.tunable))
    def find_indices(self, t, dt):
        current_t = self.times[self.current_index]
        while current_t < t:
            if self.current_index == self.num_rows-1:
                break
            self.current_index += 1
            current_t = self.times[self.current_index]
        if current_t > t and not isclose(current_t, t, rel_tol=1e-6):
            self.current_index -= 1
            current_t = self.times[self.current_index]
        if self.interpolate and self.current_index <= self.num_rows-2:
            self.next_index = self.current_index+1
            next_t = self.times[self.next_index]
            while t+dt >= next_t and not isclose(t+dt, next_t, abs_tol=1e-6):
                if self.next_index + 1 < self.num_rows:
                    self.next_index += 1
                    next_t = self.times[self.next_index]
    def setup_experiment(self, start_time: float):
        self.current_time = start_time
        self.find_indices(start_time, 0)
    def do_step(self, current_time: float, step_size: float) -> bool:
        if (self.current_index == self.num_rows):
            return False
        self.current_time = current_time + step_size
        self.find_indices(self.current_time, step_size)
        return True
"""
class CsvFmuBuilder:
    """Packages a CSV time series as an FMU via a generated PythonFMU slave."""

    @staticmethod
    def METHOD_NAME(
        csv_file: FilePath,
        dest: FilePath = ".",
        **options,
    ) -> Path:
        """Generate the slave script for `csv_file` and build the FMU in `dest`.

        Raises:
            ValueError: when `csv_file` does not exist or lacks a .csv suffix.
        """
        source = Path(csv_file)
        if not source.exists():
            raise ValueError(f"No such file {source!s}")
        if not source.suffix.endswith(".csv"):
            raise ValueError(f"File {source!s} must have extension '.csv'!")
        options["dest"] = dest
        options["project_files"] = {source}
        # The generated slave script only needs to live long enough for the
        # FMU builder to package it.
        with tempfile.TemporaryDirectory(prefix="pythonfmu_") as tmp_name:
            workdir = Path(tmp_name)
            slave_script = workdir / (source.stem + ".py")
            with open(slave_script, "+w") as handle:
                handle.write(create_csv_slave(source))
            options["script_file"] = slave_script
            return FmuBuilder.METHOD_NAME(**options)
def create_command_parser(parser: argparse.ArgumentParser):
    """Register the CSV-builder CLI arguments and FMI model options on `parser`."""
    parser.add_argument(
        "-f",
        "--file",
        dest="csv_file",
        help="Path to the CSV file.",
        required=True
    )
    parser.add_argument(
        "-d", "--dest", dest="dest", help="Where to save the FMU.", default="."
    )
    parser.add_argument(
        "--doc",
        dest="documentation_folder",
        help="Documentation folder to include in the FMU.",
        default=None
    )
    # Each FMI model option becomes a flag that inverts its default value.
    for model_option in FMI2_MODEL_OPTIONS:
        flag_action = "store_false" if model_option.value else "store_true"
        parser.add_argument(
            f"--{model_option.cli}",
            dest=model_option.name,
            help=f"If given, {model_option.name}={flag_action[6:]}",
            action=flag_action
        )
    parser.set_defaults(execute=CsvFmuBuilder.METHOD_NAME)
298,889 | main | # Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
from collections import OrderedDict
import defusedxml.cElementTree as ET
import numpy as np
from torch import randn
from tools.debug.common import get_full_dump_paths
from tools.debug.common import load_torch_model
from tools.debug.common import print_args
from tools.debug.common import register_print_hooks
from tools.debug.common import save_dump
from tools.ir_utils import find_all_parameters
from tools.ir_utils import get_ir_paths
# Command-line interface. The parsed `args` namespace is a module-level
# global read directly by the helper functions below.
argparser = argparse.ArgumentParser()
argparser.add_argument("-m", "--model", help="input IR name", required=True)
argparser.add_argument("--bin", help="Input *.bin file name")
argparser.add_argument("-o", "--output-dir", help="Output directory to dump weights", required=True)
argparser.add_argument("-c", "--config", type=str, default="config.json", help="Model's config", required=True)
argparser.add_argument("-n", "--num-layers", type=int, default=-1, help="Compare weights for given number of layers")
argparser.add_argument("--ignore", help="comma separated list of ignored layers", default="")
args = argparser.parse_args()
print_args(args)
def METHOD_NAME():
    """Dump IR and Torch weights side by side and report per-layer max abs diffs."""
    model_bin, model_xml = get_ir_paths(args.model, args.bin)
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    ir_weights = collect_IR_weights(
        os.path.join(args.output_dir, "IR"), model_xml, model_bin, args.num_layers
    )
    with open(args.config, encoding="utf8") as cfg_file:
        config = json.load(cfg_file)
    torch_weights = collect_torch_weights(
        os.path.join(args.output_dir, "PTH"), config, args.num_layers
    )
    assert len(ir_weights) == len(torch_weights), "{} vs {}".format(len(ir_weights), len(torch_weights))
    print("Maximum of absolute difference - IR vs Torch")
    diffs = []
    for (ir_name, ir_value), (pt_name, pt_value) in zip(ir_weights.items(), torch_weights.items()):
        layer_diff = abs(ir_value - pt_value).max()
        diffs.append(layer_diff)
        print("{0:.5} - max diff [{1:}] vs [{2:}]".format(layer_diff, ir_name, pt_name))
    print("Global maximum: {0:.5}".format(np.max(diffs)))
def collect_IR_weights(output_dir, model_xml, model_bin, num_layers):
    """Dump flattened weight tensors from an OpenVINO IR into *output_dir*.

    Reads the raw .bin blob, pairs it with the parameters located via the
    XML, skips biases and ignored layers, and returns an OrderedDict of
    dump-name -> flattened weight array. Reads the module-level `args`.
    """
    data_to_compare = OrderedDict()
    print("IR loaded from {}".format(model_bin))
    with open(model_bin, "rb") as f:
        buffer = f.read()
    # Layers the user listed via --ignore, plus layers past the compared depth.
    ignored = args.ignore.split(",") + get_ignored_layers(model_xml, args.num_layers)
    all_parameters = find_all_parameters(buffer, model_xml)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    idx = 0
    for name, param in all_parameters.items():
        # Only weights are compared; biases and ignored layers are skipped.
        if name.split(".")[0] in ignored or "bias" in name:
            continue
        # num_layers == -1 means "compare every layer".
        if (num_layers > 0 and idx < num_layers) or (num_layers == -1):
            name = name.replace(os.path.sep, "_")
            dump_name = ".".join([str(idx), name])
            output_data = param.data.flatten()
            save_dump(dump_name, output_dir, output_data)
            data_to_compare[dump_name] = output_data
        idx += 1
    return data_to_compare
def collect_torch_weights(output_dir, config, num_layers):
    """Dump Torch model weights via print hooks and return them by dump name.

    The hooks registered by `register_print_hooks` write .npy dumps during a
    forward pass; the dumps are then re-loaded into an OrderedDict keyed by
    the dump file's base name.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    model = load_torch_model(config)
    model_e = model.eval()
    data_to_compare = OrderedDict()
    register_print_hooks(
        output_dir, model_e, num_layers=num_layers, data_to_compare=data_to_compare, dump_activations=False
    )
    # A single forward pass on random input triggers the dump hooks.
    input_ = randn(config["input_sample_size"])
    model_e(input_)
    for _, module in enumerate(model_e.modules()):
        paths = get_full_dump_paths(module)
        if paths is not None:
            for dump_path in paths:
                if os.path.isfile(dump_path):
                    data_to_compare[os.path.splitext(os.path.basename(dump_path))[0]] = np.load(dump_path)
    return data_to_compare
def get_ignored_layers(model_xml, num_layers=1):
    """Build the list of IR layer names to skip during weight comparison.

    Supported (Convolution / FullyConnected) layers beyond the first
    *num_layers* are ignored, as are all ScaleShift layers. With
    num_layers <= 0, only ScaleShift layers are returned.
    """
    ir_tree = ET.parse(model_xml)
    ignored_layers = []
    # Bug fix: the original compared the type string to a tuple with `==`,
    # which is always False, so no supported layer was ever collected.
    # Membership (`in`) is the intended test.
    all_supported = [l for l in ir_tree.iter("layer")
                     if l.get("type") in ("Convolution", "FullyConnected")]
    if num_layers > 0:
        ignored_layers += [layer.get("name") for layer in all_supported[num_layers:]]
    all_bns = [l for l in ir_tree.iter("layer") if l.get("type") == "ScaleShift"]
    ignored_layers += [bn.get("name") for bn in all_bns]
    return ignored_layers
if __name__ == "__main__":
METHOD_NAME() |
298,890 | scrub item | # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Cleans extraneous metadata from files' tags via a command or
automatically whenever tags are written.
"""
from beets.plugins import BeetsPlugin
from beets import ui
from beets import util
from beets import config
import mediafile
import mutagen
# Map of Mutagen submodule name -> file-type class name. Used to enumerate
# every container format whose tags this plugin knows how to open and scrub.
_MUTAGEN_FORMATS = {
    'asf': 'ASF',
    'apev2': 'APEv2File',
    'flac': 'FLAC',
    'id3': 'ID3FileType',
    'mp3': 'MP3',
    'mp4': 'MP4',
    'oggflac': 'OggFLAC',
    'oggspeex': 'OggSpeex',
    'oggtheora': 'OggTheora',
    'oggvorbis': 'OggVorbis',
    'oggopus': 'OggOpus',
    'trueaudio': 'TrueAudio',
    'wavpack': 'WavPack',
    'monkeysaudio': 'MonkeysAudio',
    'optimfrog': 'OptimFROG',
}
class ScrubPlugin(BeetsPlugin):
    """Removes extraneous metadata from files' tags."""
    def __init__(self):
        """Register config defaults and, if `auto`, scrub on import."""
        super().__init__()
        self.config.add({
            'auto': True,
        })
        if self.config['auto']:
            self.register_listener("import_task_files", self.import_task_files)
    def commands(self):
        """Expose the `beet scrub` subcommand."""
        def scrub_func(lib, opts, args):
            # Walk through matching files and remove tags.
            for item in lib.items(ui.decargs(args)):
                self._log.info('scrubbing: {0}',
                               util.displayable_path(item.path))
                self.METHOD_NAME(item, opts.write)
        scrub_cmd = ui.Subcommand('scrub', help='clean audio tags')
        scrub_cmd.parser.add_option(
            '-W', '--nowrite', dest='write',
            action='store_false', default=True,
            help='leave tags empty')
        scrub_cmd.func = scrub_func
        return [scrub_cmd]
    @staticmethod
    def _mutagen_classes():
        """Get a list of file type classes from the Mutagen module.
        """
        classes = []
        for modname, clsname in _MUTAGEN_FORMATS.items():
            mod = __import__(f'mutagen.{modname}',
                             fromlist=[clsname])
            classes.append(getattr(mod, clsname))
        return classes
    def _scrub(self, path):
        """Remove all tags from a file.

        Every known Mutagen file type is tried in turn so that stacked or
        mismatched tag formats are all removed.
        """
        for cls in self._mutagen_classes():
            # Try opening the file with this type, but just skip in the
            # event of any error.
            try:
                f = cls(util.syspath(path))
            except Exception:
                continue
            if f.tags is None:
                continue
            # Remove the tag for this type.
            try:
                f.delete()
            except NotImplementedError:
                # Some Mutagen metadata subclasses (namely, ASFTag) do not
                # support .delete(), presumably because it is impossible to
                # remove them. In this case, we just remove all the tags.
                for tag in f.keys():
                    del f[tag]
                f.save()
            except (OSError, mutagen.MutagenError) as exc:
                self._log.error('could not scrub {0}: {1}',
                                util.displayable_path(path), exc)
    def METHOD_NAME(self, item, restore=True):
        """Remove tags from an Item's associated file and, if `restore`
        is enabled, write the database's tags back to the file.
        """
        # Get album art if we need to restore it.
        if restore:
            try:
                mf = mediafile.MediaFile(util.syspath(item.path),
                                         config['id3v23'].get(bool))
            except mediafile.UnreadableFileError as exc:
                self._log.error('could not open file to scrub: {0}',
                                exc)
                return
            images = mf.images
        # Remove all tags.
        self._scrub(item.path)
        # Restore tags, if enabled.
        if restore:
            self._log.debug('writing new tags after scrub')
            item.try_write()
            if images:
                self._log.debug('restoring art')
                try:
                    # Re-open: the old MediaFile handle refers to the
                    # pre-scrub state of the file.
                    mf = mediafile.MediaFile(util.syspath(item.path),
                                             config['id3v23'].get(bool))
                    mf.images = images
                    mf.save()
                except mediafile.UnreadableFileError as exc:
                    self._log.error('could not write tags: {0}', exc)
    def import_task_files(self, session, task):
        """Automatically scrub imported files."""
        for item in task.imported_items():
            self._log.debug('auto-scrubbing {0}',
                            util.displayable_path(item.path))
            self.METHOD_NAME(item) |
298,891 | test control keys | from __future__ import annotations
import pytest
from prompt_toolkit.input.vt100_parser import Vt100Parser
from prompt_toolkit.keys import Keys
class _ProcessorMock:
def __init__(self):
self.keys = []
def feed_key(self, key_press):
self.keys.append(key_press)
@pytest.fixture
def processor():
    """A fresh key-recording processor for each test."""
    return _ProcessorMock()
@pytest.fixture
def stream(processor):
    """A Vt100Parser wired to feed every parsed key into the mock processor."""
    return Vt100Parser(processor.feed_key)
def METHOD_NAME(processor, stream):
    """Single control characters map to Control- keys with raw data preserved."""
    stream.feed("\x01\x02\x10")
    expected = [
        (Keys.ControlA, "\x01"),
        (Keys.ControlB, "\x02"),
        (Keys.ControlP, "\x10"),
    ]
    assert len(processor.keys) == len(expected)
    for press, (key, data) in zip(processor.keys, expected):
        assert press.key == key
        assert press.data == data
def test_arrows(processor, stream):
    """The four arrow CSI sequences map to Up/Down/Right/Left."""
    stream.feed("\x1b[A\x1b[B\x1b[C\x1b[D")
    expected = [
        (Keys.Up, "\x1b[A"),
        (Keys.Down, "\x1b[B"),
        (Keys.Right, "\x1b[C"),
        (Keys.Left, "\x1b[D"),
    ]
    assert len(processor.keys) == len(expected)
    for press, (key, data) in zip(processor.keys, expected):
        assert press.key == key
        assert press.data == data
def test_escape(processor, stream):
    """A lone ESC before plain text yields Escape followed by the literal chars."""
    stream.feed("\x1bhello")
    assert len(processor.keys) == 1 + len("hello")
    assert (processor.keys[0].key, processor.keys[0].data) == (Keys.Escape, "\x1b")
    assert (processor.keys[1].key, processor.keys[1].data) == ("h", "h")
def test_special_double_keys(processor, stream):
    """Alt+Left arrives as Escape then Left, raw data attached to the first press."""
    stream.feed("\x1b[1;3D")  # Should both send escape and left.
    assert len(processor.keys) == 2
    first, second = processor.keys
    assert (first.key, first.data) == (Keys.Escape, "\x1b[1;3D")
    assert (second.key, second.data) == (Keys.Left, "")
def test_flush_1(processor, stream):
    """An escape sequence split across two feeds is still parsed as one key."""
    for chunk in ("\x1b", "[D"):
        stream.feed(chunk)
    assert len(processor.keys) == 1
    press = processor.keys[0]
    assert press.key == Keys.Left
    assert press.data == "\x1b[D"
def test_flush_2(processor, stream):
    """flush() emits pending input as-is, so a split sequence becomes three keys."""
    stream.feed("\x1b")
    stream.flush()
    stream.feed("[D")
    expected = [(Keys.Escape, "\x1b"), ("[", "["), ("D", "D")]
    assert len(processor.keys) == len(expected)
    for press, (key, data) in zip(processor.keys, expected):
        assert press.key == key
        assert press.data == data
def test_meta_arrows(processor, stream):
    """ESC prefixing an arrow sequence produces Escape then the arrow key."""
    stream.feed("\x1b\x1b[D")
    assert [p.key for p in processor.keys] == [Keys.Escape, Keys.Left]
def test_control_square_close(processor, stream):
    """Ctrl-] followed by a character yields ControlSquareClose then the char."""
    stream.feed("\x1dC")
    assert [p.key for p in processor.keys] == [Keys.ControlSquareClose, "C"]
def test_invalid(processor, stream):
    """An unrecognised CSI sequence falls back to its individual characters."""
    # "\x1b[*" shares a two-character prefix with real sequences but is not one.
    stream.feed("\x1b[*")
    assert [p.key for p in processor.keys] == [Keys.Escape, "[", "*"]
def test_cpr_response(processor, stream):
    """A cursor-position report embedded in text parses as a single CPRResponse."""
    stream.feed("a\x1b[40;10Rb")
    assert [p.key for p in processor.keys] == ["a", Keys.CPRResponse, "b"]
def test_cpr_response_2(processor, stream):
    """The newline after a CPR response must not be swallowed into the response."""
    stream.feed("\x1b[40;1R\n")
    assert [p.key for p in processor.keys] == [Keys.CPRResponse, Keys.ControlJ]
298,892 | get video cache | """Views for videos embedded in various models."""
from __future__ import annotations
from datetime import datetime
from typing import Optional, Union, cast
from typing_extensions import TypedDict
import requests
import vimeo
from flask import current_app
from pytz import utc
from sentry_sdk import capture_exception
from coaster.utils import parse_duration, parse_isoformat
from .. import redis_store
from ..models import Proposal, Session, VideoError, VideoMixin
class YoutubeApiError(VideoError):
    """Raised when the YouTube Data API returns an unusable response."""
class VideoData(TypedDict):
    """Dictionary for video data, as used in templates."""

    source: str  # video platform identifier (e.g. 'youtube' or 'vimeo')
    id: str  # noqa: A003
    url: str  # canonical watch URL
    embeddable_url: str  # URL suitable for an <iframe> embed
    duration: float  # seconds; 0.0 when not (yet) known
    # ISO string while stored in Redis, datetime once loaded; '' before fetch.
    uploaded_at: Union[str, datetime]
    thumbnail: str  # thumbnail image URL; '' when not (yet) known
def video_cache_key(obj: VideoMixin) -> str:
    """Return the Redis cache key for *obj*'s video, raising if it has none."""
    if not (obj.video_source and obj.video_id):
        raise VideoError("No video source or ID to create a cache key")
    return f'video_cache/{obj.video_source}/{obj.video_id}'
def METHOD_NAME(obj: VideoMixin) -> Optional[VideoData]:
    """Load cached video metadata for *obj* from Redis.

    Redis hashes store all values as strings, so the two typed fields are
    coerced back: ``uploaded_at`` to an aware datetime and ``duration`` to a
    float. Returns the hash contents; an empty dict (cache miss) is falsy,
    which callers treat as "not cached".
    """
    data = redis_store.hgetall(video_cache_key(obj))
    if data:
        if 'uploaded_at' in data and data['uploaded_at']:
            data['uploaded_at'] = parse_isoformat(data['uploaded_at'], naive=False)
        if 'duration' in data and data['duration']:
            data['duration'] = float(data['duration'])
    return data
def set_video_cache(obj: VideoMixin, data: VideoData, exists: bool = True) -> None:
    """Store video metadata in Redis, with a TTL based on source availability."""
    cache_key = video_cache_key(obj)
    # Copy before mutating: the caller keeps using the datetime in `data`.
    copied_data = data.copy()
    if copied_data['uploaded_at']:
        copied_data['uploaded_at'] = cast(
            datetime, copied_data['uploaded_at']
        ).isoformat()
    # NOTE(review): hmset is deprecated in redis-py 3.x in favour of
    # hset(..., mapping=...) — confirm the client version in use.
    redis_store.hmset(cache_key, copied_data)
    # if video exists at source, cache for 2 days, if not, for 6 hours
    hours_to_cache = 2 * 24 if exists else 6
    redis_store.expire(cache_key, 60 * 60 * hours_to_cache)
@Proposal.views('video', cached_property=True)
@Session.views('video', cached_property=True)
def video_property(obj: VideoMixin) -> Optional[VideoData]:
    """Return video metadata for a proposal or session, using a Redis cache.

    On a cache miss the YouTube or Vimeo API is queried for duration, upload
    timestamp and thumbnail, and the result is cached (2 days when the video
    still exists at source, 6 hours otherwise). Returns None when *obj* has
    no video; on API/network failure, returns placeholder data uncached.
    """
    data: Optional[VideoData] = None
    exists = True
    if obj.video_source and obj.video_id:
        # Check for cached data
        data = METHOD_NAME(obj)
        if not data:
            # Placeholder values, filled in per-source below.
            data = {
                'source': obj.video_source,
                'id': obj.video_id,
                'url': cast(str, obj.video_url),
                'embeddable_url': cast(str, obj.embeddable_video_url),
                'duration': 0.0,
                'uploaded_at': '',
                'thumbnail': '',
            }
            if obj.video_source == 'youtube':
                video_url = (
                    f'https://www.googleapis.com/youtube/v3/videos'
                    f'?part=snippet,contentDetails&id={obj.video_id}'
                    f'&key={current_app.config["YOUTUBE_API_KEY"]}'
                )
                try:
                    youtube_resp = requests.get(video_url, timeout=30)
                except requests.exceptions.RequestException as exc:
                    current_app.logger.error("YouTube API request error: %s", repr(exc))
                    capture_exception(exc)
                    return data
                if youtube_resp.status_code == 200:
                    try:
                        youtube_video = youtube_resp.json()
                    except requests.exceptions.JSONDecodeError as exc:
                        current_app.logger.error(
                            "Unable to parse JSON response while calling '%s'",
                            video_url,
                        )
                        capture_exception(exc)
                        # Bug fix: execution previously fell through to the
                        # checks below with `youtube_video` never assigned,
                        # raising NameError. Bail out with placeholder data,
                        # mirroring the RequestException path above.
                        return data
                    if not youtube_video or 'items' not in youtube_video:
                        raise YoutubeApiError(
                            "API Error: Check the YouTube URL or API key"
                        )
                    if not youtube_video['items']:
                        # Response has zero item for our given video ID. This will
                        # happen if the video has been removed from YouTube.
                        exists = False
                    else:
                        youtube_video = youtube_video['items'][0]
                        data['duration'] = parse_duration(
                            youtube_video['contentDetails']['duration']
                        ).total_seconds()
                        data['uploaded_at'] = parse_isoformat(
                            youtube_video['snippet']['publishedAt'], naive=False
                        )
                        data['thumbnail'] = youtube_video['snippet']['thumbnails'][
                            'medium'
                        ]['url']
                else:
                    current_app.logger.error(
                        "HTTP %s: YouTube API request failed for url '%s'",
                        youtube_resp.status_code,
                        video_url,
                    )
            elif obj.video_source == 'vimeo':
                vimeo_client = vimeo.VimeoClient(
                    token=current_app.config.get('VIMEO_ACCESS_TOKEN'),
                    key=current_app.config.get('VIMEO_CLIENT_ID'),
                    secret=current_app.config.get('VIMEO_CLIENT_SECRET'),
                )
                video_url = f'/videos/{obj.video_id}'
                try:
                    vimeo_resp = vimeo_client.get(video_url)
                except requests.exceptions.RequestException as exc:
                    current_app.logger.error("Vimeo API request error: %s", repr(exc))
                    capture_exception(exc)
                    return data
                if vimeo_resp.status_code == 200:
                    vimeo_video = vimeo_resp.json()
                    data['duration'] = float(vimeo_video['duration'])
                    # Vimeo returns naive datetime, we will add utc timezone to it
                    data['uploaded_at'] = utc.localize(
                        parse_isoformat(vimeo_video['release_time'])
                    )
                    data['thumbnail'] = vimeo_video['pictures']['sizes'][1]['link']
                elif vimeo_resp.status_code == 404:
                    # Video doesn't exist on Vimeo anymore
                    exists = False
                else:
                    # Vimeo API down or returning unexpected values
                    exists = False
                    current_app.logger.error(
                        "HTTP %s: Vimeo API request failed for url '%s'",
                        vimeo_resp.status_code,
                        video_url,
                    )
            set_video_cache(obj, data, exists)
    return data
298,893 | test workers get allocated | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe and Contributors
# See license.txt
import unittest
from unittest.mock import MagicMock, Mock, patch
import frappe
from press.press.doctype.agent_job.agent_job import AgentJob
from press.press.doctype.bench.bench import Bench, StagingSite
from press.press.doctype.plan.test_plan import create_test_plan
from press.press.doctype.server.server import scale_workers
from press.press.doctype.site.test_site import create_test_bench, create_test_site
from press.press.doctype.subscription.test_subscription import create_test_subscription
@patch.object(AgentJob, "enqueue_http_request", new=Mock())
class TestStagingSite(unittest.TestCase):
def tearDown(self):
frappe.db.rollback()
def test_create_staging_site(self):
bench = create_test_bench() # also creates press settings
frappe.db.set_single_value(
"Press Settings", "staging_plan", create_test_plan("Site").name
)
count_before = frappe.db.count("Site")
site = StagingSite(bench).insert()
self.assertTrue(site.staging)
count_after = frappe.db.count("Site")
self.assertEqual(count_after - count_before, 1)
@patch.object(AgentJob, "after_insert", new=Mock())
@patch("press.press.doctype.server.server.frappe.db.commit", new=MagicMock)
class TestBench(unittest.TestCase):
def tearDown(self):
frappe.db.rollback()
def _create_bench_with_n_sites_with_cpu_time(
self, n: int, x: float, bench: str = None
) -> Bench:
"""Creates new bench if None given."""
plan = create_test_plan("Site", cpu_time=x)
if not bench:
site = create_test_site()
create_test_subscription(site.name, plan.name, site.team) # map site with plan
bench = site.bench
n -= 1
for i in range(n):
site = create_test_site(bench=bench)
create_test_subscription(site.name, plan.name, site.team)
return frappe.get_doc("Bench", bench)
def test_work_load_is_calculated_correctly(self):
bench = self._create_bench_with_n_sites_with_cpu_time(3, 5)
self.assertEqual(bench.work_load, 15)
bench = self._create_bench_with_n_sites_with_cpu_time(3, 10, bench.name)
self.assertEqual(bench.work_load, 45)
def test_work_load_gives_reasonable_numbers(self):
bench1 = self._create_bench_with_n_sites_with_cpu_time(3, 5)
bench2 = self._create_bench_with_n_sites_with_cpu_time(3, 10)
bench3 = self._create_bench_with_n_sites_with_cpu_time(6, 5)
bench4 = self._create_bench_with_n_sites_with_cpu_time(6, 10)
self.assertGreater(bench2.work_load, bench1.work_load)
self.assertGreater(bench4.work_load, bench3.work_load)
self.assertGreater(bench4.work_load, bench2.work_load)
def METHOD_NAME(self):
bench = self._create_bench_with_n_sites_with_cpu_time(3, 5)
workers_before = (bench.background_workers, bench.gunicorn_workers) # 1, 2
scale_workers()
bench.reload()
workers_after = (bench.background_workers, bench.gunicorn_workers)
self.assertGreater(workers_after[1], workers_before[1])
self.assertGreater(workers_after[0], workers_before[0])
def test_auto_scale_uses_release_groups_workers_when_set(self):
bench = self._create_bench_with_n_sites_with_cpu_time(3, 5)
self.assertEqual(bench.gunicorn_workers, 2)
self.assertEqual(bench.background_workers, 1)
group = frappe.get_doc("Release Group", bench.group)
scale_workers()
bench.reload()
self.assertEqual(bench.gunicorn_workers, 24)
self.assertEqual(bench.background_workers, 8)
group.db_set("gunicorn_workers", 8)
group.db_set("background_workers", 4)
scale_workers()
bench.reload()
self.assertEqual(bench.gunicorn_workers, 8)
self.assertEqual(bench.background_workers, 4)
def test_auto_scale_uses_release_groups_workers_respecting_ram_available_on_server(
self,
):
bench = self._create_bench_with_n_sites_with_cpu_time(3, 5)
group = frappe.get_doc("Release Group", bench.group)
group.db_set("gunicorn_workers", 48)
group.db_set("background_workers", 8)
scale_workers()
bench.reload()
self.assertEqual(bench.gunicorn_workers, 48)
bench2 = create_test_bench(
group=frappe.get_doc("Release Group", bench.group), server=bench.server
)
self._create_bench_with_n_sites_with_cpu_time(3, 5, bench2.name)
scale_workers()
bench.reload()
bench2.reload()
# assuming max gunicorn workers for default server (16gb RAM) is 52
self.assertLess(bench.gunicorn_workers, 48)
self.assertLess(bench2.gunicorn_workers, 48) |
298,894 | parse rf item | # coding: utf-8
from __future__ import unicode_literals
import itertools
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_parse_urlparse,
)
from ..utils import (
clean_html,
float_or_none,
int_or_none,
try_get,
urlencode_postdata,
)
class CiscoLiveBaseIE(InfoExtractor):
    """Shared Rainfocus API and Brightcove plumbing for Cisco Live extractors."""
    # These appear to be constant across all Cisco Live presentations
    # and are not tied to any user session or event
    RAINFOCUS_API_URL = 'https://events.rainfocus.com/api/%s'
    RAINFOCUS_API_PROFILE_ID = 'Na3vqYdAlJFSxhYTYQGuMbpafMqftalz'
    RAINFOCUS_WIDGET_ID = 'n6l4Lo05R8fiy3RpUBm447dZN8uNWoye'
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/5647924234001/SyK2FdqjM_default/index.html?videoId=%s'
    HEADERS = {
        'Origin': 'https://ciscolive.cisco.com',
        'rfApiProfileId': RAINFOCUS_API_PROFILE_ID,
        'rfWidgetId': RAINFOCUS_WIDGET_ID,
    }
    def _call_api(self, ep, rf_id, query, referrer, note=None):
        """POST *query* to Rainfocus endpoint *ep* and return the parsed JSON."""
        headers = self.HEADERS.copy()
        headers['Referer'] = referrer
        return self._download_json(
            self.RAINFOCUS_API_URL % ep, rf_id, note=note,
            data=urlencode_postdata(query), headers=headers)
    def METHOD_NAME(self, rf_item):
        """Map a Rainfocus session item to a Brightcove url_transparent result."""
        event_name = rf_item.get('eventName')
        title = rf_item['title']
        description = clean_html(rf_item.get('abstract'))
        presenter_name = try_get(rf_item, lambda x: x['participants'][0]['fullName'])
        bc_id = rf_item['videos'][0]['url']
        bc_url = self.BRIGHTCOVE_URL_TEMPLATE % bc_id
        duration = float_or_none(try_get(rf_item, lambda x: x['times'][0]['length']))
        location = try_get(rf_item, lambda x: x['times'][0]['room'])
        # 'length' appears to be reported in minutes; normalize to seconds.
        if duration:
            duration = duration * 60
        return {
            '_type': 'url_transparent',
            'url': bc_url,
            'ie_key': 'BrightcoveNew',
            'title': title,
            'description': description,
            'duration': duration,
            'creator': presenter_name,
            'location': location,
            'series': event_name,
        }
class CiscoLiveSessionIE(CiscoLiveBaseIE):
    """Extractor for a single Cisco Live on-demand session."""
    _VALID_URL = r'https?://(?:www\.)?ciscolive(?:\.cisco)?\.com/[^#]*#/session/(?P<id>[^/?&]+)'
    _TESTS = [{
        'url': 'https://ciscolive.cisco.com/on-demand-library/?#/session/1423353499155001FoSs',
        'md5': 'c98acf395ed9c9f766941c70f5352e22',
        'info_dict': {
            'id': '5803694304001',
            'ext': 'mp4',
            'title': '13 Smart Automations to Monitor Your Cisco IOS Network',
            'description': 'md5:ec4a436019e09a918dec17714803f7cc',
            'timestamp': 1530305395,
            'upload_date': '20180629',
            'uploader_id': '5647924234001',
            'location': '16B Mezz.',
        },
    }, {
        'url': 'https://www.ciscolive.com/global/on-demand-library.html?search.event=ciscoliveemea2019#/session/15361595531500013WOU',
        'only_matching': True,
    }, {
        'url': 'https://www.ciscolive.com/global/on-demand-library.html?#/session/1490051371645001kNaS',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        """Fetch the session by Rainfocus id and delegate to the shared parser."""
        rf_id = self._match_id(url)
        rf_result = self._call_api('session', rf_id, {'id': rf_id}, url)
        return self.METHOD_NAME(rf_result['items'][0])
class CiscoLiveSearchIE(CiscoLiveBaseIE):
    """Extractor for Cisco Live on-demand-library search result playlists."""
    _VALID_URL = r'https?://(?:www\.)?ciscolive(?:\.cisco)?\.com/(?:global/)?on-demand-library(?:\.html|/)'
    _TESTS = [{
        'url': 'https://ciscolive.cisco.com/on-demand-library/?search.event=ciscoliveus2018&search.technicallevel=scpsSkillLevel_aintroductory&search.focus=scpsSessionFocus_designAndDeployment#/',
        'info_dict': {
            'title': 'Search query',
        },
        'playlist_count': 5,
    }, {
        'url': 'https://ciscolive.cisco.com/on-demand-library/?search.technology=scpsTechnology_applicationDevelopment&search.technology=scpsTechnology_ipv6&search.focus=scpsSessionFocus_troubleshootingTroubleshooting#/',
        'only_matching': True,
    }, {
        'url': 'https://www.ciscolive.com/global/on-demand-library.html?search.technicallevel=scpsSkillLevel_aintroductory&search.event=ciscoliveemea2019&search.technology=scpsTechnology_dataCenter&search.focus=scpsSessionFocus_bestPractices#/',
        'only_matching': True,
    }]
    @classmethod
    def suitable(cls, url):
        """Defer to the session extractor for URLs it matches."""
        return False if CiscoLiveSessionIE.suitable(url) else super(CiscoLiveSearchIE, cls).suitable(url)
    @staticmethod
    def _check_bc_id_exists(rf_item):
        """True when the item carries a numeric Brightcove video id."""
        return int_or_none(try_get(rf_item, lambda x: x['videos'][0]['url'])) is not None
    def _entries(self, query, url):
        """Yield parsed results, paging through the search API 50 at a time."""
        query['size'] = 50
        query['from'] = 0
        for page_num in itertools.count(1):
            results = self._call_api(
                'search', None, query, url,
                'Downloading search JSON page %d' % page_num)
            sl = try_get(results, lambda x: x['sectionList'][0], dict)
            if sl:
                results = sl
            items = results.get('items')
            if not items or not isinstance(items, list):
                break
            for item in items:
                if not isinstance(item, dict):
                    continue
                if not self._check_bc_id_exists(item):
                    continue
                yield self.METHOD_NAME(item)
            # Stop once the next page would run past the reported total.
            size = int_or_none(results.get('size'))
            if size is not None:
                query['size'] = size
            total = int_or_none(results.get('total'))
            if total is not None and query['from'] + query['size'] > total:
                break
            query['from'] += query['size']
    def _real_extract(self, url):
        """Turn the URL's query string into a Rainfocus search and build a playlist."""
        query = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
        query['type'] = 'session'
        return self.playlist_result(
            self._entries(query, url), playlist_title='Search query') |
298,895 | test process dataset | #!/usr/bin/env python
#
# Run me as follows:
# cd tests/
# nosetests
# For plotting and testing
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams.update(matplotlib.rcParamsDefault)
import matplotlib.style
matplotlib.style.use('seaborn-ticks')
from pytest import raises
import numpy.testing as npt
import os
from types import ModuleType
# Msaf imports
import msaf
from msaf.features import Features
from msaf.exceptions import (NoHierBoundaryError, FeaturesNotFound,
NoAudioFileError)
# Global vars
# Short chirp for fast sanity checks; the Sargon cut exercises real audio.
audio_file = os.path.join("fixtures", "chirp.mp3")
long_audio_file = os.path.join("fixtures", "Sargon_test", "audio",
                               "Mindless_cut.mp3")
fake_module_name = "fake_name_module"
def test_get_boundaries_module():
    """get_boundaries_module returns a module per algo, None for 'gt', and raises otherwise."""
    # Check that it returns modules for all the existing MSAF boundaries algos
    bound_ids = msaf.io.get_all_boundary_algorithms()
    for bound_id in bound_ids:
        bound_module = msaf.run.get_boundaries_module(bound_id)
        assert isinstance(bound_module, ModuleType)
    # Check that "gt" returns None
    assert msaf.run.get_boundaries_module("gt") is None
    # Check that a AttributeError is raised when calling it with non-existent
    # boundary id
    with raises(RuntimeError):
        msaf.run.get_boundaries_module(fake_module_name)
    # Check that a RuntimeError is raised when calling it with invalid
    # boundary id
    with raises(RuntimeError):
        msaf.run.get_boundaries_module("fmc2d")
def test_get_labels_module():
    """get_labels_module returns a module per algo, None for None, and raises otherwise."""
    # Check that it returns modules for all the existing MSAF boundaries algos
    label_ids = msaf.io.get_all_label_algorithms()
    for label_id in label_ids:
        label_module = msaf.run.get_labels_module(label_id)
        assert isinstance(label_module, ModuleType)
    # Check that None returns None
    assert msaf.run.get_labels_module(None) is None
    # Check that a AttributeError is raised when calling it with non-existent
    # labels id
    with raises(RuntimeError):
        msaf.run.get_labels_module(fake_module_name)
    # Check that a RuntimeError is raised when calling it with invalid
    # labels id
    with raises(RuntimeError):
        msaf.run.get_labels_module("foote")
def test_run_algorithms():
"""Test running all the algorithms."""
bound_ids = msaf.io.get_all_boundary_algorithms()
label_ids = msaf.io.get_all_label_algorithms()
# Add ground truth to boundary id
bound_ids += ["gt"]
# Add None to labels
label_ids += [None]
# Config params
feature = "pcp"
annot_beats = False
framesync = False
file_struct = msaf.io.FileStruct(audio_file)
file_struct.features_file = msaf.config.features_tmp_file
# Running all algorithms on a file that is too short
for bound_id in bound_ids:
for label_id in label_ids:
print("bound_id: %s,\tlabel_id: %s" % (bound_id, label_id))
config = msaf.io.get_configuration(feature, annot_beats, framesync,
bound_id, label_id)
config["hier"] = False
config["features"] = Features.select_features(
feature, file_struct, annot_beats, framesync)
est_times, est_labels = msaf.run.run_algorithms(
file_struct, bound_id, label_id, config)
assert len(est_times) == 2
assert len(est_labels) == 1
npt.assert_almost_equal(est_times[0], 0.0, decimal=2)
npt.assert_almost_equal(est_times[-1], config["features"].dur,
decimal=2)
# Commpute and save features for long audio file
file_struct = msaf.io.FileStruct(long_audio_file)
file_struct.features_file = msaf.config.features_tmp_file
def _test_run_msaf(bound_id, label_id, hier=False):
print("bound_id: %s,\tlabel_id: %s" % (bound_id, label_id))
config = msaf.io.get_configuration(feature, annot_beats, framesync,
bound_id, label_id)
config["hier"] = hier
config["features"] = Features.select_features(
feature, file_struct, annot_beats, framesync)
est_times, est_labels = msaf.run.run_algorithms(
file_struct, bound_id, label_id, config)
# Take the first level if hierarchy algorithm
if hier:
est_times = est_times[0]
est_labels = est_labels[0]
npt.assert_almost_equal(est_times[0], 0.0, decimal=2)
assert len(est_times) - 1 == len(est_labels)
npt.assert_almost_equal(est_times[-1], config["features"].dur,
decimal=2)
# Running all boundary algorithms on a relatively long file
# Combining boundaries with labels
for bound_id in bound_ids:
if bound_id == "gt":
continue
for label_id in label_ids:
_test_run_msaf(bound_id, label_id, False)
# Test the hierarchical algorithms
hier_ids = ["olda", "scluster"]
for hier_bounds_id in hier_ids:
for hier_labels_id in hier_ids:
if hier_labels_id == "olda":
hier_labels_id = "fmc2d"
_test_run_msaf(hier_bounds_id, hier_labels_id, True)
def test_no_bound_hierarchical():
    """run_hierarchical without a boundary module must raise NoHierBoundaryError."""
    with raises(NoHierBoundaryError):
        msaf.run.run_hierarchical(None, None, None, None, None)
def test_no_gt_flat_bounds():
    """Make sure the results are empty if there is not ground truth found."""
    feature = "pcp"
    annot_beats = False
    framesync = False
    file_struct = msaf.io.FileStruct(audio_file)
    file_struct.features_file = msaf.config.features_tmp_file
    config = {}
    config["features"] = Features.select_features(
        feature, file_struct, annot_beats, framesync)
    est_times, est_labels = msaf.run.run_flat(file_struct, None, None,
                                              None, config, 0)
    assert(not est_times)
    assert(not est_labels)
def test_process_track():
    """process_track writes an estimation file for a single track."""
    bounds_id = "foote"
    labels_id = None
    file_struct = msaf.io.FileStruct(audio_file)
    file_struct.features_file = msaf.config.features_tmp_file
    file_struct.est_file = "tmp.json"
    config = {}
    config["feature"] = "pcp"
    config["annot_beats"] = False
    config["framesync"] = False
    config["hier"] = False
    est_times, est_labels = msaf.run.process_track(
        file_struct, bounds_id, labels_id, config)
    assert os.path.isfile(file_struct.est_file)
    os.remove(file_struct.est_file)
def test_process_with_gt():
    """Processing with ground-truth boundaries yields a consistent segmentation."""
    bounds_id = "gt"
    labels_id = "fmc2d"
    est_times, est_labels = msaf.run.process(
        long_audio_file, boundaries_id=bounds_id, labels_id=labels_id)
    assert est_times[0] == 0
    # One more boundary than labels (fence-post).
    assert len(est_times) == len(est_labels) + 1
def test_process_wrong_feature():
    """An unknown feature id must raise FeaturesNotFound."""
    with raises(FeaturesNotFound):
        msaf.run.process(long_audio_file, feature="caca")
def test_process_wrong_path():
    """A non-existent audio path must raise NoAudioFileError."""
    with raises(NoAudioFileError):
        msaf.run.process("caca.mp3")
def test_process():
    """Default end-to-end processing yields a consistent segmentation."""
    est_times, est_labels = msaf.run.process(long_audio_file)
    assert est_times[0] == 0
    # One more boundary than labels (fence-post).
    assert len(est_times) == len(est_labels) + 1
def test_process_sonify():
    """sonify_bounds=True writes the requested boundary audio file."""
    out_wav = "out_wav.wav"
    est_times, est_labels = msaf.run.process(long_audio_file,
                                             sonify_bounds=True,
                                             out_bounds=out_wav)
    assert os.path.isfile(out_wav)
    os.remove(out_wav)
def METHOD_NAME():
    """Running on a whole dataset returns per-track (times, labels) tuples."""
    dataset_path = os.path.join("fixtures", "Sargon_test")
    # Only the first track's results are inspected here.
    est_times, est_labels = msaf.run.process(dataset_path)[0]
    assert est_times[0] == 0
    assert len(est_times) == len(est_labels) + 1
298,896 | test large system | # -*- coding: utf-8 -*-
"""test_table_streaming_support.py:
Test the streaming support in moose.Table.
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2016, Dilawar Singh"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import os
import sys
import moose
import numpy as np
print( '[INFO] Using moose form %s' % moose.__file__ )
def print_table(table):
    """Print a one-line summary of a table's streaming settings.

    Reads the ``datafile``, ``useStreamer`` and ``path`` attributes of
    *table* and prints them on a single line.
    """
    summary = (
        " datafile : %s" % table.datafile
        + " useStreamer: %s" % table.useStreamer
        + ' Path: %s' % table.path
    )
    print(summary)
def test_small( ):
    """Exercise the two ways Table2 streaming can be enabled.

    Builds a small reaction system (a <-> b + c), attaches three tables
    and streams each one differently: tabA via ``useStreamer = True``,
    tabB/tabC by assigning ``datafile`` (npy and csv).  After the run the
    three files are read back and must contain the same number of rows.
    """
    moose.CubeMesh( '/compt' )
    r = moose.Reac( '/compt/r' )
    a = moose.Pool( '/compt/a' )
    a.concInit = 1
    b = moose.Pool( '/compt/b' )
    b.concInit = 2
    c = moose.Pool( '/compt/c' )
    c.concInit = 0.5
    moose.connect( r, 'sub', a, 'reac' )
    moose.connect( r, 'prd', b, 'reac' )
    moose.connect( r, 'prd', c, 'reac' )
    r.Kf = 0.1
    r.Kb = 0.01
    tabA = moose.Table2( '/compt/a/tabA' )
    # tabA.format = 'npy'
    tabA.useStreamer = True   # Setting format alone is not good enough
    # Setting datafile enables streamer.
    tabB = moose.Table2( '/compt/b/tabB' )
    tabB.datafile = 'table2.npy'
    tabC = moose.Table2( '/compt/c/tabC' )
    tabC.datafile = 'tablec.csv'
    moose.connect( tabA, 'requestOut', a, 'getConc' )
    moose.connect( tabB, 'requestOut', b, 'getConc' )
    moose.connect( tabC, 'requestOut', c, 'getConc' )
    moose.reinit( )
    [ print_table( x) for x in [tabA, tabB, tabC] ]
    runtime = 1000
    print( 'Starting moose for %d secs' % runtime )
    moose.start( runtime, 1 )
    # Now read the numpy and csv and check the results.
    # NOTE(review): tabA.datafile is assumed to hold a text file readable by
    # np.loadtxt (default streamer output) — confirm against moose docs.
    a = np.loadtxt( tabA.datafile, skiprows=1 )
    b = np.load( 'table2.npy' )
    c = np.loadtxt( 'tablec.csv', skiprows=1 )
    # All three streaming paths must have recorded the same number of samples.
    assert (len(a) == len(b) == len(c))
    print( ' MOOSE is done' )
def buildLargeSystem(useStreamer = False):
    """Build and run a large reaction system; return the recorded data.

    Creates 300 independent a <-> b + c reactions, each monitored by a
    Table2 with a deliberately very long name.  When *useStreamer* is
    True, all tables are routed through a single Streamer writing to
    'data2.npy' and the returned value is the structured numpy array
    loaded from that file; otherwise a dict of {columnName: vector} is
    returned directly from the tables.
    """
    # create a huge system; wipe any previous instance first.
    if moose.exists('/comptB'):
        moose.delete('/comptB')
    moose.CubeMesh( '/comptB' )
    tables = []
    for i in range(300):
        r = moose.Reac('/comptB/r%d'%i)
        a = moose.Pool('/comptB/a%d'%i)
        a.concInit = 10.0
        b = moose.Pool('/comptB/b%d'%i)
        b.concInit = 2.0
        c = moose.Pool('/comptB/c%d'%i)
        c.concInit = 0.5
        moose.connect( r, 'sub', a, 'reac' )
        moose.connect( r, 'prd', b, 'reac' )
        moose.connect( r, 'prd', c, 'reac' )
        r.Kf = 0.1
        r.Kb = 0.01
        # Make the table name large enough that the combined npy header
        # exceeds 2^16 bytes.  Numpy format version 1 can't handle such a
        # large header; if format 1 were used this test would fail.
        t = moose.Table2('/comptB/TableO1%d'%i + 'abc'*100)
        moose.connect(t, 'requestOut', a, 'getConc')
        tables.append(t)
    if useStreamer:
        s = moose.Streamer('/comptB/streamer')
        s.datafile = 'data2.npy'
        print("[INFO ] Total tables %d" % len(tables))
        # Add all tables at once using wildcardFind.
        s.addTables(moose.wildcardFind('/comptB/##[TYPE=Table2]'))
        print("Streamer has %d table" % s.numTables)
        assert s.numTables == len(tables), (s.numTables, len(tables))
    moose.reinit()
    moose.start(10)
    if useStreamer:
        # Load the streamed data and verify the oversized header survived.
        data = np.load(s.datafile)
        header = str(data.dtype.names)
        assert len(header) > 2**16
    else:
        data = { x.columnName : x.vector for x in tables }
    return data
def METHOD_NAME():
    """Streamed output must match the directly-recorded table vectors.

    Runs the large system twice — once collecting table vectors directly,
    once through the Streamer — and checks that both produce the same
    columns with identical values.
    """
    # Get data without streamer and with streamer.
    # These two must be the same.
    X = buildLargeSystem(False)  # dict of {columnName: vector}
    Y = buildLargeSystem(True)   # structured array loaded from npy
    # X has no time column, Y does.
    assert len(X) == len(Y.dtype.names)-1, (len(X), Y.dtype)
    # Same column names apart from 'time'.
    xNames = list(X.keys())
    yNames = list(Y.dtype.names)
    assert set(yNames) - set(xNames) == set(['time']), (yNames, xNames)
    # Spot-check value equality for a handful of columns.
    for i in range(1, 10):
        a, b = Y[xNames[i]], X[xNames[i]]
        assert a.shape == b.shape, (a.shape, b.shape)
        assert (a == b).all(), (a-b)
def main():
    """Driver: run every streaming test in this module."""
    for test in (test_small, METHOD_NAME):
        test()
    print( '[INFO] All tests passed' )


if __name__ == '__main__':
    main()
298,897 | main | #!/usr/bin/env python3
##
## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sys
import re
import string
import hex_common
##
## Generate data for printing each instruction (format string + operands)
##
def regprinter(m):
    """Build the printf-style format fragment for a register operand.

    *m* is a regex match where group(1) is the register-class prefix,
    group(2) is the run of register letters (one "%d" per letter, joined
    with ':'), and group(3) is the trailing text.  Single 'S' (system) or
    'C' (control) registers additionally get a "/%s" so the symbolic
    register name can be printed.
    """
    # Fixed: the original bound this accumulator to the name ``str``,
    # shadowing the builtin.  The two identical if/elif branches are
    # merged into one condition; behavior is unchanged.
    fmt = m.group(1)
    fmt += ":".join(["%d"] * len(m.group(2)))
    fmt += m.group(3)
    if len(m.group(2)) == 1 and ('S' in m.group(1) or 'C' in m.group(1)):
        fmt += "/%s"
    return fmt
def spacify(s):
    """Insert spaces around top-level assignment operators in *s*.

    Operators containing '=' that are genuine assignments (=, +=, -=, ^=,
    &=, |=) get one space added on each side, but only when they appear
    outside parentheses and are not already padded.  Comparison and other
    operators containing '=' (==, !=, <=, >=) are left untouched.
    """
    # Any operator that contains an '=' character.
    ops_with_equal = re.compile('[-+^&|!<>=]?=')
    # Only the assignment operators.
    assignment = re.compile('[-+^&|]?=')

    # Parenthesis nesting depth at every character position.
    depth_at = {}
    depth = 0
    for idx, ch in enumerate(s):
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
        depth_at[idx] = depth

    # Walk the '='-containing operators; pad the true assignments that sit
    # at parenthesis depth zero.
    pieces = []
    cursor = 0
    for match in ops_with_equal.finditer(s):
        start, end = match.start(), match.end()
        token = s[start:end]
        pieces.append(s[cursor:start])
        cursor = end
        if depth_at[start] == 0:
            full = assignment.match(token)
            if full and len(full.group(0)) == end - start:
                # Add spaces only where one is not already present.
                if start > 0 and s[start - 1] != ' ':
                    pieces.append(' ')
                pieces.append(token)
                if end < len(s) and s[end] != ' ':
                    pieces.append(' ')
                continue
        # Not an assignment (or nested in parens): emit verbatim.
        pieces.append(token)
    pieces.append(s[cursor:])
    return ''.join(pieces)
def METHOD_NAME():
    """Generate DEF_PRINTINFO macros for every instruction tag.

    Reads the semantics file (argv[1]) and attributes file (argv[2]), then
    writes one DEF_PRINTINFO / DEF_VECX_PRINTINFO line per tag to the
    output file (argv[3]): a printf format string derived from the
    behavior string plus the operand expressions that fill it.
    """
    hex_common.read_semantics_file(sys.argv[1])
    hex_common.read_attribs_file(sys.argv[2])
    # Detects IMMEXT(<letter>): upper-case letter means the upper immediate
    # is extendable, lower-case the lower one.
    immext_casere = re.compile(r'IMMEXT\(([A-Za-z])')
    with open(sys.argv[3], 'w') as f:
        for tag in hex_common.tags:
            if not hex_common.behdict[tag]: continue
            extendable_upper_imm = False
            extendable_lower_imm = False
            m = immext_casere.search(hex_common.semdict[tag])
            if m:
                if m.group(1).isupper():
                    extendable_upper_imm = True
                else:
                    extendable_lower_imm = True
            # Turn the behavior string into a printf format: register and
            # immediate placeholders become %d / %x style specifiers.
            beh = hex_common.behdict[tag]
            beh = hex_common.regre.sub(regprinter,beh)
            beh = hex_common.absimmre.sub(r"#%s0x%x",beh)
            beh = hex_common.relimmre.sub(r"PC+%s%d",beh)
            beh = spacify(beh)
            # Print out a literal "%s" at the end, used to match empty string
            # so C won't complain at us
            if ("A_VECX" in hex_common.attribdict[tag]):
                macname = "DEF_VECX_PRINTINFO"
            else: macname = "DEF_PRINTINFO"
            f.write('%s(%s,"%s%%s"' % (macname,tag,beh))
            regs_or_imms = \
                hex_common.reg_or_immre.findall(hex_common.behdict[tag])
            ri = 0
            seenregs = {}
            # Emit one argument expression per operand in the format string.
            for allregs,a,b,c,d,allimm,immlett,bits,immshift in regs_or_imms:
                if a:
                    #register
                    if b in seenregs:
                        regno = seenregs[b]
                    else:
                        regno = ri
                    if len(b) == 1:
                        f.write(', insn->regno[%d]' % regno)
                        # System/control registers also pass their name.
                        if 'S' in a:
                            f.write(', sreg2str(insn->regno[%d])' % regno)
                        elif 'C' in a:
                            f.write(', creg2str(insn->regno[%d])' % regno)
                    elif len(b) == 2:
                        # Register pair: high then low register number.
                        f.write(', insn->regno[%d] + 1, insn->regno[%d]' % \
                            (regno,regno))
                    else:
                        print("Put some stuff to handle quads here")
                    if b not in seenregs:
                        seenregs[b] = ri
                        ri += 1
                else:
                    #immediate: prefix with "#"/"##" when extendable.
                    if (immlett.isupper()):
                        if extendable_upper_imm:
                            if immlett in 'rR':
                                f.write(',insn->extension_valid?"##":""')
                            else:
                                f.write(',insn->extension_valid?"#":""')
                        else:
                            f.write(',""')
                        ii = 1
                    else:
                        if extendable_lower_imm:
                            if immlett in 'rR':
                                f.write(',insn->extension_valid?"##":""')
                            else:
                                f.write(',insn->extension_valid?"#":""')
                        else:
                            f.write(',""')
                        ii = 0
                    f.write(', insn->immed[%d]' % ii)
            # append empty string so there is at least one more arg
            f.write(',"")\n')

if __name__ == "__main__":
    METHOD_NAME()
298,898 | persistent load | # Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Most features of this module have been moved to monty. Please refer to
the monty.json and monty.serialization documentation.
"""
from __future__ import annotations
import functools
import json
import pickle
import json
from typing import Any
from monty.json import MontyDecoder, MontyEncoder
from pymatgen.core.periodic_table import Element
def pmg_serialize(method):
    """
    Decorator for ``as_dict``-style methods: injects the MSON
    "@module" and "@class" keys into the returned dictionary.
    See the MSON documentation for more details.
    """

    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        serialized = method(*args, **kwargs)
        # args[0] is the bound instance; record its class identity.
        owner = type(args[0])
        serialized["@module"] = owner.__module__
        serialized["@class"] = owner.__name__
        return serialized

    return wrapper
def json_pretty_dump(obj: Any, filename: str) -> None:
    """
    Serialize *obj* as pretty-printed JSON to *filename*.

    Output is indented by four spaces with keys sorted.
    """
    with open(filename, "w") as fh:
        # Bug fix: ``sort_keys`` expects a bool; the previous ``sort_keys=4``
        # only worked because any truthy value enables sorting.
        json.dump(obj, fh, indent=4, sort_keys=True)
class PmgPickler(pickle.Pickler):
    """
    Persistence of External Objects as described in section 12.1.5.1 of
    https://docs.python.org/3/library/pickle.html

    Pymatgen ``Element`` objects are emitted as lightweight persistent IDs
    (a (class-name, symbol) tuple) instead of full pickled instances.
    """

    def persistent_id(self, obj: Any):
        """Instead of pickling as a regular class instance, we emit a
        persistent ID."""
        if isinstance(obj, Element):
            # Here, our persistent ID is simply a tuple, containing a tag and
            # a key
            return type(obj).__name__, obj.symbol
        # If obj does not have a persistent ID, return None. This means obj
        # needs to be pickled as usual.
        return None
class PmgUnpickler(pickle.Unpickler):
    """
    Persistence of External Objects as described in section 12.1.5.1 of
    https://docs.python.org/3/library/pickle.html

    Inverse of PmgPickler: persistent IDs of the form ("Element", symbol)
    are reconstructed as pymatgen Element objects.
    """

    def METHOD_NAME(self, pid):
        """
        This method is invoked whenever a persistent ID is encountered.
        Here, pid is the tuple returned by PmgPickler.

        NOTE(review): the pickle protocol only invokes this hook when it is
        named ``persistent_load`` — confirm the intended method name.
        """
        try:
            type_tag, key_id = pid
        except Exception:
            # Sometimes we get a string such as ('Element', u'C') instead
            # of a real tuple. Use ast to evaluate the expression (much safer
            # than eval).
            import ast

            type_tag, key_id = ast.literal_eval(pid)
        if type_tag == "Element":
            return Element(key_id)
        # Always raises an error if you cannot return the correct object.
        # Otherwise, the unpickler will think None is the object referenced
        # by the persistent ID.
        raise pickle.UnpicklingError(f"unsupported persistent object with pid {pid}")
def pmg_pickle_load(filobj, **kwargs) -> Any:
    """
    Deserialize a pickle stream using PmgUnpickler.

    Args:
        filobj: File-like object opened for binary reading.
        **kwargs: Keyword arguments forwarded to PmgUnpickler.

    Returns:
        The deserialized object.
    """
    unpickler = PmgUnpickler(filobj, **kwargs)
    return unpickler.load()
def pmg_pickle_dump(obj: Any, filobj, **kwargs):
    """
    Serialize *obj* to a pickle stream using PmgPickler.

    Args:
        obj: Object to dump.
        filobj: File-like object opened for binary writing.
        **kwargs: Keyword arguments forwarded to PmgPickler.
    """
    pickler = PmgPickler(filobj, **kwargs)
    return pickler.dump(obj)
def mjson_load(filepath: str, **kwargs) -> Any:
    """
    Read a JSON file in MSONable format with MontyDecoder and return the
    reconstructed python object.
    """
    with open(filepath, "rt") as handle:
        return json.load(handle, cls=MontyDecoder, **kwargs)
def mjson_loads(string: str, **kwargs) -> Any:
    """
    Decode a JSON string in MSONable format with MontyDecoder and return
    the reconstructed python object.
    """
    decoded = json.loads(string, cls=MontyDecoder, **kwargs)
    return decoded
def mjson_write(d: dict, filepath: str, **kwargs) -> None:
    """
    Write dictionary d to filepath in JSON format using MontyEncoder.
    """
    with open(filepath, "wt") as fh:
        json.dump(d, fh, cls=MontyEncoder, **kwargs)
298,899 | format | """
Clean and validate a DataFrame column containing German company registry id (handelsregisternummer).
"""
# pylint: disable=too-many-lines, too-many-arguments, too-many-branches
from typing import Any, Union
from operator import itemgetter
import dask.dataframe as dd
import numpy as np
import pandas as pd
from stdnum.de import handelsregisternummer
from ..progress_bar import ProgressBar
from .utils import NULL_VALUES, to_dask
def clean_de_handelsregisternummer(
    df: Union[pd.DataFrame, dd.DataFrame],
    column: str,
    output_format: str = "standard",
    inplace: bool = False,
    errors: str = "coerce",
    progress: bool = True,
) -> pd.DataFrame:
    """
    Clean German company registry id type data in a DataFrame column.

    Parameters
    ----------
    df
        A pandas or Dask DataFrame containing the data to be cleaned.
    column
        The name of the column containing data of handelsregisternummer type.
    output_format
        The output format of standardized number string.
        If output_format = 'compact', return string without any separators
        or whitespace.
        If output_format = 'standard', return string with proper separators
        and whitespace.
        Note: in the case of handelsregisternummer,
        the compact format is the same as the standard one.
        (default: "standard")
    inplace
        If True, delete the column containing the data that was cleaned.
        Otherwise, keep the original column.
        (default: False)
    errors
        How to handle parsing errors.
        - 'coerce': invalid parsing will be set to NaN.
        - 'ignore': invalid parsing will return the input.
        - 'raise': invalid parsing will raise an exception.
        (default: 'coerce')
    progress
        If True, display a progress bar.
        (default: True)

    Examples
    --------
    Clean a column of handelsregisternummer data.

    >>> df = pd.DataFrame({
            "handelsregisternummer": [
            'Aachen HRA 11223',
            'Aachen HRC 44123',]
            })
    >>> clean_de_handelsregisternummer(df, 'handelsregisternummer')
            handelsregisternummer handelsregisternummer_clean
    0            Aachen HRA 11223            Aachen HRA 11223
    1            Aachen HRC 44123                         NaN
    """
    if output_format not in {"compact", "standard"}:
        raise ValueError(
            f"output_format {output_format} is invalid. " 'It needs to be "compact" or "standard".'
        )

    # convert to dask
    df = to_dask(df)

    # To clean, create a new column "clean_code_tup" which contains
    # the cleaned values and code indicating how the initial value was
    # changed in a tuple. Then split the column of tuples and count the
    # amount of different codes to produce the report
    df["clean_code_tup"] = df[column].map_partitions(
        lambda srs: [METHOD_NAME(x, output_format, errors) for x in srs],
        meta=object,
    )
    df = df.assign(
        _temp_=df["clean_code_tup"].map(itemgetter(0)),
    )
    df = df.rename(columns={"_temp_": f"{column}_clean"})
    df = df.drop(columns=["clean_code_tup"])

    if inplace:
        # Replace the original column with the cleaned values but keep the
        # "<column>_clean" name for the surviving column.
        df[column] = df[f"{column}_clean"]
        df = df.drop(columns=f"{column}_clean")
        df = df.rename(columns={column: f"{column}_clean"})

    with ProgressBar(minimum=1, disable=not progress):
        df = df.compute()

    return df
def validate_de_handelsregisternummer(
    df: Union[str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame],
    column: str = "",
) -> Union[bool, pd.Series, pd.DataFrame]:
    """
    Validate if a data cell is handelsregisternummer in a DataFrame column.
    For each cell, return True or False.

    Parameters
    ----------
    df
        A string, Series, or DataFrame containing the data to be validated.
    column
        The name of the column to be validated (DataFrame input only);
        when empty, every cell of the DataFrame is validated.
    """
    checker = handelsregisternummer.is_valid
    if isinstance(df, (pd.Series, dd.Series)):
        return df.apply(checker)
    if isinstance(df, (pd.DataFrame, dd.DataFrame)):
        if column != "":
            return df[column].apply(checker)
        return df.applymap(checker)
    # Scalar input: validate the single value directly.
    return checker(df)
def METHOD_NAME(val: Any, output_format: str = "standard", errors: str = "coerce") -> Any:
    """
    Reformat a number string with proper separators and whitespace.

    Parameters
    ----------
    val
        The value of number string.
    output_format
        If output_format = 'compact', return string without any separators
        or whitespace.
        If output_format = 'standard', return string with proper separators
        and whitespace.
        Note: in the case of handelsregisternummer,
        the compact format is the same as the standard one.
    errors
        How to handle parsing errors: 'coerce' maps invalid values to NaN,
        'ignore' returns the input unchanged, 'raise' raises a ValueError.

    Returns
    -------
    A single-element list: the compacted value, NaN, or the original input,
    depending on validity and *errors*.
    """
    # Fixed: the default was the typo "coarse".  Any value other than
    # "raise"/"ignore" already behaved like "coerce", so correcting the
    # spelling does not change behavior.
    val = str(val)
    if val in NULL_VALUES:
        return [np.nan]

    if not validate_de_handelsregisternummer(val):
        if errors == "raise":
            raise ValueError(f"Unable to parse value {val}")
        return [val if errors == "ignore" else np.nan]

    result: Any = []
    if output_format in {"compact", "standard"}:
        result = [handelsregisternummer.compact(val)] + result
    return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.