Dataset preview: 28 columns, followed by sample rows (one source file per row).

| column | dtype | stats |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4 to 721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 57 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] (nullable) | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns] (nullable) | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | length 3 to 113 |
| content | string | length 6 to 10.2M |
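As a hedged illustration only, the sketch below shows how rows with this schema could be streamed and filtered using the Hugging Face `datasets` library; the dataset identifier is a placeholder and is not taken from this document.

```python
# Hedged sketch: streaming rows that follow the schema above with the `datasets`
# library. The dataset identifier is a made-up placeholder, not the real name.
from datasets import load_dataset

ds = load_dataset("org/placeholder-code-dataset", split="train", streaming=True)

for row in ds.take(5):
    # Keep permissive, non-vendored, non-generated Python files only.
    if (row["license_type"] == "permissive"
            and row["language"] == "Python"
            and not row["is_vendor"]
            and not row["is_generated"]):
        print(row["repo_name"], row["path"], row["length_bytes"])
```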
Row 1:
blob_id: c0e3c2da4c4a388401edbfd73f23d273acd23a6f
directory_id: f3806d9fb54773908cd9704121a543b114470aca
path: /angr/procedures/java_util/iterator.py
content_id: 8cf0dd167570f6da9f6b04df7dfa7fc67f9725ab
detected_licenses: ["BSD-2-Clause"]
license_type: permissive
repo_name: angr/angr
snapshot_id: 8ae95fceca51b0a001de56477d984dd01193ac1d
revision_id: 37e8ca1c3308ec601ad1d7c6bc8081ff38a7cffd
branch_name: refs/heads/master
visit_date: 2023-08-17T03:15:21.007865
revision_date: 2023-08-15T18:44:57
committer_date: 2023-08-15T18:44:57
github_id: 40,328,394
star_events_count: 7,184
fork_events_count: 1,306
gha_license_id: BSD-2-Clause
gha_event_created_at: 2023-09-14T20:14:23
gha_created_at: 2015-08-06T21:46:55
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,605
extension: py
filename: iterator.py
content:
import claripy
import logging
from ..java import JavaSimProcedure
from ...engines.soot.values import SimSootValue_ThisRef
from .collection import ELEMS, SIZE, INDEX
log = logging.getLogger(name=__name__)
class IteratorHasNext(JavaSimProcedure):
__provides__ = (("java.util.Iterator", "hasNext()"),)
def run(self, this_ref):
log.debug(f"Called SimProcedure java.util.Iterator.hasNext with args: {this_ref}")
if this_ref.symbolic:
return claripy.BoolS("iterator.hasNext")
iterator_size = this_ref.load_field(self.state, SIZE, "int")
iterator_index = this_ref.load_field(self.state, INDEX, "int")
has_next = self.state.solver.eval(iterator_index) < self.state.solver.eval(iterator_size)
return claripy.BoolV(has_next)
class IteratorNext(JavaSimProcedure):
__provides__ = (("java.util.Iterator", "next()"),)
def run(self, this_ref):
log.debug(f"Called SimProcedure java.util.Iterator.hasNext with args: {this_ref}")
if this_ref.symbolic:
return SimSootValue_ThisRef.new_object(self.state, "java.lang.Object", symbolic=True)
array_ref = this_ref.load_field(self.state, ELEMS, "java.lang.Object[]")
iterator_index = this_ref.load_field(self.state, INDEX, "int")
# TODO should check boundaries?
# Update index
new_iterator_index = claripy.BVV(self.state.solver.eval(iterator_index) + 1, 32)
this_ref.store_field(self.state, INDEX, "int", new_iterator_index)
return self.state.javavm_memory.load_array_element(array_ref, iterator_index)
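As a small, hedged illustration (not part of the angr repository), the snippet below isolates the concrete branch of `IteratorHasNext.run`: when the iterator state is concrete the result is a concrete `claripy.BoolV`, and when `this_ref` is symbolic an unconstrained `claripy.BoolS` is returned instead.

```python
# Hedged, standalone sketch of the hasNext logic above; the values are made up.
import claripy

size, index = 3, 1                            # stand-ins for the SIZE and INDEX fields
concrete = claripy.BoolV(index < size)        # concrete case -> <Bool True>
symbolic = claripy.BoolS("iterator.hasNext")  # symbolic case: unconstrained boolean
print(concrete, symbolic)
```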
Row 2:
blob_id: eb25709a18f2180ff996f76e02185a9e04637d95
directory_id: 1664bc3e55c0e006c8bbf8671a2ba0043dc0203c
path: /mpf/devices/motor.py
content_id: b43f16b655567393ac0570de162915393c7e473e
detected_licenses: ["MIT", "CC-BY-4.0"]
license_type: permissive
repo_name: missionpinball/mpf
snapshot_id: d426b0b1b865a138f169aaf852741f39a880edf2
revision_id: 9f90c8b1586363b65340017bfa3af5d56d32c6d9
branch_name: refs/heads/dev
visit_date: 2023-07-26T21:31:11.581205
revision_date: 2023-07-15T17:06:04
committer_date: 2023-07-15T17:06:04
github_id: 21,267,545
star_events_count: 191
fork_events_count: 173
gha_license_id: MIT
gha_event_created_at: 2023-09-14T06:07:45
gha_created_at: 2014-06-27T07:26:26
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 9,728
extension: py
filename: motor.py
content:
"""Motor device."""
from mpf.core.device_monitor import DeviceMonitor
from mpf.core.events import event_handler
from mpf.core.system_wide_device import SystemWideDevice
@DeviceMonitor(_move_direction="move_direction", _target_position="target_position", _last_position="last_position")
class Motor(SystemWideDevice):
"""A motor which can be controlled using drivers."""
__slots__ = ["_target_position", "_last_position", "type", "_move_direction"]
config_section = 'motors'
collection = 'motors'
class_label = 'motor'
def __init__(self, machine, name):
"""Initialise motor."""
self._target_position = None
self._last_position = None
self.type = None
self._move_direction = "stopped"
super().__init__(machine, name)
async def _initialize(self):
await super()._initialize()
self._target_position = self.config['reset_position']
if self.config['reset_position'] not in self.config['position_switches']:
self.raise_config_error("Reset position {} not in positions {}".format(
self.config['reset_position'], self.config['position_switches']), 1)
if not self.config['motor_left_output'] and not self.config['motor_right_output']:
self.raise_config_error("Need either motor_left_output or motor_right_output", 2)
if self.config['motor_left_output'] == self.config['motor_right_output']:
self.raise_config_error("motor_left_output and motor_right_output need to be different", 3)
if self.config['motor_left_output'] and self.config['motor_right_output']:
self.type = "two_directions"
else:
self.type = "one_direction"
for position, switch in self.config['position_switches'].items():
self.machine.switch_controller.add_switch_handler_obj(switch, self._update_position,
callback_kwargs={"position": position})
# add handlers
for event, position in self.config['go_to_position'].items():
if position not in self.config['position_switches']:
self.raise_config_error("Invalid position {} in go_to_position".format(position), 4)
self.machine.events.add_handler(event, self.event_go_to_position, position=position)
if self.config['include_in_ball_search']:
self.machine.events.add_handler("ball_search_started",
self._ball_search_start)
self.machine.events.add_handler("ball_search_stopped",
self._ball_search_stop)
def _validate_last_position(self) -> bool:
"""Verify that at most one position switch is active."""
active_position_switches = [(position, switch) for position, switch in
self.config['position_switches'].items() if switch.state]
if len(active_position_switches) > 1:
self.warning_log("Found %s active position switches: %s. There should be only one position switch active "
"at a time.", len(active_position_switches),
active_position_switches)
self.machine.service.add_technical_alert(
self, "Multiple position switches are active: {}. Verify switches.".format(active_position_switches))
return False
return True
@event_handler(1)
def event_reset(self, **kwargs):
"""Event handler for reset event."""
del kwargs
self.reset()
def reset(self):
"""Go to reset position."""
self.go_to_position(self.config['reset_position'])
@event_handler(10)
def event_go_to_position(self, position=None, **kwargs):
"""Event handler for go_to_position event."""
del kwargs
if position is None:
raise AssertionError("Got go_to_position event without position.")
self.go_to_position(position)
def go_to_position(self, position):
"""Move motor to a specific position."""
self.log.info("Moving motor to position %s", position)
self._target_position = position
self._move_to_position(position)
def _move_to_position(self, position):
if not self._validate_last_position():
self.warning_log("Will not move motor because multiple position switches are active.")
self._stop_motor()
return
switch = self.config['position_switches'][position]
# check if we are already in this position
if self.machine.switch_controller.is_active(switch):
# already in position
self._reached_position(position)
self._stop_motor()
else:
if self.type == "two_directions":
if self._last_position:
assumed_position = self._last_position
else:
active_position_switches = [position for position, switch in
self.config['position_switches'].items()
if switch.state]
if len(active_position_switches) == 1:
assumed_position = active_position_switches[0]
self.debug_log("Assuming position based on switches to be %s", assumed_position)
else:
assumed_position = None
if assumed_position is None and \
list(self.config['position_switches']).index(self.config['reset_position']) == 0:
self._move_left()
elif assumed_position is None:
self._move_right()
elif list(self.config['position_switches']).index(assumed_position) > \
list(self.config['position_switches']).index(position):
self._move_left()
else:
self._move_right()
else:
# not in position. start motor
if self.config['motor_left_output']:
self._move_left()
else:
self._move_right()
def _update_position(self, position, **kwargs):
"""Handle that motor reached a certain position."""
del kwargs
first_known_position = self._last_position is None
if not self._validate_last_position():
self.warning_log("Will stop motor because multiple position switches are active.")
self._stop_motor()
self._last_position = None
return
self._last_position = position
if position == self._target_position:
self._reached_position(position)
else:
self.debug_log("Motor is at position %s", position)
if self.type == "two_directions":
# special case: initial position has been unknown and we reached our first position
# we might have moved in the wrong direction so correct this now
if first_known_position and self._move_direction == "right" and \
list(self.config['position_switches']).index(self._last_position) > \
list(self.config['position_switches']).index(self._target_position):
self._move_left()
elif first_known_position and self._move_direction == "left" and \
list(self.config['position_switches']).index(self._last_position) < \
list(self.config['position_switches']).index(self._target_position):
self._move_right()
elif list(self.config['position_switches']).index(position) in \
(0, len(self.config['position_switches']) - 1):
self.warning_log("Motor hit end switch %s unexpectedly. Stopping motor.", position)
self._stop_motor()
def _reached_position(self, position):
"""Handle that motor handled its target position."""
self.info_log("Motor reached position %s. Stopping motor.", position)
self.machine.events.post("motor_{}_reached_{}".format(self.name, position))
'''event: motor_(name)_reached_(position)
desc: A motor device called (name) reached position (position)
(device)
'''
# disable motor
self._stop_motor()
def _stop_motor(self):
if self.config['motor_left_output']:
self.config['motor_left_output'].disable()
if self.config['motor_right_output']:
self.config['motor_right_output'].disable()
self._move_direction = "stopped"
def _move_right(self):
if self.config['motor_left_output']:
self.config['motor_left_output'].disable()
self.config['motor_right_output'].enable()
self._move_direction = "right"
def _move_left(self):
if self.config['motor_right_output']:
self.config['motor_right_output'].disable()
self.config['motor_left_output'].enable()
self._move_direction = "left"
def _ball_search_start(self, **kwargs):
del kwargs
self._stop_motor()
# simply enable motor. will move to old position afterwards.
if self.config['motor_left_output']:
self.config['motor_left_output'].enable()
else:
self.config['motor_right_output'].enable()
def _ball_search_stop(self, **kwargs):
del kwargs
# move to last position
self._move_to_position(self._target_position)
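The validation in `_initialize` above implies a specific set of config keys. As a hedged illustration only (not taken from the MPF documentation), those keys could look like the plain Python dict below; every name and value is a made-up placeholder.

```python
# Hedged illustration of the config keys Motor._initialize expects; every value
# here is a placeholder. In MPF these would come from the machine's config files.
example_motor_config = {
    "reset_position": "down",
    "position_switches": {"down": "s_motor_down", "up": "s_motor_up"},
    "motor_left_output": "c_motor_reverse",      # at least one output must be set
    "motor_right_output": "c_motor_forward",     # both set -> type == "two_directions"
    "go_to_position": {"em_motor_go_up": "up"},  # event name -> target position
    "include_in_ball_search": True,
}
```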
Row 3:
blob_id: a4fa14afb731861d087b8d1b41a5608334748717
directory_id: 7c8f7a4e4fe04d65d92853ec0d7c466e55379ab4
path: /clients/python-client/parsr_client/__init__.py
content_id: f26ebf1276e5ee0b38eb7e73aa3b4e8f34139647
detected_licenses: ["GPL-1.0-or-later", "Apache-2.0", "LicenseRef-scancode-commercial-license", "AGPL-3.0-or-later"]
license_type: permissive
repo_name: axa-group/Parsr
snapshot_id: d04fae6ab0d8893f0de77423b9ddb8992b0689a9
revision_id: b1fc36fc91531704235438e844bbf6315e889f86
branch_name: refs/heads/master
visit_date: 2023-08-10T19:15:32.639082
revision_date: 2022-12-06T12:02:02
committer_date: 2022-12-06T12:02:02
github_id: 200,653,543
star_events_count: 5,467
fork_events_count: 313
gha_license_id: Apache-2.0
gha_event_created_at: 2023-03-02T21:15:27
gha_created_at: 2019-08-05T12:43:53
gha_language: JavaScript
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 98
extension: py
filename: __init__.py
content:
from .parsr_client import ParsrClient
from .parsr_output_interpreter import ParsrOutputInterpreter
Row 4:
blob_id: 1808c2182e98fca1727d697aa7b6625820584edd
directory_id: 271780e3b208b218d3a570b10244d3b22a283d43
path: /scripts/attr_translations_to_db.py
content_id: 4b85a58da377d0665e7e3718da45bc968e30ade9
detected_licenses: []
license_type: no_license
repo_name: alexandersimoes/oec
snapshot_id: d312b5ecec1d502f9d7ca4d15b5db228d7984718
revision_id: d8d392f8ddd81dcd04fb8cc41697de104fcdf233
branch_name: refs/heads/master
visit_date: 2021-04-12T11:08:23.328023
revision_date: 2020-11-28T02:31:01
committer_date: 2020-11-28T02:31:01
github_id: 3,593,949
star_events_count: 135
fork_events_count: 44
gha_license_id: null
gha_event_created_at: 2021-03-19T22:02:43
gha_created_at: 2012-03-01T16:59:42
gha_language: CSS
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,791
extension: py
filename: attr_translations_to_db.py
content:
# -*- coding: utf-8 -*-
"""
Add HS/SITC product translations to DB
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Raw data from this file -
https://docs.google.com/spreadsheets/d/1mPG5zgQmeh3vRsGQrIOq8hPONXNRGW1OC449ExAqMVs/
"""
import sys, MySQLdb, time, os, csv, argparse, click
''' Connect to DB '''
db = MySQLdb.connect(host=os.environ.get("OEC_DB_HOST"), user=os.environ.get("OEC_DB_USER"),
passwd=os.environ.get("OEC_DB_PW"),
db=os.environ.get("OEC_DB_NAME"))
db.autocommit(1)
cursor = db.cursor()
oec_langs = ['ar','de','el','en','es','fr','he','hi','it','ja','ko','mn','nl','ru','pt','tr','vi','zh_cn']
@click.command()
@click.argument('file_path', type=click.File('rb'))
@click.option('-l', '--lang', prompt='Language', help='2 letter lang code of translations', required=True, type=click.Choice(oec_langs))
@click.option('-c', '--cls', prompt='Classification', required=True, type=click.Choice(['hs', 'hs92', 'hs96', 'hs02', 'hs07', 'sitc', 'country']))
def main(file_path, lang, cls):
'''Initialize lookup for column indices'''
col_positions = {
"id": None,
"name": None
}
if cls in ['hs', 'sitc', 'hs92', 'hs96', 'hs02', 'hs07']:
col_positions["desc"] = None
col_positions["keywords"] = None
col_positions["article"] = None
col_positions["plural"] = None
col_positions["gender"] = None
with file_path as csv_file:
'''if the lang is not english skip the first line (these are the instructions)'''
if lang != "en": next(csv_file)
csv_reader = csv.reader(csv_file, delimiter='\t', quotechar='"')
for row, data in enumerate(csv_reader):
'''if its the first row setup col_positions'''
if row == 0:
print; print "First row index and column names for reference: "
for column, column_header in enumerate(data):
print column, column_header
print
'''step through each column to set indices'''
for col_name in col_positions:
try:
col_positions[col_name] = int(raw_input("Index for " + col_name + " column: "))
except:
print "Must be integer..."; sys.exit()
print "Adding names to DB..."
continue
'''make a copy of col_positions so we dont overwrite original'''
vals = col_positions.copy()
'''fill in vals with data from CSV'''
for col_name in vals:
vals[col_name] = data[col_positions[col_name]] or None
vals[col_name] = None if vals[col_name] == "#VALUE!" else vals[col_name]
if col_name == "keywords" and vals[col_name]:
vals[col_name] = [x.strip() for x in vals[col_name].replace("و", ",").replace(",", ",").replace("、", ",").split(",")]
vals[col_name] = ", ".join(vals[col_name])
if vals["name"]:
if cls in ['hs', 'sitc', 'hs92', 'hs96', 'hs02', 'hs07']:
indicies = [vals["id"], lang]
data_vals = [vals["name"], vals["keywords"], vals["desc"], vals["gender"], vals["plural"], vals["article"]]
sql = """
INSERT INTO attr_{0}_name
({0}_id, lang, name, keywords, `desc`, gender, plural, article)
VALUES
(%s, %s, %s, %s, %s, %s, %s, %s)
ON DUPLICATE KEY UPDATE
name=%s, keywords=%s, `desc`=%s, gender=%s, plural=%s, article=%s
""".format(cls)
else:
indicies = [vals["id"], lang]
data_vals = [vals["name"]]
sql = """
INSERT INTO attr_{0}_name
(origin_id, lang, name)
VALUES
(%s, %s, %s)
ON DUPLICATE KEY UPDATE
name=%s
""".format(cls)
# print sql
# sys.exit()
try:
cursor.execute(sql, indicies+data_vals+data_vals)
except MySQLdb.Error, e:
print "Error %d: %s" % (e.args[0], e.args[1])
print indicies
# sys.exit()
if __name__ == "__main__":
main()
Row 5:
blob_id: e3dbd59febbd8898002c015639eb5a2d971d6910
directory_id: 52a15d4fabf68bf23a23799312ae40465764908c
path: /installation/migrations/dbschema.external-authentication.py
content_id: 656909a205b8d2cc7263f50eec1ba552e37407eb
detected_licenses: ["MIT", "Apache-2.0"]
license_type: permissive
repo_name: jensl/critic
snapshot_id: 2071a1b0600051967323df48f4d3a5656a5d2bb8
revision_id: c2d962b909ff7ef2f09bccbeb636333920b3659e
branch_name: refs/heads/stable/1
visit_date: 2022-05-28T03:51:15.108944
revision_date: 2018-03-27T18:47:46
committer_date: 2018-03-29T15:08:30
github_id: 6,430,552
star_events_count: 224
fork_events_count: 36
gha_license_id: NOASSERTION
gha_event_created_at: 2023-05-29T15:38:00
gha_created_at: 2012-10-28T18:26:04
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,904
extension: py
filename: dbschema.external-authentication.py
content:
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2014 the Critic contributors, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import psycopg2
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("--uid", type=int)
parser.add_argument("--gid", type=int)
arguments = parser.parse_args()
os.setgid(arguments.gid)
os.setuid(arguments.uid)
import configuration
db = psycopg2.connect(**configuration.database.PARAMETERS)
cursor = db.cursor()
def create(table_name, statement):
try:
# Make sure the table doesn't already exist.
cursor.execute("SELECT 1 FROM %s" % table_name)
# Above statement would have thrown a psycopg2.ProgrammingError if the
# table didn't exist, but it didn't, so assume the table doesn't need to
# be added.
return
except psycopg2.ProgrammingError:
db.rollback()
cursor.execute(statement)
db.commit()
create("externalusers", """
CREATE TABLE externalusers
( id SERIAL PRIMARY KEY,
uid INTEGER REFERENCES users,
provider VARCHAR(16) NOT NULL,
account VARCHAR(256) NOT NULL,
email VARCHAR(256),
token VARCHAR(256),
UNIQUE (provider, account) );
""")
create("oauthstates", """
CREATE TABLE oauthstates
( state VARCHAR(64) PRIMARY KEY,
url TEXT,
time TIMESTAMP NOT NULL DEFAULT NOW() );
""")
db.commit()
db.close()
Row 6:
blob_id: 9d697ed1ae69e72d127db9a865b0bb53656b46b8
directory_id: c29eba01ce299ebb27b886a83e19e59add7e2f6b
path: /src/pytest_cases/fixture_parametrize_plus.py
content_id: 644ebefdbeccf995f507485c0fdf758ea205c951
detected_licenses: ["BSD-3-Clause"]
license_type: permissive
repo_name: smarie/python-pytest-cases
snapshot_id: e87516e73d5067d5c307c7fdb37cc5f1f97c417e
revision_id: ab3b7190d728b18512141b9f5f3a1c3dfc7cedf2
branch_name: refs/heads/main
visit_date: 2023-07-08T11:41:57.278697
revision_date: 2023-02-23T13:11:25
committer_date: 2023-02-23T13:11:25
github_id: 138,296,136
star_events_count: 286
fork_events_count: 40
gha_license_id: BSD-3-Clause
gha_event_created_at: 2023-07-03T14:57:02
gha_created_at: 2018-06-22T11:42:19
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 68,879
extension: py
filename: fixture_parametrize_plus.py
content:
# Authors: Sylvain MARIE <sylvain.marie@se.com>
# + All contributors to <https://github.com/smarie/python-pytest-cases>
#
# License: 3-clause BSD, <https://github.com/smarie/python-pytest-cases/blob/master/LICENSE>
from inspect import isgeneratorfunction
from warnings import warn
try: # python 3.3+
from inspect import signature, Parameter
except ImportError:
from funcsigs import signature, Parameter # noqa
try:
from collections.abc import Iterable
except ImportError: # noqa
from collections import Iterable
try:
from typing import Union, Callable, List, Any, Sequence, Optional, Type, Tuple, TypeVar # noqa
from types import ModuleType # noqa
T = TypeVar('T', bound=Union[Type, Callable])
except ImportError:
pass
import pytest
from makefun import with_signature, remove_signature_parameters, add_signature_parameters, wraps
from .common_mini_six import string_types
from .common_others import AUTO, robust_isinstance, replace_list_contents
from .common_pytest_marks import has_pytest_param, get_param_argnames_as_list
from .common_pytest_lazy_values import is_lazy_value, get_lazy_args
from .common_pytest import get_fixture_name, remove_duplicates, mini_idvalset, is_marked_parameter_value, \
extract_parameterset_info, ParameterSet, cart_product_pytest, mini_idval, inject_host, \
get_marked_parameter_values, resolve_ids, get_marked_parameter_id, get_marked_parameter_marks, is_fixture, \
safe_isclass
from .fixture__creation import check_name_available, CHANGE, WARN
from .fixture_core1_unions import InvalidParamsList, NOT_USED, UnionFixtureAlternative, _make_fixture_union, \
_make_unpack_fixture, UnionIdMakers
from .fixture_core2 import _create_param_fixture, fixture
def _fixture_product(fixtures_dest,
name, # type: str
fixtures_or_values,
fixture_positions,
scope="function", # type: str
unpack_into=None, # type: Iterable[str]
autouse=False, # type: bool
hook=None, # type: Callable[[Callable], Callable]
caller=None, # type: Callable
**kwargs):
"""
Internal implementation for fixture products created by pytest parametrize plus.
:param fixtures_dest:
:param name:
:param fixtures_or_values:
:param fixture_positions:
:param idstyle:
:param scope:
:param ids:
:param unpack_into:
:param autouse:
:param kwargs:
:return:
"""
# test the `fixtures` argument to avoid common mistakes
if not isinstance(fixtures_or_values, (tuple, set, list)):
raise TypeError("fixture_product: the `fixtures_or_values` argument should be a tuple, set or list")
else:
has_lazy_vals = any(is_lazy_value(v) for v in fixtures_or_values)
_tuple_size = len(fixtures_or_values)
# first get all required fixture names
f_names = [None] * _tuple_size
for f_pos in fixture_positions:
# possibly get the fixture name if the fixture symbol was provided
f = fixtures_or_values[f_pos]
if isinstance(f, fixture_ref):
f = f.fixture
# and remember the position in the tuple
f_names[f_pos] = get_fixture_name(f)
# remove duplicates by making it an ordered set
all_names = remove_duplicates((n for n in f_names if n is not None))
if len(all_names) < 1:
raise ValueError("Empty fixture products are not permitted")
def _tuple_generator(request, all_fixtures):
for i in range(_tuple_size):
fix_at_pos_i = f_names[i]
if fix_at_pos_i is None:
# fixed value
# note: wouldn't it be almost as efficient but more readable to *always* call handle_lazy_args?
yield get_lazy_args(fixtures_or_values[i], request) if has_lazy_vals else fixtures_or_values[i]
else:
# fixture value
yield all_fixtures[fix_at_pos_i]
# then generate the body of our product fixture. It will require all of its dependent fixtures
@with_signature("(request, %s)" % ', '.join(all_names))
def _new_fixture(request, **all_fixtures):
return tuple(_tuple_generator(request, all_fixtures))
_new_fixture.__name__ = name
# finally create the fixture per se.
# WARNING we do not use pytest.fixture but fixture so that NOT_USED is discarded
f_decorator = fixture(scope=scope, autouse=autouse, hook=hook, **kwargs)
fix = f_decorator(_new_fixture)
# Dynamically add fixture to caller's module as explained in https://github.com/pytest-dev/pytest/issues/2424
check_name_available(fixtures_dest, name, if_name_exists=WARN, caller=caller)
setattr(fixtures_dest, name, fix)
# if unpacking is requested, do it here
if unpack_into is not None:
# note that as for fixture unions, we can not expose the `in_cls` parameter.
# but there is an easy workaround if unpacking is needed: call unpack_fixture separately
_make_unpack_fixture(fixtures_dest, argnames=unpack_into, fixture=name, hook=hook, in_cls=False)
return fix
_make_fixture_product = _fixture_product
"""A readable alias for callers not using the returned symbol"""
class fixture_ref(object): # noqa
"""
A reference to a fixture, to be used in `@parametrize`.
You can create it from a fixture name or a fixture object (function).
"""
__slots__ = 'fixture', 'theoretical_size', '_id'
def __init__(self,
fixture, # type: Union[str, Callable]
id=None, # type: str # noqa
):
"""
:param fixture: the name of the fixture to reference, or the fixture function itself
:param id: an optional custom id to override the fixture name in ids.
"""
self.fixture = get_fixture_name(fixture)
self._id = id
self.theoretical_size = None # we dont know yet, will be filled by @parametrize
def get_name_for_id(self):
"""return the name to use in ids."""
return self._id if self._id is not None else self.fixture
def __str__(self):
# used in mini_idval for example
return self.get_name_for_id()
def __repr__(self):
if self._id is not None:
return "fixture_ref<%s, id=%s>" % (self.fixture, self._id)
else:
return "fixture_ref<%s>" % self.fixture
def _check_iterable(self):
"""Raise a TypeError if this fixture reference is not iterable, that is, it does not represent a tuple"""
if self.theoretical_size is None:
raise TypeError("This `fixture_ref` has not yet been initialized, so it cannot be unpacked/iterated upon. "
"This is not supposed to happen when a `fixture_ref` is used correctly, i.e. as an item in"
" the `argvalues` of a `@parametrize` decorator. Please check the documentation for "
"details.")
if self.theoretical_size == 1:
raise TypeError("This fixture_ref does not represent a tuple of arguments, it is not iterable")
def __len__(self):
self._check_iterable()
return self.theoretical_size
def __getitem__(self, item):
"""
Returns an item in the tuple described by this fixture_ref.
This is just a facade, a FixtureRefItem.
Note: this is only used when a custom `idgen` is passed to @parametrized
"""
self._check_iterable()
return FixtureRefItem(self, item)
class FixtureRefItem(object):
"""An item in a fixture_ref when this fixture_ref is used as a tuple."""
__slots__ = 'host', 'item'
def __init__(self,
host, # type: fixture_ref
item # type: int
):
self.host = host
self.item = item
def __repr__(self):
return "%r[%s]" % (self.host, self.item)
# Fix for https://github.com/smarie/python-pytest-cases/issues/71
# In order for pytest to allow users to import this symbol in conftest.py
# they should be declared as optional plugin hooks.
# A workaround otherwise would be to remove the 'pytest_' name prefix
# See https://github.com/pytest-dev/pytest/issues/6475
@pytest.hookimpl(optionalhook=True)
def pytest_parametrize_plus(*args,
**kwargs):
warn("`pytest_parametrize_plus` and `parametrize_plus` are deprecated. Please use the new alias `parametrize`. "
"See https://github.com/pytest-dev/pytest/issues/6475", category=DeprecationWarning, stacklevel=2)
return parametrize(*args, **kwargs)
parametrize_plus = pytest_parametrize_plus
class ParamAlternative(UnionFixtureAlternative):
"""Defines an "alternative", used to parametrize a fixture union in the context of parametrize
It is similar to a union fixture alternative, except that it also remembers the parameter argnames.
They are used to generate the test id corresponding to this alternative. See `_get_minimal_id` implementations.
`ParamIdMakers` overrides some of the idstyles in `UnionIdMakers` so as to adapt them to these `ParamAlternative`
objects.
"""
__slots__ = ('argnames', 'decorated')
def __init__(self,
union_name, # type: str
alternative_name, # type: str
param_index, # type: int
argnames, # type: Sequence[str]
decorated # type: Callable
):
"""
:param union_name: the name of the union fixture created by @parametrize to switch between param alternatives
:param alternative_name: the name of the fixture created by @parametrize to represent this alternative
:param param_index: the index of this parameter in the list of argvalues passed to @parametrize
:param argnames: the list of parameter names in @parametrize
:param decorated: the test function or fixture that this alternative refers to
"""
super(ParamAlternative, self).__init__(union_name=union_name, alternative_name=alternative_name,
alternative_index=param_index)
self.argnames = argnames
self.decorated = decorated
def get_union_id(self):
return ("(%s)" % ",".join(self.argnames)) if len(self.argnames) > 1 else self.argnames[0]
def get_alternative_idx(self):
return "P%s" % self.alternative_index
def get_alternative_id(self):
"""Subclasses should return the smallest id representing this parametrize fixture union alternative"""
raise NotImplementedError()
class SingleParamAlternative(ParamAlternative):
"""alternative class for single parameter value"""
__slots__ = 'argval', 'id'
def __init__(self,
union_name, # type: str
alternative_name, # type: str
param_index, # type: int
argnames, # type: Sequence[str]
argval, # type: Any
id, # type: Optional[str]
decorated # type: Callable
):
"""
:param union_name: the name of the union fixture created by @parametrize to switch between param alternatives
:param alternative_name: the name of the fixture created by @parametrize to represent this alternative
:param param_index: the index of this parameter in the list of argvalues passed to @parametrize
:param argnames: the list of parameter names in @parametrize
:param argval: the value used by this parameter
"""
super(SingleParamAlternative, self).__init__(union_name=union_name, alternative_name=alternative_name,
param_index=param_index, argnames=argnames, decorated=decorated)
self.argval = argval
self.id = id
def get_alternative_id(self):
"""Since this alternative has no further parametrization (simplification for 1-param alternative),
we create here the equivalent of the id of the argvalue if it was used as a parameter"""
if self.id is not None:
# custom id from `@parametrize(ids=<callable_or_list>)`
return self.id
else:
return mini_idvalset(self.argnames, self.argval, idx=self.alternative_index)
@classmethod
def create(cls,
new_fixture_host, # type: Union[Type, ModuleType]
test_func, # type: Callable
param_union_name, # type: str
argnames, # type: Sequence[str]
i, # type: int
argvalue, # type: Any
id, # type: Union[str, Callable]
hook=None, # type: Callable
debug=False # type: bool
):
# type: (...) -> SingleParamAlternative
"""
Creates an alternative for fixture union `param_union_name`, to handle single parameter value
argvalue = argvalues[i] in @parametrize.
This alternative will refer to a newly created fixture in `new_fixture_host`, that will return `argvalue`.
:param new_fixture_host: host (class, module) where the new fixture should be created
:param test_func:
:param param_union_name:
:param argnames:
:param i:
:param argvalue: a (possibly marked with pytest.param) argvalue
:param hook:
:param debug:
:return:
"""
nb_params = len(argnames)
param_names_str = '_'.join(argnames).replace(' ', '')
# Create a unique fixture name
p_fix_name = "%s_%s_P%s" % (test_func.__name__, param_names_str, i)
p_fix_name = check_name_available(new_fixture_host, p_fix_name, if_name_exists=CHANGE, caller=parametrize)
if debug:
print(" - Creating new fixture %r to handle parameter %s" % (p_fix_name, i))
# Now we'll create the fixture that will return the unique parameter value
# since this parameter is unique, we do not parametrize the fixture (_create_param_fixture "auto_simplify" flag)
# for this reason the possible pytest.param ids and marks have to be set somewhere else: we move them
# to the alternative.
# unwrap possible pytest.param on the argvalue to move them on the SingleParamAlternative
has_pytestparam_wrapper = is_marked_parameter_value(argvalue)
if has_pytestparam_wrapper:
p_id = get_marked_parameter_id(argvalue)
p_marks = get_marked_parameter_marks(argvalue)
argvalue = get_marked_parameter_values(argvalue, nbargs=nb_params)
if nb_params == 1:
argvalue = argvalue[0]
# Create the fixture. IMPORTANT auto_simplify=True : we create a NON-parametrized fixture.
_create_param_fixture(new_fixture_host, argname=p_fix_name, argvalues=(argvalue,),
hook=hook, auto_simplify=True, debug=debug)
# Create the alternative
argvals = (argvalue,) if nb_params == 1 else argvalue
p_fix_alt = SingleParamAlternative(union_name=param_union_name, alternative_name=p_fix_name,
argnames=argnames, param_index=i, argval=argvals, id=id,
decorated=test_func)
# Finally copy the custom id/marks on the ParamAlternative if any
if has_pytestparam_wrapper:
p_fix_alt = ParameterSet(values=(p_fix_alt,), id=p_id, marks=p_marks) # noqa
return p_fix_alt
class MultiParamAlternative(ParamAlternative):
"""alternative class for multiple parameter values"""
__slots__ = 'param_index_from', 'param_index_to'
def __init__(self,
union_name, # type: str
alternative_name, # type: str
argnames, # type: Sequence[str]
param_index_from, # type: int
param_index_to, # type: int
decorated # type: Callable
):
"""
:param union_name: the name of the union fixture created by @parametrize to switch between param alternatives
:param alternative_name: the name of the fixture created by @parametrize to represent this alternative
:param argnames: the list of parameter names in @parametrize
:param param_index_from: the beginning index of the parameters covered by <alternative_name> in the list of
argvalues passed to @parametrize
:param param_index_to: the ending index of the parameters covered by <alternative_name> in the list of
argvalues passed to @parametrize
"""
# set the param_index to be None since here we represent several indices
super(MultiParamAlternative, self).__init__(union_name=union_name, alternative_name=alternative_name,
argnames=argnames, param_index=None, decorated=decorated # noqa
)
self.param_index_from = param_index_from
self.param_index_to = param_index_to
def __str__(self):
return "%s/%s/" % (self.get_union_id(), self.get_alternative_idx())
def get_alternative_idx(self):
return "P%s:%s" % (self.param_index_from, self.param_index_to)
def get_alternative_id(self):
# The alternative id is the parameter range - the parameter themselves appear on the referenced fixture
return self.get_alternative_idx()
@classmethod
def create(cls,
new_fixture_host, # type: Union[Type, ModuleType]
test_func, # type: Callable
param_union_name, # type: str
argnames, # type: Sequence[str]
from_i, # type: int
to_i, # type: int
argvalues, # type: Any
ids, # type: Union[Sequence[str], Callable]
hook=None, # type: Callable
debug=False # type: bool
):
# type: (...) -> MultiParamAlternative
"""
Creates an alternative for fixture union `param_union_name`, to handle a group of consecutive parameters
argvalues[from_i:to_i] in @parametrize. Note that here the received `argvalues` should be already sliced
This alternative will refer to a newly created fixture in `new_fixture_host`, that will be parametrized to
return each of `argvalues`.
:param new_fixture_host:
:param test_func:
:param param_union_name:
:param argnames:
:param from_i:
:param to_i:
:param argvalues:
:param hook:
:param debug:
:return:
"""
nb_params = len(argnames)
param_names_str = '_'.join(argnames).replace(' ', '')
# Create a unique fixture name
p_fix_name = "%s_%s_is_P%stoP%s" % (test_func.__name__, param_names_str, from_i, to_i - 1)
p_fix_name = check_name_available(new_fixture_host, p_fix_name, if_name_exists=CHANGE, caller=parametrize)
if debug:
print(" - Creating new fixture %r to handle parameters %s to %s" % (p_fix_name, from_i, to_i - 1))
# Create the fixture
# - it will be parametrized to take all the values in argvalues
# - therefore it will use the custom ids and marks if any
# - it will be unique (not unfolded) so if there are more than 1 argnames we have to add a layer of tuple in the
# values
if nb_params > 1:
# we have to create a tuple around the vals because we have a SINGLE parameter that is a tuple
unmarked_argvalues = []
new_argvals = []
for v in argvalues:
if is_marked_parameter_value(v):
# transform the parameterset so that it contains a tuple of length 1
vals = get_marked_parameter_values(v, nbargs=nb_params)
if nb_params == 1:
vals = vals[0]
unmarked_argvalues.append(vals)
new_argvals.append(ParameterSet((vals,),
id=get_marked_parameter_id(v),
marks=get_marked_parameter_marks(v)))
else:
# nothing special to do since there is no pytest.param here
new_argvals.append(v)
unmarked_argvalues.append(v)
argvalues = new_argvals
# we also have to generate the ids correctly "as if they were multiple"
try:
iter(ids)
except TypeError:
if ids is not None:
ids = ["-".join(ids(vi) for vi in v) for v in unmarked_argvalues]
else:
ids = [mini_idvalset(argnames, vals, i) for i, vals in enumerate(unmarked_argvalues)]
_create_param_fixture(new_fixture_host, argname=p_fix_name, argvalues=argvalues, ids=ids, hook=hook,
debug=debug)
# Create the corresponding alternative
# note: as opposed to SingleParamAlternative, no need to move the custom id/marks to the ParamAlternative
# since they are set on the created parametrized fixture above
return MultiParamAlternative(union_name=param_union_name, alternative_name=p_fix_name, argnames=argnames,
param_index_from=from_i, param_index_to=to_i, decorated=test_func)
class FixtureParamAlternative(SingleParamAlternative):
"""alternative class for a single parameter containing a fixture ref"""
def __init__(self,
union_name, # type: str
fixture_ref, # type: fixture_ref
argnames, # type: Sequence[str]
param_index, # type: int
id, # type: Optional[str]
decorated # type: Callable
):
"""
:param union_name: the name of the union fixture created by @parametrize to switch between param alternatives
:param param_index: the index of this parameter in the list of argvalues passed to @parametrize
:param argnames: the list of parameter names in @parametrize
:param fixture_ref: the fixture reference used in this alternative
"""
# set alternative_name using the fixture name in fixture_ref
super(FixtureParamAlternative, self).__init__(union_name=union_name,
alternative_name=fixture_ref.fixture,
argnames=argnames, param_index=param_index,
argval=fixture_ref, id=id, decorated=decorated)
def get_alternative_idx(self):
return "P%sF" % self.alternative_index
def get_alternative_id(self):
if self.id is not None:
# custom id from `@parametrize(ids=<callable_or_list>)`
return self.id
else:
# ask the fixture_ref for an id: it can be the fixture name or a custom id
return self.argval.get_name_for_id()
class ProductParamAlternative(SingleParamAlternative):
"""alternative class for a single product parameter containing fixture refs"""
def get_alternative_idx(self):
return "P%sF" % self.alternative_index
def get_alternative_id(self):
"""Similar to SingleParamAlternative: create an id representing this tuple, since the fixture won't be
parametrized"""
if self.id is not None:
# custom id from `@parametrize(ids=<callable_or_list>)`
return self.id
else:
argval = tuple(t if not robust_isinstance(t, fixture_ref) else t.get_name_for_id() for t in self.argval)
return mini_idvalset(self.argnames, argval, idx=self.alternative_index)
# if PYTEST54_OR_GREATER:
# # an empty string will be taken into account but NOT filtered out in CallSpec2.id.
# # so instead we create a dedicated unique string and return it.
# # Ugly but the only viable alternative seems worse: it would be to return an empty string
# # and in `remove_empty_ids` to always remove all empty strings (not necessary the ones set by us).
# # That is too much of a change.
EMPTY_ID = "<pytest_cases_empty_id>"
if has_pytest_param:
def remove_empty_ids(callspec):
# used by plugin.py to remove the EMPTY_ID from the callspecs
replace_list_contents(callspec._idlist, [c for c in callspec._idlist if not c.startswith(EMPTY_ID)])
else:
def remove_empty_ids(callspec):
# used by plugin.py to remove the EMPTY_ID from the callspecs
replace_list_contents(callspec._idlist, [c for c in callspec._idlist if not c.endswith(EMPTY_ID)])
# elif PYTEST421_OR_GREATER:
# # an empty string will be taken into account and filtered out in CallSpec2.id.
# # but.... if this empty string appears several times in the tests it is appended with a number to become unique :(
# EMPTY_ID = ""
#
# else:
# # an empty string will only be taken into account if its truth value is True
# # but.... if this empty string appears several times in the tests it is appended with a number to become unique :(
# # it will be filtered out in CallSpec2.id
# class EmptyId(str):
# def __new__(cls):
# return str.__new__(cls, "")
#
# def __nonzero__(self):
# # python 2
# return True
#
# def __bool__(self):
# # python 3
# return True
#
# EMPTY_ID = EmptyId()
class ParamIdMakers(UnionIdMakers):
""" 'Enum' of id styles for param ids
It extends UnionIdMakers to adapt to the special fixture alternatives `ParamAlternative` we create
in @parametrize
"""
@classmethod
def nostyle(cls,
param # type: ParamAlternative
):
if isinstance(param, MultiParamAlternative):
# make an empty minimal id since the parameter themselves will appear as ids separately
# note if the final id is empty it will be dropped by the filter in CallSpec2.id
return EMPTY_ID
else:
return UnionIdMakers.nostyle(param)
# @classmethod
# def explicit(cls,
# param # type: ParamAlternative
# ):
# """Same than parent but display the argnames as prefix instead of the fixture union name generated by
# @parametrize, because the latter is too complex (for unicity reasons)"""
# return "%s/%s" % (, param.get_id(prepend_index=True))
_IDGEN = object()
def parametrize(argnames=None, # type: Union[str, Tuple[str], List[str]]
argvalues=None, # type: Iterable[Any]
indirect=False, # type: bool
ids=None, # type: Union[Callable, Iterable[str]]
idstyle=None, # type: Union[str, Callable]
idgen=_IDGEN, # type: Union[str, Callable]
auto_refs=True, # type: bool
scope=None, # type: str
hook=None, # type: Callable[[Callable], Callable]
debug=False, # type: bool
**args):
# type: (...) -> Callable[[T], T]
"""
Equivalent to `@pytest.mark.parametrize` but also supports
(1) new alternate style for argnames/argvalues. One can also use `**args` to pass additional `{argnames: argvalues}`
in the same parametrization call. This can be handy in combination with `idgen` to master the whole id template
associated with several parameters. Note that you can pass comma-separated argnames too, by de-referencing a dict:
e.g. `**{'a,b': [(0, True), (1, False)], 'c': [-1, 2]}`.
(2) new alternate style for ids. One can use `idgen` instead of `ids`. `idgen` can be a callable receiving all
parameters at once (`**args`) and returning an id ; or it can be a string template using the new-style string
formatting where the argnames can be used as variables (e.g. `idgen=lambda **args: "a={a}".format(**args)` or
`idgen="my_id where a={a}"`). The special `idgen=AUTO` symbol can be used to generate a default string template
equivalent to `lambda **args: "-".join("%s=%s" % (n, v) for n, v in args.items())`. This is enabled by default
if you use the alternate style for argnames/argvalues (e.g. if `len(args) > 0`), and if there are no `fixture_ref`s
in your argvalues.
(3) new possibilities in argvalues:
- one can include references to fixtures with `fixture_ref(<fixture>)` where <fixture> can be the fixture name or
fixture function. When such a fixture reference is detected in the argvalues, a new function-scope "union"
fixture will be created with a unique name, and the test function will be wrapped so as to be injected with the
correct parameters from this fixture. Special test ids will be created to illustrate the switching between the
various normal parameters and fixtures. You can see debug print messages about all fixtures created using
`debug=True`
- one can include lazy argvalues with `lazy_value(<valuegetter>, [id=..., marks=...])`. A `lazy_value` is the same
thing as a function-scoped fixture, except that the value getter function is not a fixture and therefore can
neither be parametrized nor depend on fixtures. It should have no mandatory argument.
Both `fixture_ref` and `lazy_value` can be used to represent a single argvalue, or a whole tuple of argvalues when
there are several argnames. Several of them can be used in a tuple.
Finally, `pytest.param` is supported even when there are `fixture_ref` and `lazy_value`.
An optional `hook` can be passed, to apply on each fixture function that is created during this call. The hook
function will be called every time a fixture is about to be created. It will receive a single argument (the
function implementing the fixture) and should return the function to use. For example you can use `saved_fixture`
from `pytest-harvest` as a hook in order to save all such created fixtures in the fixture store.
:param argnames: same as in pytest.mark.parametrize
:param argvalues: same as in pytest.mark.parametrize except that `fixture_ref` and `lazy_value` are supported
:param indirect: same as in pytest.mark.parametrize. Note that it is not recommended and is not guaranteed to work
in complex parametrization scenarii.
:param ids: same as in pytest.mark.parametrize. Note that an alternative way to create ids exists with `idgen`. Only
one non-None `ids` or `idgen` should be provided.
:param idgen: an id formatter. Either a string representing a template, or a callable receiving all argvalues
at once (as opposed to the behaviour in pytest ids). This alternative way to generate ids can only be used when
`ids` is not provided (None). You can use the special `AUTO` formatter to generate an automatic id with
template <name>=<value>-<name2>=<value2>-etc. `AUTO` is enabled by default if you use the alternate style for
argnames/argvalues (e.g. if `len(args) > 0`), and if there are no `fixture_ref`s in your argvalues.
:param auto_refs: a boolean. If this is `True` (default), argvalues containing fixture symbols will automatically
be wrapped into a `fixture_ref`, for convenience.
:param idstyle: This is mostly for debug. Style of ids to be used in the "union" fixtures generated by
`@parametrize` if at least one `fixture_ref` is found in the argvalues. `idstyle` possible values are
'compact', 'explicit' or None/'nostyle' (default), or a callable. `idstyle` has no effect if no `fixture_ref`
are present in the argvalues. As opposed to `ids`, a callable provided here will receive a `ParamAlternative`
object indicating which generated fixture should be used. See `ParamIdMakers`.
:param scope: The scope of the union fixture to create if `fixture_ref`s are found in the argvalues. Otherwise same
as in pytest.mark.parametrize.
:param hook: an optional hook to apply to each fixture function that is created during this call. The hook function
will be called every time a fixture is about to be created. It will receive a single argument (the function
implementing the fixture) and should return the function to use. For example you can use `saved_fixture` from
`pytest-harvest` as a hook in order to save all such created fixtures in the fixture store.
:param debug: print debug messages on stdout to analyze fixture creation (use pytest -s to see them)
:param args: additional {argnames: argvalues} definition
:return:
"""
_decorate, needs_inject = _parametrize_plus(argnames, argvalues, indirect=indirect, ids=ids, idgen=idgen,
auto_refs=auto_refs, idstyle=idstyle, scope=scope,
hook=hook, debug=debug, **args)
if needs_inject:
@inject_host
def _apply_parametrize_plus(f, host_class_or_module):
return _decorate(f, host_class_or_module)
return _apply_parametrize_plus
else:
return _decorate
class InvalidIdTemplateException(Exception):
"""
Raised when a string template provided in an `idgen` raises an error
"""
def __init__(self, idgen, params, caught):
self.idgen = idgen
self.params = params
self.caught = caught
super(InvalidIdTemplateException, self).__init__()
def __str__(self):
return repr(self)
def __repr__(self):
return "Error generating test id using name template '%s' with parameter values " \
"%r. Please check the name template. Caught: %s - %s" \
% (self.idgen, self.params, self.caught.__class__, self.caught)
def _parametrize_plus(argnames=None, # type: Union[str, Tuple[str], List[str]]
argvalues=None, # type: Iterable[Any]
indirect=False, # type: bool
ids=None, # type: Union[Callable, Iterable[str]]
idstyle=None, # type: Optional[Union[str, Callable]]
idgen=_IDGEN, # type: Union[str, Callable]
auto_refs=True, # type: bool
scope=None, # type: str
hook=None, # type: Callable[[Callable], Callable]
debug=False, # type: bool
**args):
# type: (...) -> Tuple[Callable[[T], T], bool]
"""
:return: a tuple (decorator, needs_inject) where needs_inject is True if decorator has signature (f, host)
and False if decorator has signature (f)
"""
# first handle argnames / argvalues (new modes of input)
argnames, argvalues = _get_argnames_argvalues(argnames, argvalues, **args)
# argnames related
initial_argnames = ','.join(argnames)
nb_params = len(argnames)
# extract all marks and custom ids.
# Do not check consistency of sizes argname/argvalue as a fixture_ref can stand for several argvalues.
marked_argvalues = argvalues
has_cust_ids = (idgen is not _IDGEN or len(args) > 0) or (ids is not None)
p_ids, p_marks, argvalues, fixture_indices, mod_lvid_indices = \
_process_argvalues(argnames, marked_argvalues, nb_params, has_cust_ids, auto_refs=auto_refs)
# idgen default
if idgen is _IDGEN:
# default: use the new id style only when some keyword **args are provided and there are no fixture refs
idgen = AUTO if (len(args) > 0 and len(fixture_indices) == 0 and ids is None) else None
if idgen is AUTO:
# note: we use a "trick" here with mini_idval to get the appropriate result (argname='', idx=v)
def _make_ids(**args):
for n, v in args.items():
yield "%s=%s" % (n, mini_idval(val=v, argname='', idx=v))
idgen = lambda **args: "-".join(_make_ids(**args)) # noqa
# generate id
if idgen is not None:
if ids is not None:
raise ValueError("Only one of `ids` and `idgen` should be provided")
ids = _gen_ids(argnames, argvalues, idgen)
if len(fixture_indices) == 0:
# No fixture reference: fallback to a standard pytest.mark.parametrize
if debug:
print("No fixture reference found. Calling @pytest.mark.parametrize...")
print(" - argnames: %s" % initial_argnames)
print(" - argvalues: %s" % marked_argvalues)
print(" - ids: %s" % ids)
# handle infinite iterables like latest pytest, for convenience
ids = resolve_ids(ids, marked_argvalues, full_resolve=False)
# no fixture reference: shortcut, do as usual (note that the hook won't be called since no fixture is created)
_decorator = pytest.mark.parametrize(initial_argnames, marked_argvalues, indirect=indirect,
ids=ids, scope=scope)
if indirect:
return _decorator, False
else:
# wrap the decorator to check if the test function has the parameters as arguments
def _apply(test_func):
# type: (...) -> Callable[[T], T]
if not safe_isclass(test_func):
# a Function: raise a proper error message if improper use
s = signature(test_func)
for p in argnames:
if p not in s.parameters:
raise ValueError("parameter '%s' not found in test function signature '%s%s'"
"" % (p, test_func.__name__, s))
else:
# a Class: we cannot really perform any check.
pass
return _decorator(test_func)
return _apply, False
else:
# there are fixture references: we will create a specific decorator replacing the params with a "union" fixture
if indirect:
warn("Using `indirect=True` at the same time as fixture references in `@parametrize` is not guaranteed to "
"work and is strongly discouraged for readability reasons. See "
"https://github.com/smarie/python-pytest-cases/issues/150")
# First unset the pytest.param id we have set earlier in _process_argvalues: indeed it is only needed in
# the case above where we were defaulting to legacy @pytest.mark.parametrize .
# Here we have fixture refs so we will create a fixture union with several ParamAlternative, and their id will
# anyway be generated with `mini_idvalset` which tackles the case of lazy_value used for a tuple of args
for i in mod_lvid_indices:
p_ids[i] = None
if p_marks[i]:
marked_argvalues[i] = ParameterSet(values=marked_argvalues[i].values, id=None, marks=p_marks[i])
else:
marked_argvalues[i] = argvalues[i] # we can even remove the pytest.param wrapper
if indirect:
raise ValueError("Setting `indirect=True` is not yet supported when at least a `fixure_ref` is present in "
"the `argvalues`.")
if debug:
print("Fixture references found. Creating references and fixtures...")
param_names_str = '_'.join(argnames).replace(' ', '')
# Are there explicit ids provided ?
explicit_ids_to_use = False
ids = resolve_ids(ids, argvalues, full_resolve=False)
if isinstance(ids, list):
explicit_ids_to_use = True
# First define a few functions that will help us create the various fixtures to use in the final "union"
def _create_params_alt(fh, test_func, union_name, from_i, to_i, hook): # noqa
""" Routine that will be used to create a parameter fixture for argvalues between prev_i and i"""
# is this about a single value or several values ?
if to_i == from_i + 1:
i = from_i
del from_i
# If an explicit list of ids was provided, slice it. Otherwise use the provided callable
if ids is not None:
_id = ids[i] if explicit_ids_to_use else ids(argvalues[i])
else:
_id = None
return SingleParamAlternative.create(new_fixture_host=fh, test_func=test_func,
param_union_name=union_name, argnames=argnames, i=i,
argvalue=marked_argvalues[i], id=_id,
hook=hook, debug=debug)
else:
# If an explicit list of ids was provided, slice it. Otherwise the provided callable will be used later
_ids = ids[from_i:to_i] if explicit_ids_to_use else ids
return MultiParamAlternative.create(new_fixture_host=fh, test_func=test_func,
param_union_name=union_name, argnames=argnames, from_i=from_i,
to_i=to_i, argvalues=marked_argvalues[from_i:to_i], ids=_ids,
hook=hook, debug=debug)
def _create_fixture_ref_alt(union_name, test_func, i): # noqa
# If an explicit list of ids was provided, slice it. Otherwise use the provided callable
if ids is not None:
_id = ids[i] if explicit_ids_to_use else ids(argvalues[i])
else:
_id = None
# Get the referenced fixture name
f_fix_name = argvalues[i].fixture
if debug:
print(" - Creating reference to existing fixture %r" % (f_fix_name,))
# Create the alternative
f_fix_alt = FixtureParamAlternative(union_name=union_name, fixture_ref=argvalues[i],
decorated=test_func, argnames=argnames, param_index=i, id=_id)
# Finally copy the custom id/marks on the FixtureParamAlternative if any
if is_marked_parameter_value(marked_argvalues[i]):
f_fix_alt = ParameterSet(values=(f_fix_alt,),
id=get_marked_parameter_id(marked_argvalues[i]),
marks=get_marked_parameter_marks(marked_argvalues[i]))
return f_fix_alt
def _create_fixture_ref_product(fh, union_name, i, fixture_ref_positions, test_func, hook): # noqa
# If an explicit list of ids was provided, slice it. Otherwise the provided callable will be used
_id = ids[i] if explicit_ids_to_use else ids
# values to use:
param_values = argvalues[i]
# Create a unique fixture name
p_fix_name = "%s_%s_P%s" % (test_func.__name__, param_names_str, i)
p_fix_name = check_name_available(fh, p_fix_name, if_name_exists=CHANGE, caller=parametrize)
if debug:
print(" - Creating new fixture %r to handle parameter %s that is a cross-product" % (p_fix_name, i))
# Create the fixture
_make_fixture_product(fh, name=p_fix_name, hook=hook, caller=parametrize,
fixtures_or_values=param_values, fixture_positions=fixture_ref_positions)
# Create the corresponding alternative
p_fix_alt = ProductParamAlternative(union_name=union_name, alternative_name=p_fix_name, decorated=test_func,
argval=argvalues[i], argnames=argnames, param_index=i, id=_id)
# copy the custom id/marks to the ParamAlternative if any
if is_marked_parameter_value(marked_argvalues[i]):
p_fix_alt = ParameterSet(values=(p_fix_alt,),
id=get_marked_parameter_id(marked_argvalues[i]),
marks=get_marked_parameter_marks(marked_argvalues[i]))
return p_fix_alt
# Then create the decorator per se
def parametrize_plus_decorate(test_func, fixtures_dest):
# type: (...) -> Callable[[T], T]
"""
A decorator that wraps the test function so that instead of receiving the parameter names, it receives the
new fixture. All other decorations are unchanged.
:param test_func:
:return:
"""
test_func_name = test_func.__name__
# first check if the test function has the parameters as arguments
if safe_isclass(test_func):
# a test class: not supported yet
raise NotImplementedError("@parametrize can not be used to decorate a Test class when the argvalues "
"contain at least one reference to a fixture.")
old_sig = signature(test_func)
for p in argnames:
if p not in old_sig.parameters:
raise ValueError("parameter '%s' not found in test function signature '%s%s'"
"" % (p, test_func_name, old_sig))
# The name for the final "union" fixture
# style_template = "%s_param__%s"
main_fixture_style_template = "%s_%s"
fixture_union_name = main_fixture_style_template % (test_func_name, param_names_str)
fixture_union_name = check_name_available(fixtures_dest, fixture_union_name, if_name_exists=CHANGE,
caller=parametrize)
# Retrieve (if ref) or create (for normal argvalues) the fixtures that we will union
fixture_alternatives = []
prev_i = -1
for i, j_list in fixture_indices: # noqa
# A/ Is there any non-empty group of 'normal' parameters before the fixture_ref at <i> ? If so, handle.
if i > prev_i + 1:
# create a new "param" fixture parametrized with all of that consecutive group.
# Important note: we could either wish to create one fixture for parameter value or to create
# one for each consecutive group as shown below. This should not lead to different results but perf
# might differ. Maybe add a parameter in the signature so that users can test it ?
# this would make the ids more readable by removing the "P2toP3"-like ids
p_fix_alt = _create_params_alt(fixtures_dest, test_func=test_func, hook=hook,
union_name=fixture_union_name, from_i=prev_i + 1, to_i=i)
fixture_alternatives.append(p_fix_alt)
# B/ Now handle the fixture ref at position <i>
if j_list is None:
# argvalues[i] contains a single argvalue that is a fixture_ref : add the referenced fixture
f_fix_alt = _create_fixture_ref_alt(union_name=fixture_union_name, test_func=test_func, i=i)
fixture_alternatives.append(f_fix_alt)
else:
# argvalues[i] is a tuple, some of them being fixture_ref. create a fixture referring to all of them
prod_fix_alt = _create_fixture_ref_product(fixtures_dest, union_name=fixture_union_name, i=i,
fixture_ref_positions=j_list,
test_func=test_func, hook=hook)
fixture_alternatives.append(prod_fix_alt)
prev_i = i
# C/ handle last consecutive group of normal parameters, if any
i = len(argvalues) # noqa
if i > prev_i + 1:
p_fix_alt = _create_params_alt(fixtures_dest, test_func=test_func, hook=hook,
union_name=fixture_union_name, from_i=prev_i + 1, to_i=i)
fixture_alternatives.append(p_fix_alt)
# if fixtures_to_union has length 1, simplify ? >> No, we leave such "optimization" to the end user
# Handle the list of alternative names. Duplicates should be removed here
fix_alt_names = []
for alt in fixture_alternatives:
if is_marked_parameter_value(alt):
# wrapped by a pytest.param
alt = get_marked_parameter_values(alt, nbargs=1)
assert len(alt) == 1, "Error with alternative please report"
alt = alt[0]
if alt.alternative_name not in fix_alt_names:
fix_alt_names.append(alt.alternative_name)
else:
# non-unique alt fixture names should only happen when the alternative is a fixture reference
assert isinstance(alt, FixtureParamAlternative), "Created fixture names not unique, please report"
# Finally create a "main" fixture with a unique name for this test function
if debug:
print("Creating final union fixture %r with alternatives %r"
% (fixture_union_name, UnionFixtureAlternative.to_list_of_fixture_names(fixture_alternatives)))
# use the custom subclass of idstyle that was created for ParamAlternatives
if idstyle is None or isinstance(idstyle, string_types):
_idstyle = ParamIdMakers.get(idstyle)
else:
_idstyle = idstyle
# note: the function automatically registers it in the module
_make_fixture_union(fixtures_dest, name=fixture_union_name, hook=hook, caller=parametrize,
fix_alternatives=fixture_alternatives, unique_fix_alt_names=fix_alt_names,
idstyle=_idstyle, scope=scope)
# --create the new test function's signature that we want to expose to pytest
# it is the same than existing, except that we want to replace all parameters with the new fixture
# first check where we should insert the new parameters (where is the first param we remove)
_first_idx = -1
for _first_idx, _n in enumerate(old_sig.parameters):
if _n in argnames:
break
# then remove all parameters that will be replaced by the new fixture
new_sig = remove_signature_parameters(old_sig, *argnames)
# finally insert the new fixture in that position. Indeed we can not insert first or last, because
# 'self' arg (case of test class methods) should stay first and exec order should be preserved when possible
new_sig = add_signature_parameters(new_sig, custom_idx=_first_idx,
custom=Parameter(fixture_union_name,
kind=Parameter.POSITIONAL_OR_KEYWORD))
if debug:
print("Creating final test function wrapper with signature %s%s" % (test_func_name, new_sig))
# --Finally create the fixture function, a wrapper of user-provided fixture with the new signature
def replace_paramfixture_with_values(kwargs): # noqa
# remove the created fixture value
encompassing_fixture = kwargs.pop(fixture_union_name)
# and add instead the parameter values
if nb_params > 1:
for i, p in enumerate(argnames): # noqa
try:
kwargs[p] = encompassing_fixture[i]
except TypeError:
raise Exception("Unable to unpack parameter value to a tuple: %r" % encompassing_fixture)
else:
kwargs[argnames[0]] = encompassing_fixture
# return
return kwargs
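        # Rough illustration (assumption, not part of the original code): if argnames is
        # ['a', 'b'] and the union fixture resolved to the tuple (1, 2), this helper turns
        # kwargs == {<fixture_union_name>: (1, 2)} into kwargs == {'a': 1, 'b': 2}
        # before the original test function is called.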
if not isgeneratorfunction(test_func):
# normal test or fixture function with return statement
@wraps(test_func, new_sig=new_sig)
def wrapped_test_func(*args, **kwargs): # noqa
if kwargs.get(fixture_union_name, None) is NOT_USED:
# TODO why this ? it is probably useless: this fixture
# is private and will never end up in another union
return NOT_USED
else:
replace_paramfixture_with_values(kwargs)
return test_func(*args, **kwargs)
else:
# generator test or fixture function (with one or several yield statements)
@wraps(test_func, new_sig=new_sig)
def wrapped_test_func(*args, **kwargs): # noqa
if kwargs.get(fixture_union_name, None) is NOT_USED:
# TODO why this ? it is probably useless: this fixture
# is private and will never end up in another union
yield NOT_USED
else:
replace_paramfixture_with_values(kwargs)
for res in test_func(*args, **kwargs):
yield res
# move all pytest marks from the test function to the wrapper
# not needed because the __dict__ is automatically copied when we use @wraps
# move_all_pytest_marks(test_func, wrapped_test_func)
# With this hack we will be ordered correctly by pytest https://github.com/pytest-dev/pytest/issues/4429
try:
# propagate existing attribute if any
wrapped_test_func.place_as = test_func.place_as
except: # noqa
# position the test at the original function's position
wrapped_test_func.place_as = test_func
# return the new test function
return wrapped_test_func
return parametrize_plus_decorate, True
def _get_argnames_argvalues(
argnames=None, # type: Union[str, Tuple[str], List[str]]
argvalues=None, # type: Iterable[Any]
**args
):
"""
:param argnames:
:param argvalues:
:param args:
:return: argnames, argvalues - both guaranteed to be lists
"""
# handle **args - a dict of {argnames: argvalues}
if len(args) > 0:
kw_argnames, kw_argvalues = cart_product_pytest(tuple(args.keys()), tuple(args.values()))
else:
kw_argnames, kw_argvalues = (), ()
if argnames is None:
# (1) all {argnames: argvalues} pairs are provided in **args
if argvalues is not None or len(args) == 0:
raise ValueError("No parameters provided")
argnames = kw_argnames
argvalues = kw_argvalues
# simplify if needed to comply with pytest.mark.parametrize
if len(argnames) == 1:
argvalues = [_l[0] if not is_marked_parameter_value(_l) else _l for _l in argvalues]
return argnames, argvalues
if isinstance(argnames, string_types):
# (2) argnames + argvalues, as usual. However **args can also be passed and should be added
argnames = get_param_argnames_as_list(argnames)
if not isinstance(argnames, (list, tuple)):
raise TypeError("argnames should be a string, list or a tuple")
if any([not isinstance(argname, str) for argname in argnames]):
raise TypeError("all argnames should be strings")
if argvalues is None:
raise ValueError("No argvalues provided while argnames are provided")
# transform argvalues to a list (it can be a generator)
try:
argvalues = list(argvalues)
except TypeError:
raise InvalidParamsList(argvalues)
# append **args
if len(kw_argnames) > 0:
argnames, argvalues = cart_product_pytest((argnames, kw_argnames),
(argvalues, kw_argvalues))
return argnames, argvalues
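# A minimal sketch of the expected normalization (assumption, not taken from the original
# test suite): the usual "argnames string + argvalues" form is returned as two lists, e.g.
#
#   _get_argnames_argvalues("a,b", [(1, 2), (3, 4)])  # -> (['a', 'b'], [(1, 2), (3, 4)])
#
# while the keyword-only form (argnames=None) builds the cartesian product of the provided
# {argname: argvalues} entries via `cart_product_pytest`.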
def _gen_ids(argnames, argvalues, idgen):
"""
Generates an explicit test ids list from a non-none `idgen`.
`idgen` should be either a callable of a string template.
:param argnames:
:param argvalues:
:param idgen:
:return:
"""
if not callable(idgen):
# idgen is a new-style string formatting template
if not isinstance(idgen, string_types):
raise TypeError("idgen should be a callable or a string, found: %r" % idgen)
_formatter = idgen
def gen_id_using_str_formatter(**params):
try:
# format using the idgen template
return _formatter.format(**params)
except Exception as e:
raise InvalidIdTemplateException(_formatter, params, e)
idgen = gen_id_using_str_formatter
if len(argnames) > 1:
ids = [idgen(**{n: v for n, v in zip(argnames, _argvals)}) for _argvals in argvalues]
else:
_only_name = argnames[0]
ids = [idgen(**{_only_name: v}) for v in argvalues]
return ids
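# A minimal sketch of how `_gen_ids` behaves (assumption, not taken from the original test
# suite), covering both supported `idgen` flavours:
#
#   _gen_ids(['a', 'b'], [(1, 2), (3, 4)], "a={a}_b={b}")   # -> ['a=1_b=2', 'a=3_b=4']
#   _gen_ids(['a'], [10, 20], lambda a: "val%s" % a)         # -> ['val10', 'val20']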
def _process_argvalues(argnames, marked_argvalues, nb_params, has_custom_ids, auto_refs):
"""Internal method to use in _pytest_parametrize_plus
Processes the provided marked_argvalues (possibly marked with pytest.param) and returns
p_ids, p_marks, argvalues (not marked with pytest.param), fixture_indices
Note: `marked_argvalues` is modified in the process if a `lazy_value` is found with a custom id or marks.
:param argnames:
:param marked_argvalues:
:param nb_params:
:param has_custom_ids: a boolean indicating if custom ids are provided separately in `ids` or `idgen` (see
@parametrize)
:param auto_refs: if True, a `fixture_ref` will be created around fixture symbols used as argvalues automatically
:return:
"""
p_ids, p_marks, argvalues = extract_parameterset_info(argnames, marked_argvalues, check_nb=False)
# find if there are fixture references or lazy values in the values provided
fixture_indices = []
mod_lvid_indices = [] # indices of lazy_values for which we created a wrapper pytest.param with an id
if nb_params == 1:
for i, v in enumerate(argvalues):
if is_lazy_value(v):
# --- A lazy value is used for several parameters at the same time ---
# Users can declare custom marks in the lazy value API, we have to take these into account
# (1) if there was a pytest.param around it, we have to merge the marks from the lazy value into it
# (2) if there was no pytest.param around it and there are marks, we have to create the pytest.param
# Note: a custom id in lazy value does not require such processing as it does not need to take
# precedence over `ids` or `idgen`
# are there any marks ? (either added with lazy_value(marks=), or on the function itself)
_mks = v.get_marks(as_decorators=True)
if len(_mks) > 0:
# update/create the pytest.param marks on this value
p_marks[i] = (p_marks[i] + _mks) if p_marks[i] is not None else _mks
# update the original marked_argvalues. Note that argvalues[i] = v
marked_argvalues[i] = ParameterSet(values=(argvalues[i],), id=p_ids[i], marks=p_marks[i])
else:
if auto_refs and is_fixture(v):
# auto create wrapper fixture_refs
argvalues[i] = v = fixture_ref(v)
if p_ids[i] is None and p_marks[i] is None:
marked_argvalues[i] = v
else:
marked_argvalues[i] = ParameterSet(values=(v,), id=p_ids[i], marks=p_marks[i])
if isinstance(v, fixture_ref):
# Fix the referenced fixture length
v.theoretical_size = nb_params
fixture_indices.append((i, None))
elif nb_params > 1:
for i, v in enumerate(argvalues):
# A/ First analyze what is the case at hand
_lazyvalue_used_as_tuple = False
_fixtureref_used_as_tuple = False
if is_lazy_value(v):
_lazyvalue_used_as_tuple = True
else:
if auto_refs and is_fixture(v):
# auto create wrapper fixture_refs
argvalues[i] = v = fixture_ref(v)
if p_ids[i] is None and p_marks[i] is None:
marked_argvalues[i] = v
else:
marked_argvalues[i] = ParameterSet(values=(v,), id=p_ids[i], marks=p_marks[i])
if isinstance(v, fixture_ref):
# Fix the referenced fixture length
v.theoretical_size = nb_params
_fixtureref_used_as_tuple = True
elif len(v) == 1:
# same than above but it was in a pytest.param
if is_lazy_value(v[0]):
argvalues[i] = v = v[0]
_lazyvalue_used_as_tuple = True
else:
if auto_refs and is_fixture(v[0]):
# auto create wrapper fixture_refs
v = (fixture_ref(v[0]),)
if isinstance(v[0], fixture_ref):
_fixtureref_used_as_tuple = True
argvalues[i] = v = v[0]
if p_ids[i] is None and p_marks[i] is None:
marked_argvalues[i] = v
else:
marked_argvalues[i] = ParameterSet(values=(v,), id=p_ids[i], marks=p_marks[i])
# Fix the referenced fixture length
v.theoretical_size = nb_params
# B/ Now process it
if _lazyvalue_used_as_tuple:
# --- A lazy value is used for several parameters at the same time ---
# Since users have the possibility in the lazy value API to declare a custom id or custom marks,
# we have to take these into account.
# MARKS:
# (1) if there was a pytest.param around it, we have to merge the marks from the lazy value into it
# (2) if there was no pytest.param around it and there are marks, we have to create the pytest.param
# IDS:
# As opposed to the case of nb_params=1, we can not let pytest generate the id as it would create a
# tuple of LazyTupleItem ids (e.g. <id>[0]-<id>[1]-...). So
# (1) if there is a custom id list or generator, do not care about this.
# (2) if there is a pytest.param with a custom id, do not care about this
# (3) if there is nothing OR if there is a pytest.param with no id, we should create a pytest.param with
# the id.
# in this particular case we have to modify the initial list
argvalues[i] = v.as_lazy_tuple(nb_params)
# TUPLE usage: if the id is not provided elsewhere we HAVE to set an id to avoid <id>[0]-<id>[1]...
if p_ids[i] is None and not has_custom_ids:
if not has_pytest_param:
if v._id is not None:
# (on pytest 2 we cannot do it since pytest.param does not exist)
warn("The custom id %r in `lazy_value` will be ignored as this version of pytest is too old"
" to support `pytest.param`." % v._id)
else:
pass # no warning, but no p_id update
else:
# update/create the pytest.param id on this value
p_ids[i] = v.get_id()
mod_lvid_indices.append(i)
# handle marks
_mks = v.get_marks(as_decorators=True)
if len(_mks) > 0:
# update/create the pytest.param marks on this value
p_marks[i] = (p_marks[i] + _mks) if p_marks[i] is not None else _mks
# update the marked_argvalues
# - at least with the unpacked lazytuple if no pytest.param is there or needs to be created
# - with a pytest.param if one is needed
if p_ids[i] is None and p_marks[i] is None:
marked_argvalues[i] = argvalues[i]
else:
# note that here argvalues[i] IS a tuple-like so we do not create a tuple around it
marked_argvalues[i] = ParameterSet(values=argvalues[i], id=p_ids[i], marks=p_marks[i] or ())
elif _fixtureref_used_as_tuple:
# a fixture ref is used for several parameters at the same time
fixture_indices.append((i, None))
else:
# Tuple: check nb params for consistency
if len(v) != len(argnames):
raise ValueError("Inconsistent number of values in pytest parametrize: %s items found while the "
"number of parameters is %s: %s." % (len(v), len(argnames), v))
# let's dig into the tuple to check if there are fixture_refs or lazy_values
lv_pos_list = [j for j, _pval in enumerate(v) if is_lazy_value(_pval)]
if len(lv_pos_list) > 0:
_mks = [mk for _lv in lv_pos_list for mk in v[_lv].get_marks(as_decorators=True)]
if len(_mks) > 0:
# update/create the pytest.param marks on this value (global). (id not taken into account)
p_marks[i] = (list(p_marks[i]) + _mks) if p_marks[i] is not None else list(_mks)
marked_argvalues[i] = ParameterSet(values=argvalues[i], id=p_ids[i], marks=p_marks[i] or ())
# auto create fixtures
if auto_refs:
autofix_pos_list = [j for j, _pval in enumerate(v) if is_fixture(_pval)]
if len(autofix_pos_list) > 0:
# there is at least one fixture without wrapping ref inside the tuple
autov = list(v)
for _k in autofix_pos_list:
autov[_k] = fixture_ref(autov[_k])
argvalues[i] = v = tuple(autov)
if p_ids[i] is None and p_marks[i] is None:
marked_argvalues[i] = argvalues[i]
else:
# note that here argvalues[i] IS a tuple-like so we do not create a tuple around it
marked_argvalues[i] = ParameterSet(values=argvalues[i], id=p_ids[i], marks=p_marks[i] or ())
fix_pos_list = [j for j, _pval in enumerate(v) if isinstance(_pval, fixture_ref)]
if len(fix_pos_list) > 0:
# there is at least one fixture ref inside the tuple
fixture_indices.append((i, fix_pos_list))
# let's dig into the tuple
# has_val_ref = any(isinstance(_pval, lazy_value) for _pval in v)
# val_pos_list = [j for j, _pval in enumerate(v) if isinstance(_pval, lazy_value)]
# if len(val_pos_list) > 0:
# # there is at least one value ref inside the tuple
# argvalues[i] = tuple_with_value_refs(v, theoreticalsize=nb_params, positions=val_pos_list)
return p_ids, p_marks, argvalues, fixture_indices, mod_lvid_indices
|
e273be23ae3b1e424344ed9cbdaa45a9fc37d854
|
2853845c003d03db22f67c3303fa1ec333180ae7
|
/fedlearner/trainer/trainer_master.py
|
51a97419464f5a6d6e11ec65808c9c8367d7b480
|
[
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] |
permissive
|
bytedance/fedlearner
|
fc1dd2ba2ec88092e83a32732eccea52451ce552
|
436e4959952c970917ee8f47b920f0a76cd4dd05
|
refs/heads/master
| 2023-08-14T23:01:02.875453
| 2023-05-23T03:44:03
| 2023-05-23T03:44:03
| 235,348,659
| 893
| 243
|
Apache-2.0
| 2023-06-08T07:37:18
| 2020-01-21T13:26:35
|
Python
|
UTF-8
|
Python
| false
| false
| 27,930
|
py
|
trainer_master.py
|
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# pylint: disable=protected-access
import os
import signal
import time
from typing import Optional
from concurrent import futures
import threading
import grpc
import tensorflow.compat.v1 as tf
from fedlearner.common import fl_logging
from fedlearner.common import trainer_master_service_pb2 as tm_pb
from fedlearner.common import trainer_master_service_pb2_grpc as tm_grpc
from fedlearner.common import common_pb2 as common_pb
from fedlearner.common.metric_collector import metric_collector
from fedlearner.trainer.bridge import FakeBridge
from fedlearner.trainer.estimator import FLEstimator
from fedlearner.trainer.sparse_estimator import SparseFLEstimator
from fedlearner.trainer.cluster_server import ClusterServer
class ExportModelHook():
def after_save(self, sess, model, export_dir, inputs, outputs):
pass
class _TriggerHook(tf.train.SessionRunHook):
def __init__(self,
trigger_secs=None,
trigger_steps=None,
trigger_fn=None):
self._trigger_secs = trigger_secs
self._trigger_steps = trigger_steps
self._trigger_fn = trigger_fn
def begin(self):
self._global_step_tensor = tf.train.get_or_create_global_step()
self._last_triggered_time = None
self._last_triggered_step = None
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
self._trigger(global_step)
def after_run(self, run_context, run_values):
global_step = run_context.session.run(self._global_step_tensor)
if self._should_trigger(global_step):
self._trigger(global_step)
def end(self, session):
global_step = session.run(self._global_step_tensor)
self._trigger(global_step)
def _should_trigger(self, global_step):
if self._last_triggered_time is None \
or self._last_triggered_step is None:
return True
if self._trigger_secs is not None:
if time.time() >= self._last_triggered_time + self._trigger_secs:
return True
if self._trigger_steps is not None:
if global_step >= self._last_triggered_step + self._trigger_steps:
return True
return False
def _trigger(self, global_step):
if self._trigger_fn:
self._trigger_fn(global_step)
self._last_triggered_time = time.time()
self._last_triggered_step = global_step
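# Usage note (a sketch mirroring how this hook is instantiated further below in this module):
# _TriggerHook(trigger_secs=10, trigger_fn=fn) calls fn(global_step) once right after the
# session is created, then at most every 10 seconds from after_run(), and a final time in
# end() when the session closes.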
class _CheckpointSaverHook(tf.train.CheckpointSaverHook):
def _save(self, session, step):
if self._timer.last_triggered_step() is None:
# skip save checkpoint
fl_logging.info("skip save checkpoint at first time")
return False
return super(_CheckpointSaverHook, self)._save(session, step)
class _DataVisitorCheckpointHook(tf.train.SessionRunHook):
def __init__(self, visitor):
self._visitor = visitor
def begin(self):
self._ckpt_plhd = tf.placeholder(tf.string, name="data_checkpoint_plhd")
self._ckpt_var = tf.Variable("", name="data_checkpoint")
self._save_op = self._ckpt_var.assign(self._ckpt_plhd)
def after_create_session(self, session, coord):
data = session.run(self._ckpt_var)
self._visitor.restore(data)
def before_checkpoint_save(self, session, global_step_value):
data = self._visitor.dump()
fl_logging.info("DataVisitor save checkpoint for global step %d, "
"size: %d", global_step_value, len(data))
session.run(
self._save_op,
{self._ckpt_plhd: data},
)
def create_checkpoint_saver_listener(self):
return _DataVisitorCheckpointHook.CheckpointSaverListener(self)
class CheckpointSaverListener(tf.train.CheckpointSaverListener):
def __init__(self, hook):
self._hook = hook
def before_save(self, session, global_step_value):
self._hook.before_checkpoint_save(session, global_step_value)
class DataBlockCheckpointSaverListener(tf.train.CheckpointSaverListener):
def __init__(self, visitor):
self._visitor = visitor
def begin(self):
self._ckpt = tf.placeholder(tf.string, name="data_checkpoint_plhd")
var_tmp = tf.Variable("", name="data_checkpoint")
self._save_op = var_tmp.assign(self._ckpt)
def before_save(self, session, global_step_value):
session.run(
self._save_op,
{self._ckpt: self._visitor.dump()}
)
#fl_logging.info("data checkpoint saved result: %s", res)
class _FakeTrainerMasterClient():
pass
class _TrainerMaster(tm_grpc.TrainerMasterServiceServicer):
def __init__(self,
cluster_server: ClusterServer,
role,
mode,
model_fn,
input_fn,
serving_input_receiver_fn,
checkpoint_filename_with_path=None,
checkpoint_path=None,
save_checkpoint_steps=None,
save_checkpoint_secs=None,
summary_path=None,
summary_save_steps=None,
summary_save_secs=None,
export_path=None,
sparse_estimator=False,
export_model_hook=None,
export_model: Optional[bool] = None):
self._cluster_server = cluster_server
self._role = role
self._mode = mode
self._model_fn = model_fn
self._input_fn = input_fn
self._serving_input_receiver_fn = serving_input_receiver_fn
self._checkpoint_filename_with_path = checkpoint_filename_with_path
self._checkpoint_path = checkpoint_path
self._save_checkpoint_steps = save_checkpoint_steps
self._save_checkpoint_secs = save_checkpoint_secs
self._summary_path = summary_path
self._summary_save_steps = summary_save_steps
self._summary_save_secs = summary_save_secs
self._export_path = export_path
self._sparse_estimator = sparse_estimator
self._export_model_hook = export_model_hook
self._should_export_model = export_model
self._lock = threading.RLock()
self._status = tm_pb.MasterStatus.CREATED
self._checkpoint_listeners = []
self._session_hooks = []
self._running_workers = set() # set(worker_rank)
self._completed_workers = set() # set(worker_rank)
# for compatibility
self._worker0_terminated_at = 0
self._worker0_cluster_def = None
def _check_status(self, callback_fn):
with self._lock:
return callback_fn(self._status)
def _run_grpc_server(self, address):
self._grpc_server = grpc.server(
futures.ThreadPoolExecutor(
max_workers=8,
thread_name_prefix="TrainerMasterServerThreadPoolExecutor"
))
tm_grpc.add_TrainerMasterServiceServicer_to_server(
self, self._grpc_server)
self._grpc_server.add_insecure_port(address)
self._grpc_server.start()
        fl_logging.info('Trainer Master Server started on address: %s', address)
def _transfer_status(self, frm, to):
if self._status != frm:
raise RuntimeError(
"Trainer Master status transfer failed, "
"want from %s to %s, but current status: %s"% \
(tm_pb.MasterStatus.Name(frm),
tm_pb.MasterStatus.Name(to),
tm_pb.MasterStatus.Name(self._status))
)
self._status = to
fl_logging.info("Trainer Master status transfer, from %s to %s",
tm_pb.MasterStatus.Name(frm),
tm_pb.MasterStatus.Name(to))
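    # Status lifecycle sketch (derived from the calls in this class, not an exhaustive spec):
    #   CREATED -> INITIALING         (run_forever)
    #   INITIALING -> RUNNING         (_session_run, once the monitored session is ready)
    #   RUNNING -> WORKER_COMPLETED   (WorkerComplete, when worker 0 and all workers are done)
    #   WORKER_COMPLETED -> COMPLETED (_run, after the optional model export)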
def run_forever(self, listen_port=None):
with self._lock:
self._transfer_status(tm_pb.MasterStatus.CREATED,
tm_pb.MasterStatus.INITIALING)
if listen_port:
self._run_grpc_server(listen_port)
while self._cluster_server is None:
            # waiting to receive cluster_def from worker_0
with self._lock:
if self._worker0_cluster_def:
fl_logging.info("received worker_0 cluster_def: %s",
self._worker0_cluster_def)
self._cluster_server = ClusterServer(
tf.train.ClusterSpec(self._worker0_cluster_def),
"master")
break
fl_logging.info("still waiting receive cluster_def from worker_0")
time.sleep(2)
self._run()
sig = signal.sigwait([signal.SIGHUP, signal.SIGINT, signal.SIGTERM])
fl_logging.info("Server shutdown by signal: %s",
signal.Signals(sig).name)
def _add_checkpoint_listener(self, listener):
with self._lock:
self._checkpoint_listeners.append(listener)
def _add_session_hook(self, hook):
with self._lock:
self._session_hooks.append(hook)
def _create_estimator(self):
estimator_factory = SparseFLEstimator \
if self._sparse_estimator else FLEstimator
return estimator_factory(
cluster_server=self._cluster_server,
bridge=FakeBridge(),
trainer_master=_FakeTrainerMasterClient(),
role=self._role,
model_fn=self._model_fn)
def _run(self):
fl_logging.info("create estimator")
estimator = self._create_estimator()
fl_logging.info("start session_run")
self._session_run(estimator)
fl_logging.info("session_run done")
if self._should_export_model or \
(self._mode == 'train' and self._should_export_model is None):
fl_logging.info("start export_model")
self._export_model(estimator)
fl_logging.info("export_model done")
self._transfer_status(tm_pb.MasterStatus.WORKER_COMPLETED,
tm_pb.MasterStatus.COMPLETED)
def _session_run(self, estimator):
mode_key = tf.estimator.ModeKeys.TRAIN if self._mode == "train" \
else tf.estimator.ModeKeys.EVAL
with tf.Graph().as_default() as g, \
g.device(self._cluster_server.device_setter):
features, labels = estimator. \
_get_features_and_labels_from_input_fn(
self._input_fn, mode_key)
# only for create graph
spec, _ = estimator._get_model_spec(
features, labels, mode_key)
session_creator = tf.train.ChiefSessionCreator(
master=self._cluster_server.target,
config=self._cluster_server.cluster_config,
checkpoint_filename_with_path= \
self._checkpoint_filename_with_path
)
hooks = self._session_hooks
# saver hook
if (mode_key == tf.estimator.ModeKeys.TRAIN or
self._should_export_model) \
and self._checkpoint_path \
and (self._save_checkpoint_secs \
or self._save_checkpoint_steps):
hooks.append(
tf.train.CheckpointSaverHook(
checkpoint_dir=self._checkpoint_path,
save_secs=self._save_checkpoint_secs,
save_steps=self._save_checkpoint_steps,
listeners=self._checkpoint_listeners,
)
)
# summary hook
if mode_key == tf.estimator.ModeKeys.TRAIN \
and (self._summary_save_secs or self._summary_save_steps):
if not self._summary_path:
self._summary_path = self._checkpoint_path
if self._summary_path:
hooks.append(
tf.train.SummarySaverHook(
output_dir=self._summary_path,
save_secs=self._summary_save_secs,
save_steps=self._summary_save_steps,
scaffold=session_creator._scaffold,
)
)
noop = tf.no_op()
with tf.train.MonitoredSession(
session_creator=session_creator,
hooks=hooks) as sess:
with self._lock:
# ready, set status to running
self._transfer_status(tm_pb.MasterStatus.INITIALING,
tm_pb.MasterStatus.RUNNING)
while True:
sess.run(noop)
with self._lock:
if self._status == tm_pb.MasterStatus.WORKER_COMPLETED:
break
time.sleep(0.2)
def _export_model(self, estimator):
if self._export_path:
export_path = os.path.join(
self._export_path, str(self._worker0_terminated_at))
with tf.Graph().as_default() as g, \
g.device(self._cluster_server.device_setter):
receiver = self._serving_input_receiver_fn()
spec, model = estimator._get_model_spec(
receiver.features, None, tf.estimator.ModeKeys.PREDICT)
assert not model.sends, "Exported model cannot send"
assert not model.recvs, "Exported model cannot receive"
with tf.Session(
target=self._cluster_server.target,
config=self._cluster_server.cluster_config) as sess:
tf.saved_model.simple_save(sess, export_path,
receiver.receiver_tensors,
spec.predictions, None)
if self._export_model_hook:
self._export_model_hook.after_save(
sess, model, export_path,
receiver.receiver_tensors, spec.predictions)
    def _request_data_block(self, request):
        """Overridden by subclasses."""
        raise RuntimeError("Unimplemented")
def RequestDataBlock(self, request, context):
if request.worker_rank not in self._running_workers:
fl_logging.warning(
'[RequestDataBlock] worker rank %d is not found in '
'running workers %s', request.worker_rank,
self._running_workers)
return tm_pb.DataBlockResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_INVALID_REQUEST,
error_message="unregistered worker")
)
if request.worker_rank in self._completed_workers:
return tm_pb.DataBlockResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_INVALID_REQUEST,
error_message="worker has completed")
)
return self._request_data_block(request)
def WorkerRegister(self, request, context):
with self._lock:
# for compatibility, more information see:
# protocal/fedlearner/common/trainer_master_service.proto
if self._worker0_cluster_def is None and request.worker_rank == 0:
self._worker0_cluster_def = request.cluster_def
if self._status in (tm_pb.MasterStatus.WORKER_COMPLETED,
tm_pb.MasterStatus.COMPLETED):
return tm_pb.WorkerRegisterResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_DATA_FINISHED
))
if self._status != tm_pb.MasterStatus.RUNNING:
return tm_pb.WorkerRegisterResponse(
status=common_pb.Status(
code=common_pb.StatusCode. \
STATUS_WAIT_FOR_SYNCING_CHECKPOINT
))
if request.worker_rank in self._running_workers:
fl_logging.warning("worker_%d:%s repeat registration",
request.worker_rank, request.hostname)
else:
fl_logging.info("worker_%d:%s registration",
request.worker_rank, request.hostname)
self._running_workers.add(request.worker_rank)
if request.worker_rank in self._completed_workers:
self._completed_workers.remove(request.worker_rank)
return tm_pb.WorkerRegisterResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_SUCCESS)
)
def WorkerComplete(self, request, context):
with self._lock:
if request.worker_rank not in self._running_workers:
return tm_pb.WorkerRegisterResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_INVALID_REQUEST,
error_message="unregistered worker")
)
fl_logging.info("worker_%d completed", request.worker_rank)
self._completed_workers.add(request.worker_rank)
if request.worker_rank == 0:
self._worker0_terminated_at = request.timestamp
if len(self._running_workers) == len(self._completed_workers) \
and 0 in self._running_workers:
                # worker 0 completed and all datablocks have finished
self._transfer_status(tm_pb.MasterStatus.RUNNING,
tm_pb.MasterStatus.WORKER_COMPLETED)
return tm_pb.WorkerCompleteResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_SUCCESS)
)
def IsCompleted(self, request, context):
with self._lock:
return tm_pb.IsCompletedResponse(
completed=(self._status == tm_pb.MasterStatus.COMPLETED)
)
class LeaderTrainerMaster(_TrainerMaster):
def __init__(self,
cluster_server,
data_visitor,
mode,
model_fn,
input_fn,
serving_input_receiver_fn,
checkpoint_filename_with_path=None,
checkpoint_path=None,
save_checkpoint_steps=None,
save_checkpoint_secs=None,
summary_path=None,
summary_save_steps=None,
summary_save_secs=None,
export_path=None,
sparse_estimator=False,
export_model_hook=None,
export_model: Optional[bool] = None):
super(LeaderTrainerMaster, self).__init__(
cluster_server,
"leader",
mode,
model_fn,
input_fn,
serving_input_receiver_fn,
checkpoint_filename_with_path,
checkpoint_path,
save_checkpoint_steps,
save_checkpoint_secs,
summary_path,
summary_save_steps,
summary_save_secs,
export_path,
sparse_estimator,
export_model_hook,
export_model=export_model
)
self._data_visitor = data_visitor
self._last_global_step = -1
# datavisitor checkpoint hook
if mode == 'train':
hook = _DataVisitorCheckpointHook(self._data_visitor)
self._add_checkpoint_listener(
hook.create_checkpoint_saver_listener())
self._add_session_hook(hook)
# trigger hook
self._last_trigger_time = 0
self._add_session_hook(
_TriggerHook(trigger_secs=10,
trigger_fn=self._trigger_fn)
)
# worker type: data source type
self._worker_data_source_map = {
tm_pb.WorkerType.REMOTE_WORKER: tm_pb.DataSourceType.JOINED,
tm_pb.WorkerType.LOCAL_WORKER: tm_pb.DataSourceType.LOCAL
}
def _trigger_fn(self, global_step):
now = time.time()
if self._last_global_step >= 0:
speed = (global_step-self._last_global_step) \
/ (now-self._last_trigger_time)
allocated_epoch, allocated_datablock, allocated_local_datablock \
= self._data_visitor.summary()
total_epoch, total_datablock, total_local_datablock = \
self._data_visitor.epoch_num, \
self._data_visitor.datablock_size, \
self._data_visitor.local_datablock_size
fl_logging.info("global_step: %d, speed: %0.2f step/sec, "
"epoch: %d/%d, datablock allocated: %d/%d, "
"local datablock allocated: %d/%d, "
"worker: %d/%d(running/completed)",
global_step, speed,
allocated_epoch, total_epoch,
allocated_datablock, total_datablock,
allocated_local_datablock, total_local_datablock,
len(self._running_workers),
len(self._completed_workers))
name_prefix = f'model.{self._mode}.nn_vertical'
metric_collector.emit_store(
f'{name_prefix}.global_step', global_step)
metric_collector.emit_store(
f'{name_prefix}.datablock_total', total_datablock)
metric_collector.emit_store(
f'{name_prefix}.datablock_allocated', allocated_datablock)
metric_collector.emit_store(
f'{name_prefix}.local_datablock_total',
total_local_datablock
)
metric_collector.emit_store(
f'{name_prefix}.local_datablock_allocated',
allocated_local_datablock
)
metric_collector.emit_store(f'{name_prefix}.speed', speed)
self._last_trigger_time = now
self._last_global_step = global_step
def _request_data_block(self, request):
try:
data_block = self._data_visitor.next_with_type(
self._worker_data_source_map[request.worker_type])
except StopIteration:
data_block = None
if data_block:
fl_logging.info("allocated worker_%d with block: %s",
request.worker_rank,
data_block.id)
response = tm_pb.DataBlockResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_SUCCESS),
block_id=data_block.id,
data_path=data_block.data_path,
)
else:
response = tm_pb.DataBlockResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_DATA_FINISHED,
error_message="data block finished")
)
return response
class FollowerTrainerMaster(_TrainerMaster):
def __init__(self,
cluster_server,
data_visitor,
mode,
model_fn,
input_fn,
serving_input_receiver_fn,
checkpoint_filename_with_path=None,
checkpoint_path=None,
save_checkpoint_steps=None,
save_checkpoint_secs=None,
summary_path=None,
summary_save_steps=None,
summary_save_secs=None,
export_path=None,
sparse_estimator=False,
export_model_hook=None,
export_model: Optional[bool] = None
):
super(FollowerTrainerMaster, self).__init__(
cluster_server,
"follower",
mode,
model_fn,
input_fn,
serving_input_receiver_fn,
checkpoint_filename_with_path,
checkpoint_path,
save_checkpoint_steps,
save_checkpoint_secs,
summary_path,
summary_save_steps,
summary_save_secs,
export_path,
sparse_estimator,
export_model_hook,
export_model=export_model
)
self._data_visitor = data_visitor
self._last_global_step = -1
# trigger hook
self._last_trigger_time = 0
self._add_session_hook(
_TriggerHook(trigger_secs=10,
trigger_fn=self._trigger_fn)
)
def _trigger_fn(self, global_step):
now = time.time()
if self._last_global_step >= 0:
speed = (global_step-self._last_global_step) \
/ (now-self._last_trigger_time)
total_datablock = self._data_visitor.datablock_size
total_local_datablock = self._data_visitor.local_datablock_size
fl_logging.info("global_step: %d, speed: %0.2f step/sec, "
"datablock size: %d, "
"local datablock size: %d, "
"worker: %d/%d(running/completed)",
global_step, speed,
total_datablock,
total_local_datablock,
len(self._running_workers),
len(self._completed_workers))
name_prefix = f'model.{self._mode}.nn_vertical'
metric_collector.emit_store(
f'{name_prefix}.global_step', global_step)
metric_collector.emit_store(
f'{name_prefix}.datablock_total', total_datablock)
metric_collector.emit_store(
f'{name_prefix}.local_datablock_total', total_local_datablock)
metric_collector.emit_store(f'{name_prefix}.speed', speed)
self._last_trigger_time = now
self._last_global_step = global_step
def _request_data_block(self, request):
data_block = self._data_visitor.get_datablock_by_id(request.block_id)
if data_block:
fl_logging.info("allocated worker_%d with block: %s",
request.worker_rank,
data_block.id)
response = tm_pb.DataBlockResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_SUCCESS),
block_id=data_block.id,
data_path=data_block.data_path,
)
else:
fl_logging.error("invalid data block id: %s", request.block_id)
response = tm_pb.DataBlockResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_INVALID_DATA_BLOCK,
error_message="invalid data block")
)
return response
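# Note (derived from the two subclasses above): LeaderTrainerMaster allocates the next data
# block from its data visitor, while FollowerTrainerMaster only resolves the block id passed
# in by the worker, returning STATUS_INVALID_DATA_BLOCK when the id is unknown.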
|
b7b2e2c96667ee57bdd9b4f59f17602feffb2de3
|
1364c5dde278482607c17c9853c5dd65a18b729f
|
/plugins/modules/ntc_install_os.py
|
3ec973d6e1901f0f1546f49926359e1d96238280
|
[
"Apache-2.0"
] |
permissive
|
networktocode/ntc-ansible
|
ac0bfd3645f6eaedba2c1138b0e6f2d3c80d7057
|
dc577937614e68e6e9178097c08d775638d7fc6a
|
refs/heads/develop
| 2023-08-25T18:50:09.885635
| 2023-04-13T14:40:19
| 2023-04-13T14:40:19
| 40,731,097
| 295
| 124
|
NOASSERTION
| 2023-08-11T20:41:01
| 2015-08-14T18:50:41
|
Python
|
UTF-8
|
Python
| false
| false
| 14,644
|
py
|
ntc_install_os.py
|
#!/usr/bin/python
# Copyright 2015 Jason Edelman <jason@networktocode.com>
# Network to Code, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
---
module: ntc_install_os
short_description: Install an operating system by setting the boot options
like boot image and kickstart image
description:
- Set boot options like boot image and kickstart image.
- Reboot option for device to perform install.
- Supported platforms include Cisco Nexus switches with NX-API,
Cisco IOS switches or routers, Arista switches with eAPI,
Cisco ASA firewalls, and F5 LTMs with iControl API.
notes:
- Do not include full file paths, just the name
of the file(s) stored on the top level flash directory.
- You must know if your platform supports taking a kickstart image as a parameter.
If supplied but not supported, errors may occur.
- It may be useful to use this module in conjunction with ntc_file_copy and ntc_reboot.
- With F5, volume parameter is required.
- With NXOS devices, this module attempts to install the software immediately,
which may trigger a reboot.
- With NXOS devices, install process may take up to 10 minutes,
especially if the device reboots.
- Tested on Nexus 3000, 5000, 9000.
- In check mode, the module tells you if the image currently
booted matches C(system_image_file).
author: Jason Edelman (@jedelman8)
version_added: 1.9.2
requirements:
- pyntc
extends_documentation_fragment:
- networktocode.netauto.netauto
options:
system_image_file:
description:
- Name of the system (or combined) image file on flash.
required: true
type: str
kickstart_image_file:
description:
- Name of the kickstart image file on flash.
required: false
default: null
type: str
volume:
description:
- Volume name - required argument for F5 platform.
required: false
type: str
reboot:
description:
- Determines whether or not the device should be rebooted to complete OS installation.
required: false
default: false
type: bool
install_mode:
description:
- Determines whether OS support install mode.
required: false
default: false
type: bool
"""
EXAMPLES = r"""
- hosts: all
vars:
nxos_provider:
host: "{{ inventory_hostname }}"
username: "ntc-ansible"
password: "ntc-ansible"
platform: "cisco_nxos_nxapi"
connection: local
- name: "INSTALL OS ON NEXUS 9K"
networktocode.netauto.ntc_install_os:
ntc_host: n9k1
system_image_file: n9000-dk9.6.1.2.I3.1.bin
reboot: yes
- name: "INSTALL OS ON NEXUS 3K WITH KICKSTART"
networktocode.netauto.ntc_install_os:
ntc_host: n3k1
system_image_file: n3000-uk9.6.0.2.U6.5.bin
kickstart_image_file: n3000-uk9-kickstart.6.0.2.U6.5.bin
reboot: yes
- name: "CONFIGURE BOOT OPTIONS ON CISCO 2800"
networktocode.netauto.ntc_install_os:
ntc_host: c2801
system_image_file: c2800nm-adventerprisek9_ivs_li-mz.151-3.T4.bin
- name: "INSTALL OS ON CISCO 2800"
networktocode.netauto.ntc_install_os:
provider: "{{ ios_provider }}"
system_image_file: c2800nm-adventerprisek9_ivs_li-mz.151-3.T4.bin
reboot: yes
"""
RETURN = r"""
install_state:
returned: always
type: dict
description: Dictionary of details from install.
sample: {
"kick": "n5000-uk9-kickstart.7.2.1.N1.1.bin",
"sys": "n5000-uk9.7.2.1.N1.1.bin",
"status": "This is the log of last installation.\n
Continuing with installation process, please wait.\n
The login will be disabled until the installation is completed.\n
Performing supervisor state verification. \n
SUCCESS\n
Supervisor non-disruptive upgrade successful.\n
Install has been successful.\n",
}
"""
import time # noqa E402
from ansible.module_utils.basic import AnsibleModule # noqa E402
from ansible_collections.networktocode.netauto.plugins.module_utils.args_common import (
CONNECTION_ARGUMENT_SPEC,
MUTUALLY_EXCLUSIVE,
REQUIRED_ONE_OF,
)
try:
from pyntc import ntc_device, ntc_device_by_name # noqa E402
HAS_PYNTC = True
except ImportError:
HAS_PYNTC = False
try:
# TODO: Ensure pyntc adds __version__
from pyntc import __version__ as pyntc_version # noqa F401
from pyntc.errors import (
CommandError,
CommandListError,
FileSystemNotFoundError,
NotEnoughFreeSpaceError,
NTCFileNotFoundError,
OSInstallError,
RebootTimeoutError,
)
HAS_PYNTC_VERSION = True
except ImportError:
HAS_PYNTC_VERSION = False
# fmt: on
# PLATFORM_NXAPI = "cisco_nxos_nxapi"
# PLATFORM_IOS = "cisco_ios_ssh"
# PLATFORM_EAPI = "arista_eos_eapi"
PLATFORM_F5 = "f5_tmos_icontrol"
# PLATFORM_ASA = "cisco_asa_ssh"
# TODO: Remove when deprecating older pyntc
def already_set(boot_options, system_image_file, kickstart_image_file, **kwargs):
"""Checks if set."""
volume = kwargs.get("volume")
device = kwargs.get("device")
if device and volume:
return device.image_installed(image_name=system_image_file, volume=volume)
return boot_options.get("sys") == system_image_file and boot_options.get("kick") == kickstart_image_file
def main(): # pylint: disable=too-many-statements,too-many-branches,too-many-locals
"""Main execution."""
base_argument_spec = dict(
system_image_file=dict(required=True, type="str"),
kickstart_image_file=dict(required=False, type="str"),
volume=dict(required=False, type="str"),
reboot=dict(required=False, type="bool", default=False),
install_mode=dict(required=False, type="bool", default=None),
)
argument_spec = base_argument_spec
argument_spec.update(CONNECTION_ARGUMENT_SPEC)
argument_spec["provider"] = dict(required=False, type="dict", options=CONNECTION_ARGUMENT_SPEC)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=MUTUALLY_EXCLUSIVE,
required_one_of=[REQUIRED_ONE_OF],
required_if=[["platform", PLATFORM_F5, ["volume"]]],
supports_check_mode=True,
)
if not HAS_PYNTC:
module.fail_json(msg="pyntc is required for this module.")
# TODO: Change to fail_json when deprecating older pyntc
if not HAS_PYNTC_VERSION:
module.warn("Support for pyntc version < 0.0.9 is being deprecated; please upgrade pyntc")
# TODO: Remove warning when deprecating reboot option on non-F5 devices
module.warn("Support for installing the OS without rebooting may be deprecated in the future")
provider = module.params["provider"] or {}
# allow local params to override provider
for param, pvalue in provider.items():
# TODO: Figure out exactly the purpose of this and correct truthiness or noneness
if module.params.get(param) is not False:
module.params[param] = module.params.get(param) or pvalue
platform = module.params["platform"]
host = module.params["host"]
username = module.params["username"]
password = module.params["password"]
ntc_host = module.params["ntc_host"]
ntc_conf_file = module.params["ntc_conf_file"]
transport = module.params["transport"]
port = module.params["port"]
secret = module.params["secret"]
reboot = module.params["reboot"]
# TODO: Remove checks if we require reboot for non-F5 devices
if platform == "cisco_nxos_nxapi" and not reboot:
module.fail_json(msg='NXOS requires setting the "reboot" parameter to True')
if platform != "cisco_nxos_nxapi" and reboot and not HAS_PYNTC_VERSION:
module.fail_json(msg='Using the "reboot" parameter for non-NXOS devices' "requires pyntc version > 0.0.8")
argument_check = {
"host": host,
"username": username,
"platform": platform,
"password": password,
}
for key, val in argument_check.items():
if val is None:
module.fail_json(msg=str(key) + " is required")
if ntc_host is not None:
device = ntc_device_by_name(ntc_host, ntc_conf_file)
else:
kwargs = {}
if transport is not None:
kwargs["transport"] = transport
if port is not None:
kwargs["port"] = port
if secret is not None:
kwargs["secret"] = secret
device_type = platform
device = ntc_device(device_type, host, username, password, **kwargs)
system_image_file = module.params["system_image_file"]
kickstart_image_file = module.params["kickstart_image_file"]
volume = module.params["volume"]
# Get the NTC Version split
version_numbers = pyntc_version.split(".")
install_mode = module.params.get("install_mode")
if install_mode and not ((int(version_numbers[0]) > 0) or (int(version_numbers[1]) >= 16)):
module.fail_json(msg="Current version of PyNTC does not support install_mode. Please update PyNTC >= 0.16.0")
if kickstart_image_file == "null":
kickstart_image_file = None
device.open()
pre_install_boot_options = device.get_boot_options()
if not module.check_mode: # pylint: disable=too-many-nested-blocks
# TODO: Remove conditional when deprecating older pyntc
if HAS_PYNTC_VERSION:
try:
# TODO: Remove conditional if we require reboot for non-F5 devices
if reboot or device.device_type == "f5_tmos_icontrol":
changed = device.install_os(
image_name=system_image_file,
kickstart=kickstart_image_file,
volume=volume,
install_mode=install_mode,
)
else:
# TODO: Remove support if we require reboot for non-F5 devices
changed = device.set_boot_options(system_image_file)
except (
CommandError,
CommandListError,
FileSystemNotFoundError,
NotEnoughFreeSpaceError,
NTCFileNotFoundError,
OSInstallError,
RebootTimeoutError,
) as e:
module.fail_json(msg=e.message)
except Exception as e: # pylint: disable=broad-except
module.fail_json(msg=str(e))
if (
reboot
and device.device_type == "f5_tmos_icontrol"
and pre_install_boot_options["active_volume"] != volume
):
try:
changed = True
device.reboot(confirm=True, volume=volume)
except RuntimeError:
module.fail_json(
msg="Attempted reboot but did not boot to desired volume",
original_volume=pre_install_boot_options["active_volume"],
expected_volume=volume,
)
install_state = device.get_boot_options()
# TODO: Remove contents of else when deprecating older pyntc
else:
changed = False
install_state = pre_install_boot_options
if not already_set(
boot_options=pre_install_boot_options,
system_image_file=system_image_file,
kickstart_image_file=kickstart_image_file,
volume=volume,
device=device,
):
changed = True
if device.device_type == "nxos":
timeout = 600
device.set_timeout(timeout)
try:
start_time = time.time()
device.set_boot_options(system_image_file, kickstart=kickstart_image_file)
except: # nosec # noqa
pass
elapsed_time = time.time() - start_time
device.set_timeout(30)
try:
install_state = device.get_boot_options()
except: # noqa
install_state = {}
while elapsed_time < timeout and not install_state:
try:
install_state = device.get_boot_options()
except: # noqa
time.sleep(10)
elapsed_time += 10
else:
device.set_boot_options(system_image_file, kickstart=kickstart_image_file, volume=volume)
install_state = device.get_boot_options()
if not already_set(
boot_options=pre_install_boot_options,
system_image_file=system_image_file,
kickstart_image_file=kickstart_image_file,
volume=volume,
device=device,
):
module.fail_json(msg="Install not successful", install_state=install_state)
else:
if HAS_PYNTC_VERSION:
changed = device._image_booted( # pylint: disable=protected-access
image_name=system_image_file, kickstart=kickstart_image_file, volume=volume
)
# TODO: Remove contents of else when deprecating older pyntc
else:
changed = already_set(
boot_options=pre_install_boot_options,
system_image_file=system_image_file,
kickstart_image_file=kickstart_image_file,
volume=volume,
device=device,
)
install_state = pre_install_boot_options
device.close()
module.exit_json(changed=changed, install_state=install_state)
if __name__ == "__main__":
main()
|
618e3e7ac67af7d115f0d62708d93c4337fe3861
|
6f9375e722264d863734c6f44e883c9d8799386d
|
/agent/temboardagent/plugins/dashboard/__init__.py
|
224cd1f3a78b8a0de14ac90607dec577bef35854
|
[
"PostgreSQL"
] |
permissive
|
dalibo/temboard
|
8e5fcf5604c5f234a5c1be7d75e871251d80e61e
|
d26cb848f4b064e05d5e422ecc001889f224bd74
|
refs/heads/master
| 2023-08-31T05:09:26.385396
| 2023-08-30T10:09:12
| 2023-08-30T10:09:12
| 68,793,814
| 400
| 56
|
NOASSERTION
| 2023-09-12T13:50:42
| 2016-09-21T07:57:13
|
Python
|
UTF-8
|
Python
| false
| false
| 4,048
|
py
|
__init__.py
|
import logging
import time
from bottle import Bottle, default_app
from ...toolkit import taskmanager
from ...toolkit.configuration import OptionSpec
from ...toolkit.utils import utcnow
from . import db
from . import metrics
bottle = Bottle()
logger = logging.getLogger(__name__)
workers = taskmanager.WorkerSet()
@bottle.get('/')
def dashboard():
return metrics.get_metrics_queue(default_app().temboard.config)
@bottle.get('/config')
def dashboard_config():
app = default_app().temboard
return dict(
scheduler_interval=app.config.dashboard.scheduler_interval,
history_length=app.config.dashboard.history_length,
)
@bottle.get('/live')
def dashboard_live(pgpool):
return metrics.get_metrics(default_app().temboard, pgpool)
@bottle.get('/history')
def dashboard_history():
return metrics.get_history_metrics_queue(default_app().temboard.config)
@bottle.get('/buffers')
def dashboard_buffers(pgconn):
return metrics.get_buffers(pgconn)
@bottle.get('/hitratio')
def dashboard_hitratio(pgconn):
return metrics.get_hitratio(pgconn)
@bottle.get('/active_backends')
def dashboard_active_backends(pgconn):
return metrics.get_active_backends(pgconn)
@bottle.get('/cpu')
def dashboard_cpu():
return metrics.get_cpu_usage()
@bottle.get('/loadaverage')
def dashboard_loadaverage():
return metrics.get_loadaverage()
@bottle.get('/memory')
def dashboard_memory():
return metrics.get_memory_usage()
@bottle.get('/hostname')
def dashboard_hostname():
return metrics.get_hostname(default_app().temboard.config)
@bottle.get('/databases')
def dashboard_databases(pgconn):
return metrics.get_databases(pgconn)
@workers.register(pool_size=1)
def dashboard_collector_worker(app, pool=None):
logger.info("Running dashboard collector.")
data = metrics.get_metrics(app, pool)
# We don't want to store notifications in the history.
data.pop('notifications', None)
logger.debug(data)
db.add_metric(
app.config.temboard.home,
'dashboard.db',
time.time(),
data,
app.config.dashboard.history_length
)
logger.debug("Done")
BATCH_DURATION = 5 * 60 # 5 minutes
@workers.register(pool_size=1)
def dashboard_collector_batch_worker(app):
# Loop each configured interval in the batch duration.
interval = app.config.dashboard.scheduler_interval
pool = None
start = utcnow()
elapsed = 0
while elapsed < BATCH_DURATION:
if elapsed > 0:
# Throttle interval after first run.
time.sleep(interval)
try:
pool = pool or app.postgres.pool()
except Exception as e:
logger.error("Failed to connect to Postgres: %s", e)
else:
try:
for attempt in pool.auto_reconnect():
with attempt:
dashboard_collector_worker(app, pool)
except Exception as e:
logger.error("Dashboard collector error: %s", e)
elapsed = utcnow() - start
elapsed = elapsed.total_seconds()
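# Sizing note (derived from the defaults in this module, not a guarantee): with the default
# dashboard.scheduler_interval of 2 seconds and BATCH_DURATION of 300 seconds, one scheduled
# run of this batch worker performs roughly 150 collections before the task ends and is
# re-scheduled (redo_interval=BATCH_DURATION in DashboardPlugin.load()).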
class DashboardPlugin:
PG_MIN_VERSION = (90400, 9.4)
s = 'dashboard'
option_specs = [
OptionSpec(s, 'scheduler_interval', default=2, validator=int),
OptionSpec(s, 'history_length', default=150, validator=int),
]
del s
def __init__(self, app, **kw):
self.app = app
self.app.config.add_specs(self.option_specs)
def bootstrap(self):
db.bootstrap(self.app.config.temboard.home, 'dashboard.db')
def load(self):
default_app().mount('/dashboard', bottle)
self.app.worker_pool.add(workers)
workers.schedule(
id='dashboard_collector_batch',
redo_interval=BATCH_DURATION,
)(dashboard_collector_batch_worker)
self.app.scheduler.add(workers)
def unload(self):
self.app.scheduler.remove(workers)
self.app.worker_pool.remove(workers)
self.app.config.remove_specs(self.option_specs)
|
309c682c0f226387acc88f3e9526268fca05baab
|
f3806d9fb54773908cd9704121a543b114470aca
|
/angr/procedures/java_util/scanner_nextline.py
|
661664142c3de4117f80cfd9c4af5dcc46c19c4c
|
[
"BSD-2-Clause"
] |
permissive
|
angr/angr
|
8ae95fceca51b0a001de56477d984dd01193ac1d
|
37e8ca1c3308ec601ad1d7c6bc8081ff38a7cffd
|
refs/heads/master
| 2023-08-17T03:15:21.007865
| 2023-08-15T18:44:57
| 2023-08-15T18:44:57
| 40,328,394
| 7,184
| 1,306
|
BSD-2-Clause
| 2023-09-14T20:14:23
| 2015-08-06T21:46:55
|
Python
|
UTF-8
|
Python
| false
| false
| 797
|
py
|
scanner_nextline.py
|
import logging
from claripy import StringS
from ...engines.soot.values import SimSootValue_StringRef
from ..java import JavaSimProcedure
l = logging.getLogger("angr.procedures.java.scanner.nextLine")
class ScannerNextLine(JavaSimProcedure):
__provides__ = (("java.util.Scanner", "nextLine()"),)
def run(self, this): # pylint: disable=arguments-differ,unused-argument
str_ref = SimSootValue_StringRef(self.state.memory.get_new_uuid())
self.state.memory.store(str_ref, StringS("scanner_return", 100))
# save reference in global dict, so we can easily access it later
try:
self.state.globals["java.util.Scanner"].append(str_ref)
except KeyError:
self.state.globals["java.util.Scanner"] = [str_ref]
return str_ref
|
42f0aff0be619dfd9a8a2b83c3ae67380e6c5bfe
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/WithholdSignInfo.py
|
f59f0ba827e20545e5443d42e2b8ce08889ea8e5
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,005
|
py
|
WithholdSignInfo.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class WithholdSignInfo(object):
def __init__(self):
self._agreement_no = None
self._agreement_status = None
self._sign_scene = None
self._sign_time = None
self._unsign_time = None
@property
def agreement_no(self):
return self._agreement_no
@agreement_no.setter
def agreement_no(self, value):
self._agreement_no = value
@property
def agreement_status(self):
return self._agreement_status
@agreement_status.setter
def agreement_status(self, value):
self._agreement_status = value
@property
def sign_scene(self):
return self._sign_scene
@sign_scene.setter
def sign_scene(self, value):
self._sign_scene = value
@property
def sign_time(self):
return self._sign_time
@sign_time.setter
def sign_time(self, value):
self._sign_time = value
@property
def unsign_time(self):
return self._unsign_time
@unsign_time.setter
def unsign_time(self, value):
self._unsign_time = value
def to_alipay_dict(self):
params = dict()
if self.agreement_no:
if hasattr(self.agreement_no, 'to_alipay_dict'):
params['agreement_no'] = self.agreement_no.to_alipay_dict()
else:
params['agreement_no'] = self.agreement_no
if self.agreement_status:
if hasattr(self.agreement_status, 'to_alipay_dict'):
params['agreement_status'] = self.agreement_status.to_alipay_dict()
else:
params['agreement_status'] = self.agreement_status
if self.sign_scene:
if hasattr(self.sign_scene, 'to_alipay_dict'):
params['sign_scene'] = self.sign_scene.to_alipay_dict()
else:
params['sign_scene'] = self.sign_scene
if self.sign_time:
if hasattr(self.sign_time, 'to_alipay_dict'):
params['sign_time'] = self.sign_time.to_alipay_dict()
else:
params['sign_time'] = self.sign_time
if self.unsign_time:
if hasattr(self.unsign_time, 'to_alipay_dict'):
params['unsign_time'] = self.unsign_time.to_alipay_dict()
else:
params['unsign_time'] = self.unsign_time
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = WithholdSignInfo()
if 'agreement_no' in d:
o.agreement_no = d['agreement_no']
if 'agreement_status' in d:
o.agreement_status = d['agreement_status']
if 'sign_scene' in d:
o.sign_scene = d['sign_scene']
if 'sign_time' in d:
o.sign_time = d['sign_time']
if 'unsign_time' in d:
o.unsign_time = d['unsign_time']
return o
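# A minimal usage sketch (not part of the original SDK file); the field values below are
# made-up examples used only to show the dict round-trip.
if __name__ == "__main__":
    sample = {
        "agreement_no": "20210100000000001",
        "agreement_status": "NORMAL",
        "sign_scene": "INDUSTRY|CARRENTAL",
        "sign_time": "2021-01-01 12:00:00",
    }
    info = WithholdSignInfo.from_alipay_dict(sample)
    # every truthy field set above is echoed back by to_alipay_dict()
    assert info.to_alipay_dict() == sample
    print(json.dumps(info.to_alipay_dict(), ensure_ascii=False))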
|
8ddf2d6a7723f26e7b0aaa67d4d1eaae454e34e3
|
61673ab9a42f7151de7337608c442fa6247f13bb
|
/socket/http-server/main.py
|
88ebedbf7f3874e29e27cbfc06cfe7e19f6f80f9
|
[
"MIT"
] |
permissive
|
furas/python-examples
|
22d101670ecd667a29376d7c7d7d86f8ec71f6cf
|
95cb53b664f312e0830f010c0c96be94d4a4db90
|
refs/heads/master
| 2022-08-23T23:55:08.313936
| 2022-08-01T14:48:33
| 2022-08-01T14:48:33
| 45,575,296
| 176
| 91
|
MIT
| 2021-02-17T23:33:37
| 2015-11-04T23:54:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,031
|
py
|
main.py
|
import socket
HOST, PORT = '', 8888
listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_socket.bind((HOST, PORT))
listen_socket.listen(1)
print(f'Serving HTTP on port {PORT} ...')
while True:
    print('1 - start')
client_connection, client_address = listen_socket.accept()
# read header
request_data = b''
while True:
        chunk = client_connection.recv(1) # client can send header and body at once and then it is hard to find the end of the header b'\r\n\r\n' - other solutions save the data in a file buffer (i.e. BytesIO) and then they can use readline()
request_data += chunk
print(chunk)
if request_data.endswith(b'\r\n\r\n'):
break
print(request_data.decode('utf-8'))
# TODO: read body if POST request and `content-length` > 0
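    # A minimal sketch of the TODO above (not part of the original script): pull
    # Content-Length out of the header just collected in request_data and recv()
    # exactly that many bytes; for GET requests content_length stays 0 and the
    # loop below reads nothing.
    content_length = 0
    for header_line in request_data.decode('utf-8', errors='replace').split('\r\n'):
        if header_line.lower().startswith('content-length:'):
            content_length = int(header_line.split(':', 1)[1].strip())
    body = b''
    while len(body) < content_length:
        part = client_connection.recv(content_length - len(body))
        if not part:
            break  # client closed the connection early
        body += part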
client_connection.sendall(b"""HTTP/1.1 200 OK\n\nHello, World!\n""")
client_connection.close()
    print('2 - end')
|
83011dc5da00cfc2e9525df490ac531a753dd819
|
276f30aeaebc76d259269a0c75bf3fdc4ee22dde
|
/Lane-Detection/dataset/voc_aug.py
|
787293bde94449f4b4acdf569caa898286405b44
|
[
"MIT"
] |
permissive
|
Chenzhaowei13/Light-Condition-Style-Transfer
|
a8627f3d5328ad7c495a3357c49ef48b610d86d2
|
fccc15551f1045939ff31d5d64d3af3bf10c677f
|
refs/heads/master
| 2022-12-10T02:56:20.331999
| 2022-02-09T04:01:38
| 2022-02-09T04:01:38
| 237,132,823
| 148
| 34
|
MIT
| 2022-12-07T23:37:47
| 2020-01-30T03:38:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,352
|
py
|
voc_aug.py
|
import os
import numpy as np
import cv2
import torch
from torch.utils.data import Dataset
class VOCAugDataSet(Dataset):
def __init__(self, dataset_path='/home/chenzhaowei/data/CULane/list', data_list='train', transform=None):
with open(os.path.join(dataset_path, data_list + '.txt')) as f:
self.img_list = []
self.img = []
self.label_list = []
self.exist_list = []
for line in f:
# print(line)
self.img.append(line.strip().split(" ")[0])
#print(self.img)
self.img_list.append(dataset_path.replace('/list', '') + line.strip().split(" ")[0])
#print(self.img_list)
self.label_list.append(dataset_path.replace('/list', '') + line.strip().split(" ")[1])
#print(self.label_list)
self.exist_list.append(np.array([int(line.strip().split(" ")[2]), int(line.strip().split(" ")[3]), int(line.strip().split(" ")[4]), int(line.strip().split(" ")[5])]))
#print(self.exist_list)
self.img_path = dataset_path
self.gt_path = dataset_path
self.transform = transform
self.is_testing = data_list == 'test_img' # 'val'
def __len__(self):
return len(self.img_list)
def __getitem__(self, idx):
# print('img_path:')
# print(os.path.join(self.img_path, self.img_list[idx]))
# print('label_path')
# print(self.gt_path)
# print(self.label_list[idx])
# print(os.path.join(self.gt_path, self.label_list[idx]))
image = cv2.imread(os.path.join(self.img_path, self.img_list[idx])).astype(np.float32)
label = cv2.imread(os.path.join(self.gt_path, self.label_list[idx]), cv2.IMREAD_UNCHANGED)
# print(os.path.join(self.gt_path, self.label_list[idx]))
exist = self.exist_list[idx]
image = image[240:, :, :]
label = label[240:, :]
label = label.squeeze()
if self.transform:
image, label = self.transform((image, label))
image = torch.from_numpy(image).permute(2, 0, 1).contiguous().float()
label = torch.from_numpy(label).contiguous().long()
if self.is_testing:
return image, label, self.img[idx]
else:
return image, label, exist
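# Illustrative usage sketch (not part of the original file; the dataset path below is
# an assumption): wrap VOCAugDataSet in a torch DataLoader as one would for training.
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    train_set = VOCAugDataSet(dataset_path='/data/CULane/list', data_list='train')
    loader = DataLoader(train_set, batch_size=4, shuffle=True, num_workers=2)
    for images, labels, exist in loader:
        print(images.shape, labels.shape, exist.shape)
        break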
|
0a083e6cb1002936c734f480b859ef1b36d0f295
|
5ed8fdfdb4516b6ff07f632005266e60d3b49389
|
/telethon/sync.py
|
f647670a7eef875ec6e97ebf320ed8a9b69c7ec5
|
[
"MIT"
] |
permissive
|
LonamiWebs/Telethon
|
8ad20584863a7a5c1480eb46df8f0bd910042ce8
|
128b7074881c95fcc748f312ee92d09d650fd2f9
|
refs/heads/v1
| 2023-08-09T17:52:04.533344
| 2023-08-03T17:00:55
| 2023-08-03T17:01:10
| 66,641,037
| 8,789
| 1,756
|
MIT
| 2023-09-14T16:52:06
| 2016-08-26T10:59:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,609
|
py
|
sync.py
|
"""
This magical module will rewrite all public methods in the public interface
of the library so they can run the loop on their own if it's not already
running. This rewrite may not be desirable if the end user always uses the
methods the way they should be run, but it's incredibly useful for quick
scripts and the runtime overhead is relatively low.
Some really common methods which are hardly used offer this ability by
default, such as ``.start()`` and ``.run_until_disconnected()`` (since
you may want to start, and then run until disconnected while using async
event handlers).
"""
import asyncio
import functools
import inspect
from . import events, errors, utils, connection, helpers
from .client.account import _TakeoutClient
from .client.telegramclient import TelegramClient
from .tl import types, functions, custom
from .tl.custom import (
Draft, Dialog, MessageButton, Forward, Button,
Message, InlineResult, Conversation
)
from .tl.custom.chatgetter import ChatGetter
from .tl.custom.sendergetter import SenderGetter
def _syncify_wrap(t, method_name):
method = getattr(t, method_name)
@functools.wraps(method)
def syncified(*args, **kwargs):
coro = method(*args, **kwargs)
loop = helpers.get_running_loop()
if loop.is_running():
return coro
else:
return loop.run_until_complete(coro)
# Save an accessible reference to the original method
setattr(syncified, '__tl.sync', method)
setattr(t, method_name, syncified)
def syncify(*types):
"""
Converts all the methods in the given types (class definitions)
into synchronous, which return either the coroutine or the result
based on whether ``asyncio's`` event loop is running.
"""
# Our asynchronous generators all are `RequestIter`, which already
# provide a synchronous iterator variant, so we don't need to worry
    # about async generator functions here.
for t in types:
for name in dir(t):
if not name.startswith('_') or name == '__call__':
if inspect.iscoroutinefunction(getattr(t, name)):
_syncify_wrap(t, name)
syncify(TelegramClient, _TakeoutClient, Draft, Dialog, MessageButton,
ChatGetter, SenderGetter, Forward, Message, InlineResult, Conversation)
# Private special case, since a conversation's methods return
# futures (but the public functions themselves are synchronous).
_syncify_wrap(Conversation, '_get_result')
__all__ = [
'TelegramClient', 'Button',
'types', 'functions', 'custom', 'errors',
'events', 'utils', 'connection'
]
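# Minimal, self-contained sketch (not part of the library) of what _syncify_wrap does:
# on a toy class, the wrapped coroutine method can be called without `await` when no
# event loop is running, because the wrapper drives it with run_until_complete.
if __name__ == '__main__':
    class _Demo:
        async def greet(self, name):
            return 'hello ' + name

    _syncify_wrap(_Demo, 'greet')
    print(_Demo().greet('world'))  # prints "hello world" with no explicit awaiting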
|
eb0984bff1064816ad1e27fb2732e08baaf4de05
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/ml/azure-ai-ml/tests/registry/unittests/test_registry_schema.py
|
bb0d7843567d8eb778471a9a12923ff308563951
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-python-cwi",
"PSF-2.0",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"Python-2.0",
"MPL-2.0",
"LicenseRef-scancode-other-copyleft",
"HPND",
"ODbL-1.0",
"GPL-3.0-only",
"ZPL-2.1",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 6,093
|
py
|
test_registry_schema.py
|
from pathlib import Path
import pytest
import yaml
from marshmallow.exceptions import ValidationError
from azure.ai.ml._schema.registry import RegistrySchema
from azure.ai.ml.constants._common import BASE_PATH_CONTEXT_KEY, PublicNetworkAccess
from azure.ai.ml.constants._registry import AcrAccountSku, StorageAccountType
from azure.ai.ml.entities import RegistryRegionDetails, SystemCreatedAcrAccount, SystemCreatedStorageAccount
from azure.ai.ml.entities._util import load_from_dict
@pytest.mark.unittest
@pytest.mark.production_experiences_test
class TestRegistrySchema:
def test_deserialize_from_yaml(self) -> None:
path = Path("./tests/test_configs/registry/registry_valid.yaml")
with open(path, "r") as f:
target = yaml.safe_load(f)
context = {BASE_PATH_CONTEXT_KEY: path.parent}
registry = load_from_dict(RegistrySchema, target, context)
assert registry
assert registry["name"] == "registry_name"
assert registry["id"] == "registry_id"
assert registry["tags"] == {"purpose": "testing", "other_tag": "value"}
assert registry["location"] == "EastUS2"
assert registry["public_network_access"] == PublicNetworkAccess.DISABLED
assert registry["intellectual_property"].publisher == "registry_publisher"
assert (
registry["container_registry"]
== "/subscriptions/sub_id/resourceGroups/some_rg/providers/Microsoft.ContainerRegistry/registries/acr_id"
)
assert len(registry["replication_locations"]) == 1
detail = registry["replication_locations"][0]
assert isinstance(detail, RegistryRegionDetails)
assert detail.location == "EastUS"
storages = detail.storage_config
assert isinstance(storages, SystemCreatedStorageAccount)
assert not storages.storage_account_hns
assert storages.storage_account_type == StorageAccountType.STANDARD_RAGRS
assert storages.replication_count == 1
def test_deserialize_from_yaml_with_system_acr(self) -> None:
path = Path("./tests/test_configs/registry/registry_valid_2.yaml")
with open(path, "r") as f:
target = yaml.safe_load(f)
context = {BASE_PATH_CONTEXT_KEY: path.parent}
registry = load_from_dict(RegistrySchema, target, context)
assert registry
assert isinstance(registry["container_registry"], SystemCreatedAcrAccount)
assert registry["container_registry"].acr_account_sku == AcrAccountSku.PREMIUM
def test_deserialize_from_yaml_with_no_acr(self) -> None:
path = Path("./tests/test_configs/registry/registry_valid_3.yaml")
with open(path, "r") as f:
target = yaml.safe_load(f)
context = {BASE_PATH_CONTEXT_KEY: path.parent}
registry = load_from_dict(RegistrySchema, target, context)
assert registry
assert isinstance(registry["container_registry"], SystemCreatedAcrAccount)
assert registry["container_registry"].acr_account_sku == AcrAccountSku.PREMIUM
def test_deserialize_bad_storage_account_type(self) -> None:
path = Path("./tests/test_configs/registry/registry_bad_storage_account_type.yaml")
with open(path, "r") as f:
target = yaml.safe_load(f)
context = {BASE_PATH_CONTEXT_KEY: path.parent}
with pytest.raises(Exception) as e_info:
load_from_dict(RegistrySchema, target, context)
assert e_info
assert isinstance(e_info._excinfo[1], ValidationError)
assert "NOT_A_REAL_ACCOUNT_TYPE" in e_info._excinfo[1].messages[0]
assert "passed is not in set" in e_info._excinfo[1].messages[0]
def test_deserialize_bad_arm_resource_id(self) -> None:
path = Path("./tests/test_configs/registry/registry_bad_arm_resource_id.yaml")
with open(path, "r") as f:
target = yaml.safe_load(f)
context = {BASE_PATH_CONTEXT_KEY: path.parent}
with pytest.raises(Exception) as e_info:
load_from_dict(RegistrySchema, target, context)
assert e_info
assert isinstance(e_info._excinfo[1], ValidationError)
assert "container_registry" in e_info._excinfo[1].messages[0]
assert "Invalid value" in e_info._excinfo[1].messages[0]
def test_deserialize_replication_counts(self) -> None:
path = Path("./tests/test_configs/registry/registry_valid_replication_count.yaml")
with open(path, "r") as f:
target = yaml.safe_load(f)
context = {BASE_PATH_CONTEXT_KEY: path.parent}
registry = load_from_dict(RegistrySchema, target, context)
registry["replication_locations"][0].storage_config.replication_count == 5
path = Path("./tests/test_configs/registry/registry_bad_replication_count.yaml")
with open(path, "r") as f:
target = yaml.safe_load(f)
context = {BASE_PATH_CONTEXT_KEY: path.parent}
with pytest.raises(Exception) as e_info:
load_from_dict(RegistrySchema, target, context)
assert e_info
assert isinstance(e_info._excinfo[1], ValidationError)
assert "replication_count" in e_info._excinfo[1].messages[0]
assert "Invalid value" in e_info._excinfo[1].messages[0]
path = Path("./tests/test_configs/registry/registry_valid_lone_replication_count.yaml")
with open(path, "r") as f:
target = yaml.safe_load(f)
context = {BASE_PATH_CONTEXT_KEY: path.parent}
registry = load_from_dict(RegistrySchema, target, context)
registry["replication_locations"][0].storage_config.replication_count == 6
registry["replication_locations"][0].storage_config.storage_account_hns == False
registry["replication_locations"][0].storage_config.storage_account_type == StorageAccountType.STANDARD_LRS
|
bc084acd4bfbbaac22f9de28657d8963e4b53b92
|
e39472e0c0d582a8fe90d1b236312c236f2ab332
|
/src/pyze/cli/ac-history.py
|
46219ee1d5893a57c890c707886f9974d6ec7f32
|
[
"MIT"
] |
permissive
|
jamesremuscat/pyze
|
801015857c135eada6473743caa676ba27b990d5
|
c359492287ce1a5462b8b3e7ddca11919bbf04a4
|
refs/heads/develop
| 2022-10-05T05:37:18.998067
| 2022-09-19T14:06:29
| 2022-09-19T14:06:29
| 198,670,990
| 113
| 40
|
MIT
| 2022-09-19T14:06:07
| 2019-07-24T16:21:00
|
Python
|
UTF-8
|
Python
| false
| false
| 888
|
py
|
ac-history.py
|
from .common import add_history_args, add_vehicle_args, get_vehicle
from datetime import datetime
from tabulate import tabulate
help_text = 'Show preconditioning history for your vehicle.'
def configure_parser(parser):
add_vehicle_args(parser)
add_history_args(parser)
def run(parsed_args):
v = get_vehicle(parsed_args)
now = datetime.utcnow()
if parsed_args.from_date:
from_date = min(parsed_args.from_date, now)
else:
from_date = now.replace(day=1)
if parsed_args.to:
to_date = min(parsed_args.to, now)
else:
to_date = now
print(
tabulate(
v.hvac_history(from_date, to_date),
headers={
'hvacSessionRequestDate': 'Request made',
'hvacSessionStartDate': 'Start time',
'hvacSessionEndStatus': 'Status'
}
)
)
|
97448b917d1635fb55173471fb6de0f4fffc8da3
|
85373d45a83e4096affafa4f4e5b400787413e57
|
/test/programytest/storage/test_utils.py
|
90ca90630548bc95fc397c6c6ae61415b7c8bbc2
|
[
"MIT"
] |
permissive
|
keiffster/program-y
|
a02bb9d8278835547cc875f4f9cd668d5b1f44da
|
fc7b0a3afa4fa6ed683e0c817a9aa89f9543bb20
|
refs/heads/master
| 2023-08-23T13:55:39.255535
| 2022-12-13T09:51:57
| 2022-12-13T09:51:57
| 74,462,571
| 379
| 173
|
NOASSERTION
| 2023-05-23T00:51:21
| 2016-11-22T10:43:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,720
|
py
|
test_utils.py
|
import datetime
import unittest
from programy.dialog.conversation import Conversation
from programy.dialog.question import Question
from programytest.client import TestClient
class StorageEngineTestUtils(unittest.TestCase):
def user_asserts(self, storage_engine):
user_store = storage_engine.user_store()
user_store.add_user(userid="@keiffster", clientid="twitter")
user_store.add_user(userid="keiffster@gmail.com", clientid="hangouts")
user_store.add_user(userid="keiffster", clientid="telegram")
user_store.commit()
def linked_account_asserts(self, storage_engine):
linked_accounts_store = storage_engine.linked_account_store()
linked_accounts_store.link_accounts(primary_userid=1, linked_userid=2)
linked_accounts_store.link_accounts(primary_userid=1, linked_userid=3)
linked_accounts_store.commit()
def link_asserts(self, storage_engine):
link_store = storage_engine.link_store()
link_store.create_link(primary_userid=1, generated_key='AFG37CE', provided_key="Password", expires=datetime.datetime.now())
        link_store.commit()
def property_asserts(self, storage_engine):
property_store = storage_engine.property_store()
property_store.add_property(name="topic", value="*")
property_store.add_properties({"name": "Fred",
"age": "47",
"occupation": "Gardener"})
property_store.commit()
def conversation_asserts(self, storage_engine, visit=True):
client = TestClient()
client_context = client.create_client_context("user1")
conversation = Conversation(client_context)
question1 = Question.create_from_text(client_context, "Hello There")
question1.sentence(0).response = "Hi"
conversation.record_dialog(question1)
convo_store = storage_engine.conversation_store()
convo_store.store_conversation(client_context, conversation)
convo_store.commit()
def category_asserts(self, storage_engine):
category_store = storage_engine.category_store()
category_store.store_category(groupid="group1", userid="keiffster", topic="*", that=None, pattern="Hello", template="Hi there!")
category_store.commit()
def twitter_asserts(self, storage_engine, visit=True):
twitter_store = storage_engine.twitter_store()
twitter_store.store_last_message_ids(1, 2)
twitter_store.commit()
last_direct_message_id, last_status_id = twitter_store.load_last_message_ids()
self.assertEqual(1, int(last_direct_message_id))
self.assertEqual(2, int(last_status_id))
|
f258c3662132520b0f72ce2cabc7f1a974b1f37c
|
afbae26b958b5ef20548402a65002dcc8e55b66a
|
/release/stubs.min/Microsoft/Win32/SafeHandles.py
|
d1741b3050dde385e045b73dbacde1ee7fe5130e
|
[
"MIT"
] |
permissive
|
gtalarico/ironpython-stubs
|
d875cb8932c7644f807dc6fde9dd513d159e4f5c
|
c7f6a6cb197e3949e40a4880a0b2a44e72d0a940
|
refs/heads/master
| 2023-07-12T01:43:47.295560
| 2022-05-23T18:12:06
| 2022-05-23T18:12:06
| 95,340,553
| 235
| 88
|
NOASSERTION
| 2023-07-05T06:36:28
| 2017-06-25T05:30:46
|
Python
|
UTF-8
|
Python
| false
| false
| 18,674
|
py
|
SafeHandles.py
|
# encoding: utf-8
# module Microsoft.Win32.SafeHandles calls itself SafeHandles
# from mscorlib,Version=4.0.0.0,Culture=neutral,PublicKeyToken=b77a5c561934e089,System,Version=4.0.0.0,Culture=neutral,PublicKeyToken=b77a5c561934e089
# by generator 1.145
""" NamespaceTracker represent a CLS namespace. """
# no imports
# no functions
# classes
class CriticalHandleMinusOneIsInvalid(CriticalHandle,IDisposable):
""" Provides a base class for Win32 critical handle implementations in which the value of -1 indicates an invalid handle. """
def Dispose(self):
"""
Dispose(self: CriticalHandle,disposing: bool)
Releases the unmanaged resources used by the System.Runtime.InteropServices.CriticalHandle class
specifying whether to perform a normal dispose operation.
disposing: true for a normal dispose operation; false to finalize the handle.
"""
pass
def ReleaseHandle(self,*args):
"""
ReleaseHandle(self: CriticalHandle) -> bool
When overridden in a derived class,executes the code required to free the handle.
Returns: true if the handle is released successfully; otherwise,in the event of a catastrophic failure,
false. In this case,it generates a releaseHandleFailed MDA Managed Debugging Assistant.
"""
pass
def SetHandle(self,*args):
"""
SetHandle(self: CriticalHandle,handle: IntPtr)
Sets the handle to the specified pre-existing handle.
handle: The pre-existing handle to use.
"""
pass
def __enter__(self,*args):
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self,*args):
"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
IsInvalid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value that indicates whether the handle is invalid.
Get: IsInvalid(self: CriticalHandleMinusOneIsInvalid) -> bool
"""
handle=None
class CriticalHandleZeroOrMinusOneIsInvalid(CriticalHandle,IDisposable):
""" Provides a base class for Win32 critical handle implementations in which the value of either 0 or -1 indicates an invalid handle. """
def Dispose(self):
"""
Dispose(self: CriticalHandle,disposing: bool)
Releases the unmanaged resources used by the System.Runtime.InteropServices.CriticalHandle class
specifying whether to perform a normal dispose operation.
disposing: true for a normal dispose operation; false to finalize the handle.
"""
pass
def ReleaseHandle(self,*args):
"""
ReleaseHandle(self: CriticalHandle) -> bool
When overridden in a derived class,executes the code required to free the handle.
Returns: true if the handle is released successfully; otherwise,in the event of a catastrophic failure,
false. In this case,it generates a releaseHandleFailed MDA Managed Debugging Assistant.
"""
pass
def SetHandle(self,*args):
"""
SetHandle(self: CriticalHandle,handle: IntPtr)
Sets the handle to the specified pre-existing handle.
handle: The pre-existing handle to use.
"""
pass
def __enter__(self,*args):
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self,*args):
"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
IsInvalid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value that indicates whether the handle is invalid.
Get: IsInvalid(self: CriticalHandleZeroOrMinusOneIsInvalid) -> bool
"""
handle=None
class SafeAccessTokenHandle(SafeHandle,IDisposable):
""" SafeAccessTokenHandle(handle: IntPtr) """
def Dispose(self):
"""
Dispose(self: SafeHandle,disposing: bool)
Releases the unmanaged resources used by the System.Runtime.InteropServices.SafeHandle class
specifying whether to perform a normal dispose operation.
disposing: true for a normal dispose operation; false to finalize the handle.
"""
pass
def ReleaseHandle(self,*args):
""" ReleaseHandle(self: SafeAccessTokenHandle) -> bool """
pass
def SetHandle(self,*args):
"""
SetHandle(self: SafeHandle,handle: IntPtr)
Sets the handle to the specified pre-existing handle.
handle: The pre-existing handle to use.
"""
pass
def __enter__(self,*args):
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self,*args):
"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,handle):
""" __new__(cls: type,handle: IntPtr) """
pass
IsInvalid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsInvalid(self: SafeAccessTokenHandle) -> bool
"""
handle=None
InvalidHandle=None
class SafeHandleZeroOrMinusOneIsInvalid(SafeHandle,IDisposable):
""" Provides a base class for Win32 safe handle implementations in which the value of either 0 or -1 indicates an invalid handle. """
def Dispose(self):
"""
Dispose(self: SafeHandle,disposing: bool)
Releases the unmanaged resources used by the System.Runtime.InteropServices.SafeHandle class
specifying whether to perform a normal dispose operation.
disposing: true for a normal dispose operation; false to finalize the handle.
"""
pass
def ReleaseHandle(self,*args):
"""
ReleaseHandle(self: SafeHandle) -> bool
When overridden in a derived class,executes the code required to free the handle.
Returns: true if the handle is released successfully; otherwise,in the event of a catastrophic failure,
false. In this case,it generates a releaseHandleFailed MDA Managed Debugging Assistant.
"""
pass
def SetHandle(self,*args):
"""
SetHandle(self: SafeHandle,handle: IntPtr)
Sets the handle to the specified pre-existing handle.
handle: The pre-existing handle to use.
"""
pass
def __enter__(self,*args):
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self,*args):
"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*args): #cannot find CLR constructor
""" __new__(cls: type,ownsHandle: bool) """
pass
IsInvalid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value that indicates whether the handle is invalid.
Get: IsInvalid(self: SafeHandleZeroOrMinusOneIsInvalid) -> bool
"""
handle=None
class SafeFileHandle(SafeHandleZeroOrMinusOneIsInvalid,IDisposable):
"""
Represents a wrapper class for a file handle.
SafeFileHandle(preexistingHandle: IntPtr,ownsHandle: bool)
"""
def Dispose(self):
"""
Dispose(self: SafeHandle,disposing: bool)
Releases the unmanaged resources used by the System.Runtime.InteropServices.SafeHandle class
specifying whether to perform a normal dispose operation.
disposing: true for a normal dispose operation; false to finalize the handle.
"""
pass
def ReleaseHandle(self,*args):
""" ReleaseHandle(self: SafeFileHandle) -> bool """
pass
def SetHandle(self,*args):
"""
SetHandle(self: SafeHandle,handle: IntPtr)
Sets the handle to the specified pre-existing handle.
handle: The pre-existing handle to use.
"""
pass
def __enter__(self,*args):
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self,*args):
"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,preexistingHandle,ownsHandle):
""" __new__(cls: type,preexistingHandle: IntPtr,ownsHandle: bool) """
pass
handle=None
class SafeHandleMinusOneIsInvalid(SafeHandle,IDisposable):
""" Provides a base class for Win32 safe handle implementations in which the value of -1 indicates an invalid handle. """
def Dispose(self):
"""
Dispose(self: SafeHandle,disposing: bool)
Releases the unmanaged resources used by the System.Runtime.InteropServices.SafeHandle class
specifying whether to perform a normal dispose operation.
disposing: true for a normal dispose operation; false to finalize the handle.
"""
pass
def ReleaseHandle(self,*args):
"""
ReleaseHandle(self: SafeHandle) -> bool
When overridden in a derived class,executes the code required to free the handle.
Returns: true if the handle is released successfully; otherwise,in the event of a catastrophic failure,
false. In this case,it generates a releaseHandleFailed MDA Managed Debugging Assistant.
"""
pass
def SetHandle(self,*args):
"""
SetHandle(self: SafeHandle,handle: IntPtr)
Sets the handle to the specified pre-existing handle.
handle: The pre-existing handle to use.
"""
pass
def __enter__(self,*args):
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self,*args):
"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*args): #cannot find CLR constructor
""" __new__(cls: type,ownsHandle: bool) """
pass
IsInvalid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value that indicates whether the handle is invalid.
Get: IsInvalid(self: SafeHandleMinusOneIsInvalid) -> bool
"""
handle=None
class SafeProcessHandle(SafeHandleZeroOrMinusOneIsInvalid,IDisposable):
""" SafeProcessHandle(existingHandle: IntPtr,ownsHandle: bool) """
def Dispose(self):
"""
Dispose(self: SafeHandle,disposing: bool)
Releases the unmanaged resources used by the System.Runtime.InteropServices.SafeHandle class
specifying whether to perform a normal dispose operation.
disposing: true for a normal dispose operation; false to finalize the handle.
"""
pass
def ReleaseHandle(self,*args):
""" ReleaseHandle(self: SafeProcessHandle) -> bool """
pass
def SetHandle(self,*args):
"""
SetHandle(self: SafeHandle,handle: IntPtr)
Sets the handle to the specified pre-existing handle.
handle: The pre-existing handle to use.
"""
pass
def __enter__(self,*args):
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self,*args):
"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,existingHandle,ownsHandle):
""" __new__(cls: type,existingHandle: IntPtr,ownsHandle: bool) """
pass
handle=None
class SafeRegistryHandle(SafeHandleZeroOrMinusOneIsInvalid,IDisposable):
"""
Represents a safe handle to the Windows registry.
SafeRegistryHandle(preexistingHandle: IntPtr,ownsHandle: bool)
"""
def Dispose(self):
"""
Dispose(self: SafeHandle,disposing: bool)
Releases the unmanaged resources used by the System.Runtime.InteropServices.SafeHandle class
specifying whether to perform a normal dispose operation.
disposing: true for a normal dispose operation; false to finalize the handle.
"""
pass
def ReleaseHandle(self,*args):
""" ReleaseHandle(self: SafeRegistryHandle) -> bool """
pass
def SetHandle(self,*args):
"""
SetHandle(self: SafeHandle,handle: IntPtr)
Sets the handle to the specified pre-existing handle.
handle: The pre-existing handle to use.
"""
pass
def __enter__(self,*args):
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self,*args):
"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,preexistingHandle,ownsHandle):
""" __new__(cls: type,preexistingHandle: IntPtr,ownsHandle: bool) """
pass
handle=None
class SafeWaitHandle(SafeHandleZeroOrMinusOneIsInvalid,IDisposable):
"""
Represents a wrapper class for a wait handle.
SafeWaitHandle(existingHandle: IntPtr,ownsHandle: bool)
"""
def Dispose(self):
"""
Dispose(self: SafeHandle,disposing: bool)
Releases the unmanaged resources used by the System.Runtime.InteropServices.SafeHandle class
specifying whether to perform a normal dispose operation.
disposing: true for a normal dispose operation; false to finalize the handle.
"""
pass
def ReleaseHandle(self,*args):
""" ReleaseHandle(self: SafeWaitHandle) -> bool """
pass
def SetHandle(self,*args):
"""
SetHandle(self: SafeHandle,handle: IntPtr)
Sets the handle to the specified pre-existing handle.
handle: The pre-existing handle to use.
"""
pass
def __enter__(self,*args):
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self,*args):
"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,existingHandle,ownsHandle):
""" __new__(cls: type,existingHandle: IntPtr,ownsHandle: bool) """
pass
handle=None
class SafeX509ChainHandle(SafeHandleZeroOrMinusOneIsInvalid,IDisposable):
# no doc
def Dispose(self):
"""
Dispose(self: SafeHandle,disposing: bool)
Releases the unmanaged resources used by the System.Runtime.InteropServices.SafeHandle class
specifying whether to perform a normal dispose operation.
disposing: true for a normal dispose operation; false to finalize the handle.
"""
pass
def ReleaseHandle(self,*args):
""" ReleaseHandle(self: SafeX509ChainHandle) -> bool """
pass
def SetHandle(self,*args):
"""
SetHandle(self: SafeHandle,handle: IntPtr)
Sets the handle to the specified pre-existing handle.
handle: The pre-existing handle to use.
"""
pass
def __enter__(self,*args):
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self,*args):
"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
handle=None
|
a8cbc7f86e682f7d4d47ec3ce4ecfa7e1dca89cf
|
a40ad075eeadf753167a7e9be2bb41253bb443e9
|
/lit_nlp/components/metrics_test.py
|
7ce9abc1d432cd2a02d0f7a39c80a6c7c02bc6dc
|
[
"Apache-2.0"
] |
permissive
|
PAIR-code/lit
|
1cd55c5471bd24a8205174d3a40a2ec91ea56d27
|
a41130960d6ccb92acf6ffc603377eaecce8a62b
|
refs/heads/main
| 2023-09-05T15:35:22.731062
| 2022-12-02T19:48:37
| 2022-12-02T19:48:37
| 283,215,238
| 3,201
| 351
|
Apache-2.0
| 2023-09-14T06:08:56
| 2020-07-28T13:07:26
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 26,071
|
py
|
metrics_test.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lit_nlp.components.metrics."""
from typing import Optional, Union
from absl.testing import absltest
from absl.testing import parameterized
from lit_nlp.api import dataset as lit_dataset
from lit_nlp.api import dtypes
from lit_nlp.api import model as lit_model
from lit_nlp.api import types
from lit_nlp.components import metrics
from lit_nlp.lib import testing_utils
LitType = types.LitType
class TestGenTextModel(lit_model.Model):
def input_spec(self) -> types.Spec:
return {'input': types.TextSegment()}
def output_spec(self) -> types.Spec:
return {'output': types.GeneratedText(parent='input')}
def predict_minibatch(self,
inputs: list[types.JsonDict]) -> list[types.JsonDict]:
return [{'output': 'test_output'}] * len(inputs)
class TestGenTextCandsModel(lit_model.Model):
def input_spec(self) -> types.Spec:
return {
'input': types.TextSegment(),
'label': types.MultiSegmentAnnotations(),
}
def output_spec(self) -> types.Spec:
return {'output': types.GeneratedTextCandidates(parent='input')}
def predict_minibatch(self,
inputs: list[types.JsonDict]) -> list[types.JsonDict]:
return [
{'output': [('gen_text one', 0.8), ('gen_text two', 0.3)]}
] * len(inputs)
_CLASSIFICATION_MODEL = testing_utils.TestModelClassification()
_GENERATED_TEXT_MODEL = TestGenTextModel()
_GEN_TEXT_CANDS_MODEL = TestGenTextCandsModel()
_REGRESSION_MODEL = testing_utils.TestIdentityRegressionModel()
class RegressionMetricsTest(parameterized.TestCase):
def setUp(self):
super(RegressionMetricsTest, self).setUp()
self.metrics = metrics.RegressionMetrics()
def test_meta_spec(self):
meta_spec = self.metrics.meta_spec()
self.assertLen(meta_spec, 3)
self.assertIn('mse', meta_spec)
self.assertIn('pearsonr', meta_spec)
self.assertIn('spearmanr', meta_spec)
for spec in meta_spec.values():
self.assertIsInstance(spec, types.MetricResult)
@parameterized.named_parameters(
('cls_model', _CLASSIFICATION_MODEL, False),
('gen_text_model', _GENERATED_TEXT_MODEL, False),
('reg_model', _REGRESSION_MODEL, True),
)
def test_is_compatible(self, model: lit_model.Model, expected: bool):
"""Always false to prevent use as explainer."""
compat = self.metrics.is_compatible(
model, lit_dataset.NoneDataset({'test': model}))
self.assertEqual(compat, expected)
@parameterized.named_parameters(
('regression', types.RegressionScore(), True),
      ('multiclass', types.MulticlassPreds(vocab=['']), False),
('generated text', types.GeneratedText(), False))
def test_is_field_compatible(self, pred: LitType, expected: bool):
self.assertEqual(self.metrics.is_field_compatible(pred, None), expected)
@parameterized.named_parameters(
('correct', [1, 2, 3, 4], [1, 2, 3, 4], 0, 1.0, 1.0),
('incorrect', [1, 2, 3, 4], [-5, -10, 5, 6], 47.0, 0.79559, 0.799999),
('some_correct', [1, 2, 3, 4], [1, 2, 5.5, 6.3], 2.885, 0.96566, 1.0),
)
def test_compute(self, labels: list[float], preds: list[float], mse: float,
pearsonr: float, spearmanr: float):
expected = {'mse': mse, 'pearsonr': pearsonr, 'spearmanr': spearmanr}
result = self.metrics.compute(labels, preds,
types.RegressionScore(),
types.RegressionScore())
testing_utils.assert_deep_almost_equal(self, result, expected)
def test_compute_empty(self):
result = self.metrics.compute([], [], types.RegressionScore(),
types.RegressionScore())
testing_utils.assert_deep_almost_equal(self, result, {})
class MulticlassMetricsTest(parameterized.TestCase):
def setUp(self):
super(MulticlassMetricsTest, self).setUp()
self.metrics = metrics.MulticlassMetricsImpl()
def test_meta_spec(self):
meta_spec = self.metrics.meta_spec()
self.assertLen(meta_spec, 7)
self.assertIn('accuracy', meta_spec)
self.assertIn('precision', meta_spec)
self.assertIn('recall', meta_spec)
self.assertIn('f1', meta_spec)
self.assertIn('auc', meta_spec)
self.assertIn('aucpr', meta_spec)
self.assertIn('num_missing_labels', meta_spec)
for spec in meta_spec.values():
self.assertIsInstance(spec, types.MetricResult)
@parameterized.named_parameters(
('cls_model', _CLASSIFICATION_MODEL, True),
('reg_model', _REGRESSION_MODEL, False),
('gen_text_model', _GENERATED_TEXT_MODEL, False),
)
def test_is_compatible(self, model: lit_model.Model, expected: bool):
"""Always false to prevent use as explainer."""
compat = self.metrics.is_compatible(
model, lit_dataset.NoneDataset({'test': model}))
self.assertEqual(compat, expected)
@parameterized.named_parameters(
('multiclass', types.MulticlassPreds(vocab=['']), None, True),
('regression', types.RegressionScore(), None, False),
('generated text', types.GeneratedText(), None, False))
def test_is_field_compatible(self, pred: LitType, parent: LitType,
expected: bool):
self.assertEqual(
self.metrics.is_field_compatible(pred, parent), expected)
@parameterized.named_parameters(
(
'correct', ['0', '1', '2'], ['1', '2', '0', '1'],
[[0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 1, 0]],
1.0, 1.0, 1.0, 1.0
),
(
'incorrect', ['0', '1', '2'], ['1', '2', '0', '1'],
[[.1, .4, .5], [.2, .7, .1], [.1, 0, .9], [1, 0, 0]],
0.0, 0.0, 0.0, 0.0
),
(
'some_correct', ['0', '1', '2'], ['1', '2', '0', '1'],
[[.1, .4, .5], [0, .1, .9], [.1, 0, .9], [0, 1, 0]],
0.5, 0.57143, 0.5, 0.66667
),
(
'some_correct_4_class', ['0', '1', '2', '3'], ['1', '0', '2', '3'],
[[.1, .4, .2, .3], [.9, .1, 0, 0], [0, .3, .5, .2], [.1, .1, .5, .3]],
0.75, 0.66667, 0.66667, 0.66667
),
)
def test_compute_multiclass(
self, vocab: list[str], labels: list[str], preds: list[list[int]],
accuracy: float, f1: float, precision: float, recall: float):
expected = {
'accuracy': accuracy,
'f1': f1,
'precision': precision,
'recall': recall
}
result = self.metrics.compute(
labels, preds, types.CategoryLabel(),
types.MulticlassPreds(vocab=vocab, null_idx=0))
testing_utils.assert_deep_almost_equal(self, result, expected)
def test_compute_no_null_index(self):
result = self.metrics.compute(
['1', '2', '0', '1'],
[[.1, .4, .5], [0, .1, .9], [.1, 0, .9], [0, 1, 0]],
types.CategoryLabel(), types.MulticlassPreds(vocab=['0', '1', '2']))
testing_utils.assert_deep_almost_equal(self, result, {'accuracy': 0.5})
def test_compute_correct_single_class(self):
result = self.metrics.compute(
['1', '1'], [[.1, .9], [.2, .8]], types.CategoryLabel(),
types.MulticlassPreds(vocab=['0', '1'], null_idx=0))
testing_utils.assert_deep_almost_equal(self, result, {
'accuracy': 1.0,
# No AUC in this case.
'aucpr': 1.0,
'f1': 1.0,
'precision': 1.0,
'recall': 1.0,
})
def test_compute_almost_correct_single_class_with_null_idx_0(self):
result = self.metrics.compute(
['1', '0', '1'], [[.1, .9], [.9, .1], [.8, .2]], types.CategoryLabel(),
types.MulticlassPreds(vocab=['0', '1'], null_idx=0))
testing_utils.assert_deep_almost_equal(
self, result, {
'accuracy': 0.66667,
'auc': 1.0,
'aucpr': 1.0,
'f1': 0.66667,
'precision': 1.0,
'recall': 0.5,
})
def test_compute_empty_labels(self):
result = self.metrics.compute(
[], [], types.CategoryLabel(),
types.MulticlassPreds(vocab=['0', '1', '2'], null_idx=0))
testing_utils.assert_deep_almost_equal(self, result, {})
class MulticlassPairedMetricsTest(parameterized.TestCase):
def setUp(self):
super(MulticlassPairedMetricsTest, self).setUp()
self.metrics = metrics.MulticlassPairedMetricsImpl()
def test_meta_spec(self):
meta_spec = self.metrics.meta_spec()
self.assertLen(meta_spec, 3)
self.assertIn('num_pairs', meta_spec)
self.assertIn('swap_rate', meta_spec)
self.assertIn('mean_jsd', meta_spec)
for spec in meta_spec.values():
self.assertIsInstance(spec, types.MetricResult)
@parameterized.named_parameters(
('cls_model', _CLASSIFICATION_MODEL, True),
('reg_model', _REGRESSION_MODEL, False),
('gen_text_model', _GENERATED_TEXT_MODEL, False),
)
def test_is_compatible(self, model: lit_model.Model, expected: bool):
"""Always false to prevent use as explainer."""
compat = self.metrics.is_compatible(
model, lit_dataset.NoneDataset({'test': model}))
self.assertEqual(compat, expected)
@parameterized.named_parameters(
('multiclass', types.MulticlassPreds(vocab=['']), True),
('regression', types.RegressionScore(), False),
('generated text', types.GeneratedText(), False))
def test_is_field_compatible(self, pred: LitType, expected: bool):
self.assertEqual(self.metrics.is_field_compatible(pred, None), expected)
@parameterized.named_parameters(
('no_swaps', [[0, 1], [0, 1], [1, 0], [1, 0]], 0, 0.0, 0.0),
('one_swap', [[0, 1], [1, 0], [1, 0], [1, 0]], 0, 0.34657, 0.5),
('two_swaps', [[0, 1], [1, 0], [1, 0], [0, 1]], 0, 0.69315, 1.0),
('no_null_index', [[0, 1], [1, 0], [1, 0], [0, 1]], None, 0.69315, 1.0),
)
def test_compute_with_metadata(self, preds: list[list[int]],
null_idx: Optional[int], mean_jsd: float,
swap_rate: float):
labels = ['1', '1', '0', '0']
indices = ['7f7f85', '345ac4', '3a3112', '88bcda']
metas = [{'parentId': '345ac4'}, {}, {}, {'parentId': '3a3112'}]
expected = {'mean_jsd': mean_jsd, 'num_pairs': 2, 'swap_rate': swap_rate}
result = self.metrics.compute_with_metadata(
labels, preds, types.CategoryLabel(),
types.MulticlassPreds(vocab=['0', '1'], null_idx=null_idx), indices,
metas)
testing_utils.assert_deep_almost_equal(self, result, expected)
def test_compute_with_metadata_empty(self):
result = self.metrics.compute_with_metadata(
[], [], types.CategoryLabel(),
types.MulticlassPreds(vocab=['0', '1'], null_idx=0), [], [])
testing_utils.assert_deep_almost_equal(self, result, {})
class CorpusBLEUTest(parameterized.TestCase):
def setUp(self):
super(CorpusBLEUTest, self).setUp()
self.metrics = metrics.CorpusBLEU()
def test_meta_spec(self):
meta_spec = self.metrics.meta_spec()
self.assertLen(meta_spec, 2)
self.assertIn('corpus_bleu', meta_spec)
self.assertIn('corpus_bleu@1', meta_spec)
for spec in meta_spec.values():
self.assertIsInstance(spec, types.MetricResult)
@parameterized.named_parameters(
('cls_model', _CLASSIFICATION_MODEL, False),
('reg_model', _REGRESSION_MODEL, False),
('gen_text_model', _GENERATED_TEXT_MODEL, True),
)
def test_is_compatible(self, model: lit_model.Model, expected: bool):
"""Always false to prevent use as explainer."""
compat = self.metrics.is_compatible(
model, lit_dataset.NoneDataset({'test': model}))
self.assertEqual(compat, expected)
@parameterized.named_parameters(
('generated text, str', types.GeneratedText(), types.StringLitType(),
True),
('candidates, str', types.GeneratedTextCandidates(),
types.StringLitType(), True),
('bad pred, good parent', types.Scalar(), types.StringLitType(), False),
('good pred, bad parent', types.GeneratedText(), types.Scalar(), False),
('both bad', types.Scalar(), types.Scalar(), False))
def test_is_field_compatible(self, pred: LitType, parent: LitType,
expected: bool):
self.assertEqual(self.metrics.is_field_compatible(pred, parent), expected)
@parameterized.named_parameters(
('correct', ['This is a test.', 'Test one', 'A third test'], 100.0000),
(
'some_different',
['This is a test.', 'Test two', 'A third test example'], 68.037493
),
(
'all_different',
['these test.', 'Test two', 'A third test example'], 29.508062
),
)
def test_compute(self, preds: list[str], score: float):
labels = ['This is a test.', 'Test one', 'A third test']
expected = {'corpus_bleu': score}
result = self.metrics.compute(labels, preds, types.GeneratedText(),
types.GeneratedText())
testing_utils.assert_deep_almost_equal(self, result, expected)
def test_compute_empty_labels(self):
result = self.metrics.compute([], [], types.GeneratedText(),
types.GeneratedText())
testing_utils.assert_deep_almost_equal(self, result, {})
def test_compute_with_candidates(self):
# Should only score the first one (@1).
labels = ['This is a test.', 'Test two']
preds = [
[('This is a test.', -1.0), ('foobar', -20.0)],
[('Test two', -1.0), ('spam', -20.0)],
]
result = self.metrics.compute(labels, preds, types.TextSegment(),
types.GeneratedTextCandidates())
testing_utils.assert_deep_almost_equal(self, result,
{'corpus_bleu@1': 100.0000})
class RougeLTest(parameterized.TestCase):
def setUp(self):
super(RougeLTest, self).setUp()
self.metrics = metrics.RougeL()
def test_meta_spec(self):
meta_spec = self.metrics.meta_spec()
self.assertLen(meta_spec, 2)
self.assertIn('rougeL', meta_spec)
self.assertIn('rougeL@1', meta_spec)
for spec in meta_spec.values():
self.assertIsInstance(spec, types.MetricResult)
@parameterized.named_parameters(
('cls_model', _CLASSIFICATION_MODEL, False),
('reg_model', _REGRESSION_MODEL, False),
('gen_text_model', _GENERATED_TEXT_MODEL, True),
)
def test_is_compatible(self, model: lit_model.Model, expected: bool):
"""Always false to prevent use as explainer."""
compat = self.metrics.is_compatible(
model, lit_dataset.NoneDataset({'test': model}))
self.assertEqual(compat, expected)
@parameterized.named_parameters(
('generated text + str', types.GeneratedText(), types.StringLitType(),
True),
('candidates + str', types.GeneratedTextCandidates(),
types.StringLitType(), True),
('bad pred, good parent', types.Scalar(), types.StringLitType(), False),
('good pred, bad parent', types.GeneratedText(), types.Scalar(), False),
('both bad', types.Scalar(), types.Scalar(), False))
def test_is_field_compatible(self, pred: LitType, parent: LitType,
expected: bool):
self.assertEqual(self.metrics.is_field_compatible(pred, parent), expected)
@parameterized.named_parameters(
('correct', ['This is a test.', 'Test one', 'A third test'], 1.0),
(
'some_different',
['This is a test.', 'Test two', 'A third test example'], 0.785714
),
(
'all_different',
['these test.', 'Test two', 'A third test example'], 0.563492
),
)
def test_compute(self, preds: list[str], score: float):
labels = ['This is a test.', 'Test one', 'A third test']
expected = {'rougeL': score}
result = self.metrics.compute(labels, preds, types.TextSegment(),
types.GeneratedText())
testing_utils.assert_deep_almost_equal(self, result, expected)
def test_compute_empty(self):
result = self.metrics.compute([], [], types.GeneratedText(),
types.GeneratedText())
testing_utils.assert_deep_almost_equal(self, result, {})
def test_compute_with_candidates(self):
# Should only score the first one (@1).
labels = ['This is a test.', 'Test two']
preds = [
[('This is a test.', -1.0), ('foobar', -20.0)],
[('Test two', -1.0), ('spam', -20.0)],
]
result = self.metrics.compute(labels, preds, types.TextSegment(),
types.GeneratedTextCandidates())
testing_utils.assert_deep_almost_equal(self, result, {'rougeL@1': 1.0})
_MULTI_SEG_ANNOTATION_LABELS = [
[dtypes.AnnotationCluster(label='one', spans=[])],
[dtypes.AnnotationCluster(label='two', spans=[])],
]
class ExactMatchTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.metrics = metrics.ExactMatchMetrics()
def test_meta_spec(self):
meta_spec = self.metrics.meta_spec()
self.assertLen(meta_spec, 2)
self.assertIn('exactmatch', meta_spec)
self.assertIn('exactmatch@1', meta_spec)
for spec in meta_spec.values():
self.assertIsInstance(spec, types.MetricResult)
@parameterized.named_parameters(
dict(
testcase_name='classification',
model=_CLASSIFICATION_MODEL,
expected=False,
),
dict(
testcase_name='regression',
model=_REGRESSION_MODEL,
expected=False,
),
dict(
testcase_name='gen_text',
model=_GENERATED_TEXT_MODEL,
expected=True,
),
dict(
testcase_name='gen_text_cands',
model=_GEN_TEXT_CANDS_MODEL,
expected=True,
),
)
def test_is_compatible(self, model: LitType, expected: bool):
compat = self.metrics.is_compatible(
model, lit_dataset.NoneDataset({'test': model}))
self.assertEqual(compat, expected)
@parameterized.named_parameters(
dict(
testcase_name='gentext_multi_segment_annotations',
pred=types.GeneratedText(),
parent=types.MultiSegmentAnnotations(),
expected=True,
),
dict(
testcase_name='gentext_text',
pred=types.GeneratedText(),
parent=types.TextSegment(),
expected=True,
),
dict(
testcase_name='gencands_multi_segment_annotations',
pred=types.GeneratedTextCandidates(),
parent=types.MultiSegmentAnnotations(),
expected=True,
),
dict(
testcase_name='gencands_text',
pred=types.GeneratedTextCandidates(),
parent=types.TextSegment(),
expected=True,
),
dict(
testcase_name='gentext_scalar',
pred=types.GeneratedText(),
parent=types.Scalar(),
expected=False,
),
dict(
testcase_name='gencands_scalar',
pred=types.GeneratedTextCandidates(),
parent=types.Scalar(),
expected=False,
),
dict(
testcase_name='text_text',
pred=types.TextSegment(),
parent=types.TextSegment(),
expected=False,
),
dict(
testcase_name='text_scalar',
pred=types.TextSegment(),
parent=types.Scalar(),
expected=False,
),
)
def test_is_field_compatible(self,
pred: LitType,
parent: LitType,
expected: bool):
self.assertEqual(self.metrics.is_field_compatible(pred, parent), expected)
@parameterized.named_parameters(
# Without labels or preds, it should return an empty dict
dict(
testcase_name='no_labels',
labels=[],
preds=['one', 'two'],
label_spec=types.TextSegment(),
preds_spec=types.GeneratedText(),
expected={},
),
dict(
testcase_name='no_preds',
labels=['one', 'two'],
preds=[],
label_spec=types.TextSegment(),
preds_spec=types.GeneratedText(),
expected={},
),
# Tests for all, some, and none correct w/ MultiSegmentAnnotations labels
dict(
testcase_name='correct_multi_segment_annotations_gentext',
labels=_MULTI_SEG_ANNOTATION_LABELS,
preds=['one', 'two'],
label_spec=types.MultiSegmentAnnotations(),
preds_spec=types.GeneratedText(),
expected={'exactmatch': 1.0},
),
dict(
testcase_name='correct_multi_segment_annotations_gencands',
labels=_MULTI_SEG_ANNOTATION_LABELS,
preds=[[('one', None)], [('two', None)]],
label_spec=types.MultiSegmentAnnotations(),
preds_spec=types.GeneratedTextCandidates(),
expected={'exactmatch@1': 1.0},
),
dict(
testcase_name='some_multi_segment_annotations_gentext',
labels=_MULTI_SEG_ANNOTATION_LABELS,
preds=['one', 'four'],
label_spec=types.MultiSegmentAnnotations(),
preds_spec=types.GeneratedText(),
expected={'exactmatch': 0.5},
),
dict(
testcase_name='some_multi_segment_annotations_gencands',
labels=_MULTI_SEG_ANNOTATION_LABELS,
preds=[[('one', None)], [('four', None)]],
label_spec=types.MultiSegmentAnnotations(),
preds_spec=types.GeneratedTextCandidates(),
expected={'exactmatch@1': 0.5},
),
dict(
testcase_name='none_multi_segment_annotations_gentext',
labels=_MULTI_SEG_ANNOTATION_LABELS,
preds=['three', 'four'],
label_spec=types.MultiSegmentAnnotations(),
preds_spec=types.GeneratedText(),
expected={'exactmatch': 0.0},
),
dict(
testcase_name='none_multi_segment_annotations_gencands',
labels=_MULTI_SEG_ANNOTATION_LABELS,
preds=[[('three', None)], [('four', None)]],
label_spec=types.MultiSegmentAnnotations(),
preds_spec=types.GeneratedTextCandidates(),
expected={'exactmatch@1': 0.0},
),
# Tests for all, some, and none correct w/ TextSegment labels
dict(
testcase_name='correct_text_gentext',
labels=['one', 'two'],
preds=['one', 'two'],
label_spec=types.TextSegment(),
preds_spec=types.GeneratedText(),
expected={'exactmatch': 1.0},
),
dict(
testcase_name='correct_text_gencands',
labels=['one', 'two'],
preds=[[('one', None)], [('two', None)]],
label_spec=types.TextSegment(),
preds_spec=types.GeneratedTextCandidates(),
expected={'exactmatch@1': 1.0},
),
dict(
testcase_name='some_text_gentext',
labels=['one', 'two'],
preds=['one', 'four'],
label_spec=types.TextSegment(),
preds_spec=types.GeneratedText(),
expected={'exactmatch': 0.5},
),
dict(
testcase_name='some_text_gencands',
labels=['one', 'two'],
preds=[[('one', None)], [('four', None)]],
label_spec=types.TextSegment(),
preds_spec=types.GeneratedTextCandidates(),
expected={'exactmatch@1': 0.5},
),
dict(
testcase_name='none_text_gentext',
labels=['one', 'two'],
preds=['three', 'four'],
label_spec=types.TextSegment(),
preds_spec=types.GeneratedText(),
expected={'exactmatch': 0.0},
),
dict(
testcase_name='none_text_gencands',
labels=['one', 'two'],
preds=[[('three', None)], [('four', None)]],
label_spec=types.TextSegment(),
preds_spec=types.GeneratedTextCandidates(),
expected={'exactmatch@1': 0.0},
),
)
def test_compute(self,
labels: Union[list[str],
list[list[dtypes.AnnotationCluster]]],
preds,
label_spec: Union[types.MultiSegmentAnnotations,
types.TextSegment],
preds_spec: Union[types.GeneratedText,
types.GeneratedTextCandidates],
expected: dict[str, float]):
result = self.metrics.compute(labels, preds, label_spec, preds_spec)
testing_utils.assert_deep_almost_equal(self, result, expected)
@parameterized.named_parameters(
dict(
testcase_name='invalid_labels_gentext',
label_spec=types.Scalar(),
preds_spec=types.GeneratedText(),
),
dict(
testcase_name='invalid_labels_gentextcandidates',
label_spec=types.Scalar(),
preds_spec=types.GeneratedTextCandidates(),
),
dict(
testcase_name='invalid_preds_text',
label_spec=types.TextSegment(),
preds_spec=types.Scalar(),
),
dict(
testcase_name='invalid_preds_multi_segment_annotations',
label_spec=types.MultiSegmentAnnotations(),
preds_spec=types.Scalar(),
),
)
def test_compute_spec_exceptions(self,
label_spec: types.LitType,
preds_spec: types.LitType):
inputs = ['one', 'two', 'three']
preds = ['one', 'two', 'three']
with self.assertRaises(TypeError):
self.metrics.compute(inputs, preds, label_spec, preds_spec)
if __name__ == '__main__':
absltest.main()
|
1706fa2c7f90a9ef99bcc97062c9baf32fabdfe5
|
1efd2de8bf77ec00eb2fcaf5749278495946d920
|
/src/tests/ftest/ior/hard.py
|
1fa9d47de3087601b343222b124b9d98e5b8dda1
|
[
"BSD-2-Clause",
"BSD-2-Clause-Patent"
] |
permissive
|
daos-stack/daos
|
6f55bf3061fd830d5b8d28506e1295e2d3a27c38
|
ed5eed5df43a68571afe123132a743824c02637a
|
refs/heads/master
| 2023-08-31T21:43:37.606145
| 2023-08-31T16:38:00
| 2023-08-31T16:38:00
| 69,390,670
| 631
| 300
|
NOASSERTION
| 2023-09-14T18:55:15
| 2016-09-27T19:21:29
|
C
|
UTF-8
|
Python
| false
| false
| 1,024
|
py
|
hard.py
|
"""
(C) Copyright 2018-2023 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
from ior_test_base import IorTestBase
class IorHardBasic(IorTestBase):
# pylint: disable=too-few-public-methods
"""Test class Description: Runs IOR Hard with different
    EC Object types.
:avocado: recursive
"""
def test_ior_hard(self):
"""Jira ID: DAOS-7313.
Test Description:
Run IOR Hard with EC Object types.
Use Cases:
Create the pool, container and run IOR Hard with EC Objects.
:avocado: tags=all,full_regression
:avocado: tags=hw,large
:avocado: tags=ec,ec_array
:avocado: tags=IorHardBasic,ec_ior,ior_hard,test_ior_hard
"""
ior_read_flags = self.params.get("read_flags", "/run/ior/*")
self.run_ior_with_pool()
self.ior_cmd.flags.update(ior_read_flags)
self.ior_cmd.sw_wearout.update(None)
self.run_ior_with_pool(create_cont=False)
|
b8a8264bbc6db87a3d89328f19d2542fd4eabe20
|
b18330180bc2dddf483e2eb81160934dcd818ef8
|
/analysis/halContiguousRegionsTest.py
|
6b15831d558d9836fe7d9806ed1904371f679afc
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ComparativeGenomicsToolkit/hal
|
3e5cc48c09bd0083d8d2eb849401ac5ed0bf8b06
|
e9e70b6c4a46a82aacf36632e1761804595d7f66
|
refs/heads/master
| 2023-08-31T06:32:09.551768
| 2023-06-22T23:55:36
| 2023-06-22T23:55:36
| 3,778,244
| 110
| 42
|
NOASSERTION
| 2023-08-31T15:13:38
| 2012-03-20T17:58:31
|
C++
|
UTF-8
|
Python
| false
| false
| 6,477
|
py
|
halContiguousRegionsTest.py
|
#!/usr/bin/env python3
import unittest
from hal.analysis.halContiguousRegions import ContiguousRegions
class ContiguousRegionsTestCase(unittest.TestCase):
def setUp(self):
self.contiguousRegions = ContiguousRegions('/dev/null', 'src',
'target', 100, '/tmp/',
10000, False, 0.25)
self.maxDiff = None
def test_mergeBlocks_noDups(self):
blocks = { 'seq1': [((0, 20, '+'), (20, 40, '+')),
((80, 100, '+'), (0, 20, '-')),
((20, 40, '+'), (50, 70, '+'))]
}
merged = { 'seq1': [((0, 20, '+'), [(20, 40, '+')]),
((20, 40, '+'), [(50, 70, '+')]),
((80, 100, '+'), [(0, 20, '-')])]
}
self.assertEqual(self.contiguousRegions.mergeBlocks(blocks), merged)
def test_mergeBlocks_simpleDuped(self):
blocks = {
# overlapping in the middle of a block
'seq1': [((0, 20, '+'), (20, 40, '+')),
((10, 15, '+'), (0, 5, '+')),
((20, 40, '+'), (50, 70, '+'))],
# overlapping and going past the end of a block
'seq2': [((0, 20, '+'), (20, 40, '+')),
((10, 30, '+'), (0, 20, '+'))]
}
merged = { 'seq1': [((0, 10, '+'), [(20, 30, '+')]),
((10, 15, '+'), [(30, 35, '+'), (0, 5, '+')]),
((15, 20, '+'), [(35, 40, '+')]),
((20, 40, '+'), [(50, 70, '+')])],
'seq2': [((0, 10, '+'), [(20, 30, '+')]),
((10, 20, '+'), [(30, 40, '+'), (0, 10, '+')]),
((20, 30, '+'), [(10, 20, '+')])]
}
self.assertEqual(self.contiguousRegions.mergeBlocks(blocks), merged)
def test_mergeBlocks_negativeStrandDuped(self):
blocks = {
# overlapping in the middle of a block
'seq1': [((0, 20, '+'), (20, 40, '-')),
((10, 15, '+'), (0, 5, '-')),
((20, 40, '+'), (50, 70, '-'))],
# overlapping and going past the end of a block
'seq2': [((0, 20, '+'), (20, 40, '-')),
((10, 30, '+'), (0, 20, '-'))]
}
merged = { 'seq1': [((0, 10, '+'), [(30, 40, '-')]),
((10, 15, '+'), [(25, 30, '-'), (0, 5, '-')]),
((15, 20, '+'), [(20, 25, '-')]),
((20, 40, '+'), [(50, 70, '-')])],
'seq2': [((0, 10, '+'), [(30, 40, '-')]),
((10, 20, '+'), [(20, 30, '-'), (10, 20, '-')]),
((20, 30, '+'), [(0, 10, '-')])]
}
self.assertEqual(self.contiguousRegions.mergeBlocks(blocks), merged)
def test_mergeBlocks_mixedStrandsDuped(self):
blocks = {
# overlapping in the middle of a block
'seq1': [((0, 20, '+'), (20, 40, '-')),
((10, 15, '+'), (0, 5, '+')),
((20, 40, '+'), (50, 70, '-'))],
# overlapping and going past the end of a block
'seq2': [((0, 20, '+'), (20, 40, '-')),
((10, 30, '+'), (0, 20, '+'))]
}
merged = { 'seq1': [((0, 10, '+'), [(30, 40, '-')]),
((10, 15, '+'), [(25, 30, '-'), (0, 5, '+')]),
((15, 20, '+'), [(20, 25, '-')]),
((20, 40, '+'), [(50, 70, '-')])],
'seq2': [((0, 10, '+'), [(30, 40, '-')]),
((10, 20, '+'), [(20, 30, '-'), (0, 10, '+')]),
((20, 30, '+'), [(10, 20, '+')])]
}
self.assertEqual(self.contiguousRegions.mergeBlocks(blocks), merged)
def test_mergeBlocks_tripled(self):
blocks = {
# stacking
'seq1': [((0, 20, '+'), (20, 40, '+')),
((0, 20, '+'), (70, 90, '-')),
((0, 20, '+'), (50, 70, '+'))],
# mixture of overlap types
'seq2': [((0, 20, '+'), (20, 40, '+')),
((10, 15, '+'), (60, 65, '-')),
((11, 30, '+'), (80, 99, '+'))]
}
merged = {
'seq1': [((0, 20, '+'), [(20, 40, '+'), (70, 90, '-'), (50, 70, '+')])],
'seq2': [((0, 10, '+'), [(20, 30, '+')]),
((10, 11, '+'), [(30, 31, '+'), (64, 65, '-')]),
((11, 15, '+'), [(31, 35, '+'), (60, 64, '-'), (80, 84, '+')]),
((15, 20, '+'), [(35, 40, '+'), (84, 89, '+')]),
((20, 30, '+'), [(89, 99, '+')])]
}
self.assertEqual(self.contiguousRegions.mergeBlocks(blocks), merged)
def test_mergeBlocks_staggered(self):
blocks = {
'seq1': [((0, 5, '+'), (0, 5, '+')),
((1, 6, '+'), (5, 10, '-')),
((2, 7, '+'), (10, 15, '+')),
((3, 8, '+'), (15, 20, '-')),
((4, 9, '+'), (20, 25, '-')),
((5, 10, '+'), (25, 30, '+'))]
}
merged = {
'seq1': [((0, 1, '+'), [(0, 1, '+')]),
((1, 2, '+'), [(1, 2, '+'), (9, 10, '-')]),
((2, 3, '+'), [(2, 3, '+'), (8, 9, '-'), (10, 11, '+')]),
((3, 4, '+'), [(3, 4, '+'), (7, 8, '-'), (11, 12, '+'), (19, 20, '-')]),
((4, 5, '+'), [(4, 5, '+'), (6, 7, '-'), (12, 13, '+'), (18, 19, '-'), (24, 25, '-')]),
((5, 6, '+'), [(5, 6, '-'), (13, 14, '+'), (17, 18, '-'), (23, 24, '-'), (25, 26, '+')]),
((6, 7, '+'), [(14, 15, '+'), (16, 17, '-'), (22, 23, '-'), (26, 27, '+')]),
((7, 8, '+'), [(15, 16, '-'), (21, 22, '-'), (27, 28, '+')]),
((8, 9, '+'), [(20, 21, '-'), (28, 29, '+')]),
((9, 10, '+'), [(29, 30, '+')])]
}
self.assertEqual(self.contiguousRegions.mergeBlocks(blocks), merged)
if __name__ == '__main__':
unittest.main()
|
0e8ccac5dc8825d0450eeae550fde713aa1d2d9a
|
f3806d9fb54773908cd9704121a543b114470aca
|
/angr/analyses/decompiler/structured_codegen/base.py
|
32da2e8986482020f3e01236f3c4e431cd2f1091
|
[
"BSD-2-Clause"
] |
permissive
|
angr/angr
|
8ae95fceca51b0a001de56477d984dd01193ac1d
|
37e8ca1c3308ec601ad1d7c6bc8081ff38a7cffd
|
refs/heads/master
| 2023-08-17T03:15:21.007865
| 2023-08-15T18:44:57
| 2023-08-15T18:44:57
| 40,328,394
| 7,184
| 1,306
|
BSD-2-Clause
| 2023-09-14T20:14:23
| 2015-08-06T21:46:55
|
Python
|
UTF-8
|
Python
| false
| false
| 3,845
|
py
|
base.py
|
from typing import Dict, Optional, Set, Union
from sortedcontainers import SortedDict
from ....sim_variable import SimVariable
#
# Position Mapping Classes
#
class PositionMappingElement:
__slots__ = ("start", "length", "obj")
def __init__(self, start, length, obj):
self.start: int = start
self.length: int = length
self.obj = obj
def __contains__(self, offset):
return self.start <= offset < self.start + self.length
def __repr__(self):
return "<%d-%d: %s>" % (self.start, self.start + self.length, self.obj)
class PositionMapping:
__slots__ = ("_posmap",)
DUPLICATION_CHECK = True
def __init__(self):
self._posmap: Union[SortedDict, Dict[int, PositionMappingElement]] = SortedDict()
def items(self):
return self._posmap.items()
#
# Public methods
#
def add_mapping(self, start_pos, length, obj):
# duplication check
if self.DUPLICATION_CHECK:
try:
pre = next(self._posmap.irange(maximum=start_pos, reverse=True))
if start_pos in self._posmap[pre]:
raise ValueError("New mapping is overlapping with an existing element.")
except StopIteration:
pass
self._posmap[start_pos] = PositionMappingElement(start_pos, length, obj)
def get_node(self, pos: int):
element = self.get_element(pos)
if element is None:
return None
return element.obj
def get_element(self, pos: int) -> Optional[PositionMappingElement]:
try:
pre = next(self._posmap.irange(maximum=pos, reverse=True))
except StopIteration:
return None
element = self._posmap[pre]
if pos in element:
return element
return None
class InstructionMappingElement:
__slots__ = ("ins_addr", "posmap_pos")
def __init__(self, ins_addr, posmap_pos):
self.ins_addr: int = ins_addr
self.posmap_pos: int = posmap_pos
def __contains__(self, offset: int):
return self.ins_addr == offset
def __repr__(self):
return "<%d: %d>" % (self.ins_addr, self.posmap_pos)
class InstructionMapping:
__slots__ = ("_insmap",)
def __init__(self):
self._insmap: Union[SortedDict, Dict[int, InstructionMappingElement]] = SortedDict()
def items(self):
return self._insmap.items()
def add_mapping(self, ins_addr, posmap_pos):
if ins_addr in self._insmap:
if posmap_pos <= self._insmap[ins_addr].posmap_pos:
self._insmap[ins_addr] = InstructionMappingElement(ins_addr, posmap_pos)
else:
self._insmap[ins_addr] = InstructionMappingElement(ins_addr, posmap_pos)
def get_nearest_pos(self, ins_addr: int) -> Optional[int]:
try:
pre_max = next(self._insmap.irange(maximum=ins_addr, reverse=True))
pre_min = next(self._insmap.irange(minimum=ins_addr, reverse=True))
except StopIteration:
return None
e1: InstructionMappingElement = self._insmap[pre_max]
e2: InstructionMappingElement = self._insmap[pre_min]
if abs(ins_addr - e1.ins_addr) <= abs(ins_addr - e2.ins_addr):
return e1.posmap_pos
else:
return e2.posmap_pos
class BaseStructuredCodeGenerator:
def __init__(self, flavor=None):
self.flavor = flavor
self.text = None
self.map_pos_to_node = None
self.map_pos_to_addr = None
self.map_addr_to_pos = None
self.map_ast_to_pos: Optional[Dict[SimVariable, Set[PositionMappingElement]]] = None
def reapply_options(self, options):
pass
def regenerate_text(self) -> None:
pass
def reload_variable_types(self) -> None:
pass
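# --- Usage sketch (editor's addition, not part of the angr module above) ---
# A minimal, self-contained illustration of how PositionMapping and
# InstructionMapping might be driven; the text offsets, lengths and the
# 0x400000-range instruction addresses below are made-up example values.
if __name__ == "__main__":
    posmap = PositionMapping()
    # Map two spans of rendered pseudocode text to arbitrary objects.
    posmap.add_mapping(0, 5, "int x;")
    posmap.add_mapping(10, 3, "x++")
    assert posmap.get_node(2) == "int x;"  # offset 2 falls inside [0, 5)
    assert posmap.get_node(7) is None      # offset 7 lies in the gap between spans
    insmap = InstructionMapping()
    insmap.add_mapping(0x400000, 0)   # instruction address -> text position
    insmap.add_mapping(0x400004, 10)
    # Ask for the text position nearest to an address between the two entries.
    print(insmap.get_nearest_pos(0x400002))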
|
7530f80656c499ab47bb211d3a8bdd72918ff03c
|
0032d988541e85c47b5034c20ecf88220dde5a95
|
/openbook/storage_backends.py
|
5f4d0ac2e81234b5d5e69c4b25cdac8c31e06edf
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
OkunaOrg/okuna-api
|
eabd37fef9d2be59b590ed8d72bee084ac377997
|
f87d8e80d2f182c01dbce68155ded0078ee707e4
|
refs/heads/master
| 2022-02-04T21:31:10.577601
| 2021-12-28T18:20:39
| 2021-12-28T18:20:39
| 151,052,951
| 185
| 92
|
MIT
| 2022-01-13T01:00:40
| 2018-10-01T07:44:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,402
|
py
|
storage_backends.py
|
from botocore.config import Config
from django.conf import settings
from storages.backends.s3boto3 import S3Boto3Storage
class S3StaticStorage(S3Boto3Storage):
location = settings.AWS_STATIC_LOCATION
def __init__(self, *args, **kwargs):
self.config = Config(s3={'addressing_style': self.addressing_style,
'use_accelerate_endpoint': True},
signature_version=self.signature_version)
super().__init__(*args, **kwargs)
class S3PublicMediaStorage(S3Boto3Storage):
location = settings.AWS_PUBLIC_MEDIA_LOCATION
file_overwrite = False
def __init__(self, *args, **kwargs):
self.config = Config(s3={'addressing_style': self.addressing_style,
'use_accelerate_endpoint': True},
signature_version=self.signature_version)
super().__init__(*args, **kwargs)
class S3PrivateMediaStorage(S3Boto3Storage):
location = settings.AWS_PRIVATE_MEDIA_LOCATION
default_acl = 'private'
file_overwrite = False
custom_domain = False
def __init__(self, *args, **kwargs):
self.config = Config(s3={'addressing_style': self.addressing_style,
'use_accelerate_endpoint': True},
signature_version=self.signature_version)
super().__init__(*args, **kwargs)
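# --- Wiring sketch (editor's addition) ---
# django-storages backends like the ones above are normally selected through
# settings rather than instantiated ad hoc. The dotted module path
# "openbook.storage_backends" and the AWS_* values below are assumptions
# inferred from this file's name and attribute usage, kept commented out
# because they belong in settings.py, not in this module.
#   STATICFILES_STORAGE = 'openbook.storage_backends.S3StaticStorage'
#   DEFAULT_FILE_STORAGE = 'openbook.storage_backends.S3PublicMediaStorage'
#   AWS_STATIC_LOCATION = 'static'
#   AWS_PUBLIC_MEDIA_LOCATION = 'media'
#   AWS_PRIVATE_MEDIA_LOCATION = 'private'
# A model can then opt into the private backend per field:
#   from django.db import models
#   class Report(models.Model):
#       document = models.FileField(storage=S3PrivateMediaStorage())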
|
aec7ab382dfda3758c52b33ebbb57e781075d721
|
0bcd128368e2de959ca648960ffd7944067fcf27
|
/infra/bots/recipe_modules/gsutil/examples/full.py
|
897321d7c5cd24859d53af582f58036afdea5516
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
google/skia
|
ac6e39179cd33cf0c8a46d29c1a70bf78b4d74ee
|
bf6b239838d3eb56562fffd0856f4047867ae771
|
refs/heads/main
| 2023-08-31T21:03:04.620734
| 2023-08-31T18:24:15
| 2023-08-31T20:20:26
| 15,773,229
| 8,064
| 1,487
|
BSD-3-Clause
| 2023-09-11T13:42:07
| 2014-01-09T17:09:57
|
C++
|
UTF-8
|
Python
| false
| false
| 2,318
|
py
|
full.py
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe which runs the Skia gsutils tests.
PYTHON_VERSION_COMPATIBILITY = "PY3"
DEPS = [
'gsutil',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/step',
'run',
'vars',
]
def RunSteps(api):
api.vars.setup()
api.gsutil.cp('test file', '/foo/file', 'gs://bar-bucket/file',
extra_gsutil_args=['--blah'], extra_args=['-Z'],
multithread=True)
def GenTests(api):
yield (
api.test('gsutil_tests') +
api.properties(buildername='Housekeeper-PerCommit-InfraTests',
repository='https://skia.googlesource.com/skia.git',
revision='abc123',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]')
)
yield (
api.test('gsutil_win_tests') +
api.properties(buildername='Test-Win10-MSVC-ShuttleA-GPU-GTX660-x86_64-Debug-All',
repository='https://skia.googlesource.com/skia.git',
revision='abc123',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]')
)
yield (
api.test('failed_one_upload') +
api.properties(buildername='Housekeeper-PerCommit-InfraTests',
repository='https://skia.googlesource.com/skia.git',
revision='abc123',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]') +
api.step_data('upload test file', retcode=1)
)
yield (
api.test('failed_all_uploads') +
api.properties(buildername='Housekeeper-PerCommit-InfraTests',
repository='https://skia.googlesource.com/skia.git',
revision='abc123',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]') +
api.step_data('upload test file', retcode=1) +
api.step_data('upload test file (attempt 2)', retcode=1) +
api.step_data('upload test file (attempt 3)', retcode=1) +
api.step_data('upload test file (attempt 4)', retcode=1) +
api.step_data('upload test file (attempt 5)', retcode=1)
)
|
b04ef95558ff11d5c84ed10c002f77afe0f12df4
|
94ff01bc029a3bd6a554abf7c6e609d363e50484
|
/sqlakeyset/constants.py
|
ecf2c8dd72fd326b2eff6dd96ec3cc97a02d96c9
|
[
"Unlicense"
] |
permissive
|
djrobstep/sqlakeyset
|
7ec76b73131b0cd31472d04744a622e00b8bc34c
|
6cd0d322f5bf26993777ed230cac2e4876f2a2f4
|
refs/heads/master
| 2023-08-09T04:01:20.737918
| 2023-08-04T11:41:39
| 2023-08-04T11:41:39
| 66,554,135
| 314
| 47
|
Unlicense
| 2023-08-04T11:41:41
| 2016-08-25T12:00:44
|
Python
|
UTF-8
|
Python
| false
| false
| 37
|
py
|
constants.py
|
ORDER_COL_PREFIX = "_sqlakeyset_oc_"
|
94f135a750ec9c19e521d7748a4ba18c97098bce
|
5dc77586e3e0f9de1f032fd2ca68494d8e58928f
|
/great_expectations/datasource/fluent/spark_filesystem_datasource.pyi
|
209f0edb82996657f40e272c8250aae1f7a9a3d5
|
[
"Apache-2.0"
] |
permissive
|
great-expectations/great_expectations
|
dd7c22e6277d6b08bee3ff38a015e6e8cd434df6
|
b0290e2fd2aa05aec6d7d8871b91cb4478e9501d
|
refs/heads/develop
| 2023-09-04T09:30:26.395518
| 2023-09-02T00:00:13
| 2023-09-02T00:00:13
| 103,071,520
| 8,931
| 1,535
|
Apache-2.0
| 2023-09-14T19:57:16
| 2017-09-11T00:18:46
|
Python
|
UTF-8
|
Python
| false
| false
| 31,665
|
pyi
|
spark_filesystem_datasource.pyi
|
import pathlib
import re
from logging import Logger
from typing import ClassVar, Literal, Optional, Type, Union
from great_expectations.compatibility.pyspark import (
types as pyspark_types,
)
from great_expectations.datasource.fluent import BatchMetadata, _SparkFilePathDatasource
from great_expectations.datasource.fluent.data_asset.data_connector import (
FilesystemDataConnector,
)
from great_expectations.datasource.fluent.interfaces import (
SortersDefinition,
)
from great_expectations.datasource.fluent.spark_file_path_datasource import (
CSVAsset,
DeltaAsset,
DirectoryCSVAsset,
DirectoryDeltaAsset,
DirectoryJSONAsset,
DirectoryORCAsset,
DirectoryParquetAsset,
DirectoryTextAsset,
JSONAsset,
ORCAsset,
ParquetAsset,
TextAsset,
)
logger: Logger
class SparkFilesystemDatasource(_SparkFilePathDatasource):
# class attributes
data_connector_type: ClassVar[Type[FilesystemDataConnector]] = ...
# instance attributes
type: Literal["spark_filesystem"] = "spark_filesystem"
base_directory: pathlib.Path
data_context_root_directory: Optional[pathlib.Path] = None
def add_csv_asset( # noqa: PLR0913
self,
name: str,
*,
batch_metadata: Optional[BatchMetadata] = ...,
batching_regex: re.Pattern | str = r".*",
glob_directive: str = "**/*",
order_by: Optional[SortersDefinition] = ...,
# vvv spark parameters for pyspark.sql.DataFrameReader.csv() (ordered as in pyspark v3.4.0)
# path: PathOrPaths,
# NA - path determined by asset
# schema: Optional[Union[StructType, str]] = None,
spark_schema: Optional[Union[pyspark_types.StructType, str]] = None,
# sep: Optional[str] = None,
sep: Optional[str] = None,
# encoding: Optional[str] = None,
encoding: Optional[str] = None,
# quote: Optional[str] = None,
quote: Optional[str] = None,
# escape: Optional[str] = None,
escape: Optional[str] = None,
# comment: Optional[str] = None,
comment: Optional[str] = None,
# header: Optional[Union[bool, str]] = None,
header: Optional[Union[bool, str]] = None,
# inferSchema: Optional[Union[bool, str]] = None,
infer_schema: Optional[Union[bool, str]] = None,
# ignoreLeadingWhiteSpace: Optional[Union[bool, str]] = None,
ignore_leading_white_space: Optional[Union[bool, str]] = None,
# ignoreTrailingWhiteSpace: Optional[Union[bool, str]] = None,
ignore_trailing_white_space: Optional[Union[bool, str]] = None,
# nullValue: Optional[str] = None,
null_value: Optional[str] = None,
# nanValue: Optional[str] = None,
nan_value: Optional[str] = None,
# positiveInf: Optional[str] = None,
positive_inf: Optional[str] = None,
# negativeInf: Optional[str] = None,
negative_inf: Optional[str] = None,
# dateFormat: Optional[str] = None,
date_format: Optional[str] = None,
# timestampFormat: Optional[str] = None,
timestamp_format: Optional[str] = None,
# maxColumns: Optional[Union[int, str]] = None,
max_columns: Optional[Union[int, str]] = None,
# maxCharsPerColumn: Optional[Union[int, str]] = None,
max_chars_per_column: Optional[Union[int, str]] = None,
# maxMalformedLogPerPartition: Optional[Union[int, str]] = None,
max_malformed_log_per_partition: Optional[Union[int, str]] = None,
# mode: Optional[str] = None,
mode: Optional[Literal["PERMISSIVE", "DROPMALFORMED", "FAILFAST"]] = None,
# columnNameOfCorruptRecord: Optional[str] = None,
column_name_of_corrupt_record: Optional[str] = None,
# multiLine: Optional[Union[bool, str]] = None,
multi_line: Optional[Union[bool, str]] = None,
# charToEscapeQuoteEscaping: Optional[str] = None,
char_to_escape_quote_escaping: Optional[str] = None,
# samplingRatio: Optional[Union[float, str]] = None,
sampling_ratio: Optional[Union[float, str]] = None,
# enforceSchema: Optional[Union[bool, str]] = None,
enforce_schema: Optional[Union[bool, str]] = None,
# emptyValue: Optional[str] = None,
empty_value: Optional[str] = None,
# locale: Optional[str] = None,
locale: Optional[str] = None,
# lineSep: Optional[str] = None,
line_sep: Optional[str] = None,
# pathGlobFilter: Optional[Union[bool, str]] = None,
path_glob_filter: Optional[Union[bool, str]] = None,
# recursiveFileLookup: Optional[Union[bool, str]] = None,
recursive_file_lookup: Optional[Union[bool, str]] = None,
# modifiedBefore: Optional[Union[bool, str]] = None,
modified_before: Optional[Union[bool, str]] = None,
# modifiedAfter: Optional[Union[bool, str]] = None,
modified_after: Optional[Union[bool, str]] = None,
# unescapedQuoteHandling: Optional[str] = None,
unescaped_quote_handling: Optional[
Literal[
"STOP_AT_CLOSING_QUOTE",
"BACK_TO_DELIMITER",
"STOP_AT_DELIMITER",
"SKIP_VALUE",
"RAISE_ERROR",
]
] = None,
# vvv pyspark Docs <> Source Code mismatch
# The following parameters are mentioned in https://spark.apache.org/docs/latest/sql-data-sources-generic-options.html
# however do not appear in the source code https://github.com/apache/spark/blob/v3.4.0/python/pyspark/sql/readwriter.py#L309
# Spark Generic File Reader Options vvv
# ignore_corrupt_files: bool = ...,
# ignore_missing_files: bool = ...,
# Spark Generic File Reader Options ^^^
# ^^^ pyspark Docs <> Source Code mismatch
# vvv pyspark Docs <> Source Code mismatch
# The following parameters are mentioned in https://spark.apache.org/docs/latest/sql-data-sources-csv.html
# however do not appear in the source code https://github.com/apache/spark/blob/v3.4.0/python/pyspark/sql/readwriter.py#L604
# CSV Specific Options vvv
# prefer_date: Optional[bool] = None,
# timestamp_ntz_format: Optional[str] = None,
# enable_date_time_parsing_fallback: Optional[bool] = None,
# CSV Specific Options ^^^
# ^^^ pyspark Docs <> Source Code mismatch
) -> CSVAsset: ...
def add_directory_csv_asset( # noqa: PLR0913
self,
name: str,
*,
batch_metadata: Optional[BatchMetadata] = ...,
glob_directive: str = "**/*",
order_by: Optional[SortersDefinition] = ...,
# Spark Directory Reader Options vvv
data_directory: str | pathlib.Path = ...,
# Spark Directory Reader Options ^^^
# vvv spark parameters for pyspark.sql.DataFrameReader.csv() (ordered as in pyspark v3.4.0)
# path: PathOrPaths,
# NA - path determined by asset
# schema: Optional[Union[StructType, str]] = None,
spark_schema: Optional[Union[pyspark_types.StructType, str]] = None,
# sep: Optional[str] = None,
sep: Optional[str] = None,
# encoding: Optional[str] = None,
encoding: Optional[str] = None,
# quote: Optional[str] = None,
quote: Optional[str] = None,
# escape: Optional[str] = None,
escape: Optional[str] = None,
# comment: Optional[str] = None,
comment: Optional[str] = None,
# header: Optional[Union[bool, str]] = None,
header: Optional[Union[bool, str]] = None,
# inferSchema: Optional[Union[bool, str]] = None,
infer_schema: Optional[Union[bool, str]] = None,
# ignoreLeadingWhiteSpace: Optional[Union[bool, str]] = None,
ignore_leading_white_space: Optional[Union[bool, str]] = None,
# ignoreTrailingWhiteSpace: Optional[Union[bool, str]] = None,
ignore_trailing_white_space: Optional[Union[bool, str]] = None,
# nullValue: Optional[str] = None,
null_value: Optional[str] = None,
# nanValue: Optional[str] = None,
nan_value: Optional[str] = None,
# positiveInf: Optional[str] = None,
positive_inf: Optional[str] = None,
# negativeInf: Optional[str] = None,
negative_inf: Optional[str] = None,
# dateFormat: Optional[str] = None,
date_format: Optional[str] = None,
# timestampFormat: Optional[str] = None,
timestamp_format: Optional[str] = None,
# maxColumns: Optional[Union[int, str]] = None,
max_columns: Optional[Union[int, str]] = None,
# maxCharsPerColumn: Optional[Union[int, str]] = None,
max_chars_per_column: Optional[Union[int, str]] = None,
# maxMalformedLogPerPartition: Optional[Union[int, str]] = None,
max_malformed_log_per_partition: Optional[Union[int, str]] = None,
# mode: Optional[str] = None,
mode: Optional[Literal["PERMISSIVE", "DROPMALFORMED", "FAILFAST"]] = None,
# columnNameOfCorruptRecord: Optional[str] = None,
column_name_of_corrupt_record: Optional[str] = None,
# multiLine: Optional[Union[bool, str]] = None,
multi_line: Optional[Union[bool, str]] = None,
# charToEscapeQuoteEscaping: Optional[str] = None,
char_to_escape_quote_escaping: Optional[str] = None,
# samplingRatio: Optional[Union[float, str]] = None,
sampling_ratio: Optional[Union[float, str]] = None,
# enforceSchema: Optional[Union[bool, str]] = None,
enforce_schema: Optional[Union[bool, str]] = None,
# emptyValue: Optional[str] = None,
empty_value: Optional[str] = None,
# locale: Optional[str] = None,
locale: Optional[str] = None,
# lineSep: Optional[str] = None,
line_sep: Optional[str] = None,
# pathGlobFilter: Optional[Union[bool, str]] = None,
path_glob_filter: Optional[Union[bool, str]] = None,
# recursiveFileLookup: Optional[Union[bool, str]] = None,
recursive_file_lookup: Optional[Union[bool, str]] = None,
# modifiedBefore: Optional[Union[bool, str]] = None,
modified_before: Optional[Union[bool, str]] = None,
# modifiedAfter: Optional[Union[bool, str]] = None,
modified_after: Optional[Union[bool, str]] = None,
# unescapedQuoteHandling: Optional[str] = None,
unescaped_quote_handling: Optional[
Literal[
"STOP_AT_CLOSING_QUOTE",
"BACK_TO_DELIMITER",
"STOP_AT_DELIMITER",
"SKIP_VALUE",
"RAISE_ERROR",
]
] = None,
# vvv pyspark Docs <> Source Code mismatch
# The following parameters are mentioned in https://spark.apache.org/docs/latest/sql-data-sources-generic-options.html
# however do not appear in the source code https://github.com/apache/spark/blob/v3.4.0/python/pyspark/sql/readwriter.py#L309
# Spark Generic File Reader Options vvv
# ignore_corrupt_files: bool = ...,
# ignore_missing_files: bool = ...,
# Spark Generic File Reader Options ^^^
# ^^^ pyspark Docs <> Source Code mismatch
# vvv pyspark Docs <> Source Code mismatch
# The following parameters are mentioned in https://spark.apache.org/docs/latest/sql-data-sources-csv.html
# however do not appear in the source code https://github.com/apache/spark/blob/v3.4.0/python/pyspark/sql/readwriter.py#L604
# CSV Specific Options vvv
# prefer_date: Optional[bool] = None,
# timestamp_ntz_format: Optional[str] = None,
# enable_date_time_parsing_fallback: Optional[bool] = None,
# CSV Specific Options ^^^
# ^^^ pyspark Docs <> Source Code mismatch
) -> DirectoryCSVAsset: ...
def add_parquet_asset( # noqa: PLR0913
self,
name: str,
*,
batch_metadata: Optional[BatchMetadata] = ...,
batching_regex: re.Pattern | str = r".*",
glob_directive: str = "**/*",
order_by: Optional[SortersDefinition] = ...,
# Spark Generic File Reader Options vvv
path_glob_filter: Optional[Union[bool, str]] = None,
modified_before: Optional[Union[bool, str]] = None,
modified_after: Optional[Union[bool, str]] = None,
recursive_file_lookup: Optional[Union[bool, str]] = None,
# Spark Generic File Reader Options ^^^
# vvv spark parameters for pyspark.sql.DataFrameReader.parquet() (ordered as in pyspark v3.4.0)
# See https://spark.apache.org/docs/latest/sql-data-sources-parquet.html for more info.
# Parquet Specific Options vvv
merge_schema: Optional[Union[bool, str]] = None,
datetime_rebase_mode: Optional[
Literal["EXCEPTION", "CORRECTED", "LEGACY"]
] = None,
int_96_rebase_mode: Optional[
Literal["EXCEPTION", "CORRECTED", "LEGACY"]
] = None,
# Parquet Specific Options ^^^
# vvv pyspark Docs <> Source Code mismatch
# The following parameters are mentioned in https://spark.apache.org/docs/latest/sql-data-sources-generic-options.html
# however do not appear in the source code https://github.com/apache/spark/blob/v3.4.0/python/pyspark/sql/readwriter.py#L473
# Spark Generic File Reader Options vvv
# ignore_corrupt_files: bool = ...,
# ignore_missing_files: bool = ...,
# Spark Generic File Reader Options ^^^
# ^^^ pyspark Docs <> Source Code mismatch
) -> ParquetAsset: ...
def add_directory_parquet_asset( # noqa: PLR0913
self,
name: str,
*,
batch_metadata: Optional[BatchMetadata] = ...,
batching_regex: re.Pattern | str = r".*",
glob_directive: str = "**/*",
order_by: Optional[SortersDefinition] = ...,
# Spark Directory Reader Options vvv
data_directory: str | pathlib.Path = ...,
# Spark Directory Reader Options ^^^
# Spark Generic File Reader Options vvv
path_glob_filter: Optional[Union[bool, str]] = None,
modified_before: Optional[Union[bool, str]] = None,
modified_after: Optional[Union[bool, str]] = None,
recursive_file_lookup: Optional[Union[bool, str]] = None,
# Spark Generic File Reader Options ^^^
# vvv spark parameters for pyspark.sql.DataFrameReader.parquet() (ordered as in pyspark v3.4.0)
# See https://spark.apache.org/docs/latest/sql-data-sources-parquet.html for more info.
# Parquet Specific Options vvv
merge_schema: Optional[Union[bool, str]] = None,
datetime_rebase_mode: Optional[
Literal["EXCEPTION", "CORRECTED", "LEGACY"]
] = None,
int_96_rebase_mode: Optional[
Literal["EXCEPTION", "CORRECTED", "LEGACY"]
] = None,
# Parquet Specific Options ^^^
# vvv pyspark Docs <> Source Code mismatch
# The following parameters are mentioned in https://spark.apache.org/docs/latest/sql-data-sources-generic-options.html
# however do not appear in the source code https://github.com/apache/spark/blob/v3.4.0/python/pyspark/sql/readwriter.py#L473
# Spark Generic File Reader Options vvv
# ignore_corrupt_files: bool = ...,
# ignore_missing_files: bool = ...,
# Spark Generic File Reader Options ^^^
# ^^^ pyspark Docs <> Source Code mismatch
) -> DirectoryParquetAsset: ...
def add_orc_asset( # noqa: PLR0913
self,
name: str,
*,
batch_metadata: Optional[BatchMetadata] = ...,
batching_regex: re.Pattern | str = r".*",
glob_directive: str = "**/*",
order_by: Optional[SortersDefinition] = ...,
# Spark Generic File Reader Options vvv
path_glob_filter: Optional[Union[bool, str]] = None,
modified_before: Optional[Union[bool, str]] = None,
modified_after: Optional[Union[bool, str]] = None,
recursive_file_lookup: Optional[Union[bool, str]] = None,
# Spark Generic File Reader Options ^^^
# ORC Specific Options vvv
merge_schema: Optional[Union[bool, str]] = None,
# ORC Specific Options ^^^
# vvv pyspark Docs <> Source Code mismatch
# The following parameters are mentioned in https://spark.apache.org/docs/latest/sql-data-sources-generic-options.html
# however do not appear in the source code https://github.com/apache/spark/blob/v3.4.0/python/pyspark/sql/readwriter.py#L473
# Spark Generic File Reader Options vvv
# ignore_corrupt_files: bool = ...,
# ignore_missing_files: bool = ...,
# Spark Generic File Reader Options ^^^
# ^^^ pyspark Docs <> Source Code mismatch
) -> ORCAsset: ...
def add_directory_orc_asset( # noqa: PLR0913
self,
name: str,
*,
batch_metadata: Optional[BatchMetadata] = ...,
batching_regex: re.Pattern | str = r".*",
glob_directive: str = "**/*",
order_by: Optional[SortersDefinition] = ...,
# Spark Directory Reader Options vvv
data_directory: str | pathlib.Path = ...,
# Spark Directory Reader Options ^^^
# Spark Generic File Reader Options vvv
path_glob_filter: Optional[Union[bool, str]] = None,
modified_before: Optional[Union[bool, str]] = None,
modified_after: Optional[Union[bool, str]] = None,
recursive_file_lookup: Optional[Union[bool, str]] = None,
# Spark Generic File Reader Options ^^^
# ORC Specific Options vvv
merge_schema: Optional[Union[bool, str]] = None,
# ORC Specific Options ^^^
# vvv pyspark Docs <> Source Code mismatch
# The following parameters are mentioned in https://spark.apache.org/docs/latest/sql-data-sources-generic-options.html
# however do not appear in the source code https://github.com/apache/spark/blob/v3.4.0/python/pyspark/sql/readwriter.py#L473
# Spark Generic File Reader Options vvv
# ignore_corrupt_files: bool = ...,
# ignore_missing_files: bool = ...,
# Spark Generic File Reader Options ^^^
# ^^^ pyspark Docs <> Source Code mismatch
) -> DirectoryORCAsset: ...
def add_json_asset( # noqa: PLR0913
self,
name: str,
*,
batch_metadata: Optional[BatchMetadata] = ...,
batching_regex: re.Pattern | str = r".*",
glob_directive: str = "**/*",
order_by: Optional[SortersDefinition] = ...,
# vvv spark parameters for pyspark.sql.DataFrameReader.json() (ordered as in pyspark v3.4.0)
# path: Union[str, List[str], RDD[str]],
# NA - path determined by asset
# schema: Optional[Union[StructType, str]] = None,
spark_schema: Optional[Union[pyspark_types.StructType, str]] = None,
# primitivesAsString: Optional[Union[bool, str]] = None,
primitives_as_string: Optional[Union[bool, str]] = None,
# prefersDecimal: Optional[Union[bool, str]] = None,
prefers_decimal: Optional[Union[bool, str]] = None,
# allowComments: Optional[Union[bool, str]] = None,
allow_comments: Optional[Union[bool, str]] = None,
# allowUnquotedFieldNames: Optional[Union[bool, str]] = None,
allow_unquoted_field_names: Optional[Union[bool, str]] = None,
# allowSingleQuotes: Optional[Union[bool, str]] = None,
allow_single_quotes: Optional[Union[bool, str]] = None,
# allowNumericLeadingZero: Optional[Union[bool, str]] = None,
allow_numeric_leading_zero: Optional[Union[bool, str]] = None,
# allowBackslashEscapingAnyCharacter: Optional[Union[bool, str]] = None,
allow_backslash_escaping_any_character: Optional[Union[bool, str]] = None,
# mode: Optional[str] = None,
mode: Optional[Literal["PERMISSIVE", "DROPMALFORMED", "FAILFAST"]] = None,
# columnNameOfCorruptRecord: Optional[str] = None,
column_name_of_corrupt_record: Optional[str] = None,
# dateFormat: Optional[str] = None,
date_format: Optional[str] = None,
# timestampFormat: Optional[str] = None,
timestamp_format: Optional[str] = None,
# multiLine: Optional[Union[bool, str]] = None,
multi_line: Optional[Union[bool, str]] = None,
# allowUnquotedControlChars: Optional[Union[bool, str]] = None,
allow_unquoted_control_chars: Optional[Union[bool, str]] = None,
# lineSep: Optional[str] = None,
line_sep: Optional[str] = None,
# samplingRatio: Optional[Union[float, str]] = None,
sampling_ratio: Optional[Union[float, str]] = None,
# dropFieldIfAllNull: Optional[Union[bool, str]] = None,
drop_field_if_all_null: Optional[Union[bool, str]] = None,
# encoding: Optional[str] = None,
encoding: Optional[str] = None,
# locale: Optional[str] = None,
locale: Optional[str] = None,
# pathGlobFilter: Optional[Union[bool, str]] = None,
path_glob_filter: Optional[Union[bool, str]] = None,
# recursiveFileLookup: Optional[Union[bool, str]] = None,
recursive_file_lookup: Optional[Union[bool, str]] = None,
# modifiedBefore: Optional[Union[bool, str]] = None,
modified_before: Optional[Union[bool, str]] = None,
# modifiedAfter: Optional[Union[bool, str]] = None,
modified_after: Optional[Union[bool, str]] = None,
# allowNonNumericNumbers: Optional[Union[bool, str]] = None,
allow_non_numeric_numbers: Optional[Union[bool, str]] = None,
# vvv pyspark Docs <> Source Code mismatch
# The following parameters are mentioned in https://spark.apache.org/docs/latest/sql-data-sources-generic-options.html
# however do not appear in the source code https://github.com/apache/spark/blob/v3.4.0/python/pyspark/sql/readwriter.py#L309
# Spark Generic File Reader Options vvv
# ignore_corrupt_files: bool = ...,
# ignore_missing_files: bool = ...,
# Spark Generic File Reader Options ^^^
# ^^^ pyspark Docs <> Source Code mismatch
# vvv pyspark Docs <> Source Code mismatch
# The following parameters are mentioned in https://spark.apache.org/docs/latest/sql-data-sources-json.html
# however do not appear in the source code https://github.com/apache/spark/blob/v3.4.0/python/pyspark/sql/readwriter.py#L309
# JSON Specific Options vvv
# timezone: str = ...,
# timestamp_ntz_format: str = "yyyy-MM-dd'T'HH:mm:ss[.SSS]",
# enable_date_time_parsing_fallback: bool = ...,
# JSON Specific Options ^^^
# ^^^ pyspark Docs <> Source Code mismatch
) -> JSONAsset: ...
def add_directory_json_asset( # noqa: PLR0913
self,
name: str,
*,
batch_metadata: Optional[BatchMetadata] = ...,
batching_regex: re.Pattern | str = r".*",
glob_directive: str = "**/*",
order_by: Optional[SortersDefinition] = ...,
# Spark Directory Reader Options vvv
data_directory: str | pathlib.Path = ...,
# Spark Directory Reader Options ^^^
# vvv spark parameters for pyspark.sql.DataFrameReader.json() (ordered as in pyspark v3.4.0)
# path: Union[str, List[str], RDD[str]],
# NA - path determined by asset
# schema: Optional[Union[StructType, str]] = None,
spark_schema: Optional[Union[pyspark_types.StructType, str]] = None,
# primitivesAsString: Optional[Union[bool, str]] = None,
primitives_as_string: Optional[Union[bool, str]] = None,
# prefersDecimal: Optional[Union[bool, str]] = None,
prefers_decimal: Optional[Union[bool, str]] = None,
# allowComments: Optional[Union[bool, str]] = None,
allow_comments: Optional[Union[bool, str]] = None,
# allowUnquotedFieldNames: Optional[Union[bool, str]] = None,
allow_unquoted_field_names: Optional[Union[bool, str]] = None,
# allowSingleQuotes: Optional[Union[bool, str]] = None,
allow_single_quotes: Optional[Union[bool, str]] = None,
# allowNumericLeadingZero: Optional[Union[bool, str]] = None,
allow_numeric_leading_zero: Optional[Union[bool, str]] = None,
# allowBackslashEscapingAnyCharacter: Optional[Union[bool, str]] = None,
allow_backslash_escaping_any_character: Optional[Union[bool, str]] = None,
# mode: Optional[str] = None,
mode: Optional[Literal["PERMISSIVE", "DROPMALFORMED", "FAILFAST"]] = None,
# columnNameOfCorruptRecord: Optional[str] = None,
column_name_of_corrupt_record: Optional[str] = None,
# dateFormat: Optional[str] = None,
date_format: Optional[str] = None,
# timestampFormat: Optional[str] = None,
timestamp_format: Optional[str] = None,
# multiLine: Optional[Union[bool, str]] = None,
multi_line: Optional[Union[bool, str]] = None,
# allowUnquotedControlChars: Optional[Union[bool, str]] = None,
allow_unquoted_control_chars: Optional[Union[bool, str]] = None,
# lineSep: Optional[str] = None,
line_sep: Optional[str] = None,
# samplingRatio: Optional[Union[float, str]] = None,
sampling_ratio: Optional[Union[float, str]] = None,
# dropFieldIfAllNull: Optional[Union[bool, str]] = None,
drop_field_if_all_null: Optional[Union[bool, str]] = None,
# encoding: Optional[str] = None,
encoding: Optional[str] = None,
# locale: Optional[str] = None,
locale: Optional[str] = None,
# pathGlobFilter: Optional[Union[bool, str]] = None,
path_glob_filter: Optional[Union[bool, str]] = None,
# recursiveFileLookup: Optional[Union[bool, str]] = None,
recursive_file_lookup: Optional[Union[bool, str]] = None,
# modifiedBefore: Optional[Union[bool, str]] = None,
modified_before: Optional[Union[bool, str]] = None,
# modifiedAfter: Optional[Union[bool, str]] = None,
modified_after: Optional[Union[bool, str]] = None,
# allowNonNumericNumbers: Optional[Union[bool, str]] = None,
allow_non_numeric_numbers: Optional[Union[bool, str]] = None,
# vvv pyspark Docs <> Source Code mismatch
# The following parameters are mentioned in https://spark.apache.org/docs/latest/sql-data-sources-generic-options.html
# however do not appear in the source code https://github.com/apache/spark/blob/v3.4.0/python/pyspark/sql/readwriter.py#L309
# Spark Generic File Reader Options vvv
# ignore_corrupt_files: bool = ...,
# ignore_missing_files: bool = ...,
# Spark Generic File Reader Options ^^^
# ^^^ pyspark Docs <> Source Code mismatch
# vvv pyspark Docs <> Source Code mismatch
# The following parameters are mentioned in https://spark.apache.org/docs/latest/sql-data-sources-json.html
# however do not appear in the source code https://github.com/apache/spark/blob/v3.4.0/python/pyspark/sql/readwriter.py#L309
# JSON Specific Options vvv
# timezone: str = ...,
# timestamp_ntz_format: str = "yyyy-MM-dd'T'HH:mm:ss[.SSS]",
# enable_date_time_parsing_fallback: bool = ...,
# JSON Specific Options ^^^
# ^^^ pyspark Docs <> Source Code mismatch
) -> DirectoryJSONAsset: ...
def add_text_asset( # noqa: PLR0913
self,
name: str,
*,
batch_metadata: Optional[BatchMetadata] = ...,
batching_regex: re.Pattern | str = r".*",
glob_directive: str = "**/*",
order_by: Optional[SortersDefinition] = ...,
# Spark Generic File Reader Options vvv
path_glob_filter: Optional[Union[bool, str]] = None,
modified_before: Optional[Union[bool, str]] = None,
modified_after: Optional[Union[bool, str]] = None,
recursive_file_lookup: Optional[Union[bool, str]] = None,
# Spark Generic File Reader Options ^^^
# Text Specific Options vvv
# wholetext: bool = False,
wholetext: bool = False,
# lineSep: Optional[str] = None,
line_sep: Optional[str] = None,
# Text Specific Options ^^^
# vvv pyspark Docs <> Source Code mismatch
# The following parameters are mentioned in https://spark.apache.org/docs/latest/sql-data-sources-generic-options.html
# however do not appear in the source code https://github.com/apache/spark/blob/v3.4.0/python/pyspark/sql/readwriter.py#L309
# Spark Generic File Reader Options vvv
# ignore_corrupt_files: bool = ...,
# ignore_missing_files: bool = ...,
# Spark Generic File Reader Options ^^^
# ^^^ pyspark Docs <> Source Code mismatch
) -> TextAsset: ...
def add_directory_text_asset( # noqa: PLR0913
self,
name: str,
*,
batch_metadata: Optional[BatchMetadata] = ...,
batching_regex: re.Pattern | str = r".*",
glob_directive: str = "**/*",
order_by: Optional[SortersDefinition] = ...,
# Spark Directory Reader Options vvv
data_directory: str | pathlib.Path = ...,
# Spark Directory Reader Options ^^^
# Spark Generic File Reader Options vvv
path_glob_filter: Optional[Union[bool, str]] = None,
modified_before: Optional[Union[bool, str]] = None,
modified_after: Optional[Union[bool, str]] = None,
recursive_file_lookup: Optional[Union[bool, str]] = None,
# Spark Generic File Reader Options ^^^
# Text Specific Options vvv
# wholetext: bool = False,
wholetext: bool = False,
# lineSep: Optional[str] = None,
line_sep: Optional[str] = None,
# Text Specific Options ^^^
# vvv pyspark Docs <> Source Code mismatch
# The following parameters are mentioned in https://spark.apache.org/docs/latest/sql-data-sources-generic-options.html
# however do not appear in the source code https://github.com/apache/spark/blob/v3.4.0/python/pyspark/sql/readwriter.py#L309
# Spark Generic File Reader Options vvv
# ignore_corrupt_files: bool = ...,
# ignore_missing_files: bool = ...,
# Spark Generic File Reader Options ^^^
# ^^^ pyspark Docs <> Source Code mismatch
) -> DirectoryTextAsset: ...
def add_delta_asset( # noqa: PLR0913
self,
name: str,
*,
batch_metadata: Optional[BatchMetadata] = ...,
batching_regex: re.Pattern | str = r".*",
glob_directive: str = "**/*",
order_by: Optional[SortersDefinition] = ...,
# Delta Specific Options vvv
timestamp_as_of: Optional[str] = None,
version_as_of: Optional[str] = None,
# Delta Specific Options ^^^
) -> DeltaAsset: ...
def add_delta_directory_asset( # noqa: PLR0913
self,
name: str,
*,
batch_metadata: Optional[BatchMetadata] = ...,
batching_regex: re.Pattern | str = r".*",
glob_directive: str = "**/*",
order_by: Optional[SortersDefinition] = ...,
# Spark Directory Reader Options vvv
data_directory: str | pathlib.Path = ...,
# Spark Directory Reader Options ^^^
# Delta Specific Options vvv
timestamp_as_of: Optional[str] = None,
version_as_of: Optional[str] = None,
# Delta Specific Options ^^^
) -> DirectoryDeltaAsset: ...
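# --- Usage sketch (editor's addition, not part of the stub) ---
# A hedged example of how the add_csv_asset() signature declared above might be
# called from user code. The gx.get_context()/sources.add_spark_filesystem()
# entry points, the base directory and the batching regex are illustrative
# assumptions, kept as comments because a .pyi stub carries no runtime code.
#   import great_expectations as gx
#   context = gx.get_context()
#   datasource = context.sources.add_spark_filesystem(
#       name="local_files", base_directory="./data"
#   )
#   asset = datasource.add_csv_asset(
#       name="trips_csv",
#       batching_regex=r"trips_(?P<year>\d{4})-(?P<month>\d{2})\.csv",
#       header=True,
#       infer_schema=True,
#   )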
|
437cc8c07b08a6eeb867873989bb4ce1c33042c8
|
3f763cf893b09a3be562858613c928703ff349e4
|
/client/verta/verta/_protos/public/uac/UserV2_pb2_grpc.py
|
29b16143cee63287561319e8714201497051d3e8
|
[
"Apache-2.0"
] |
permissive
|
VertaAI/modeldb
|
636e46fc025b01a514d599b10e228c8735503357
|
ec9ac7712500adb13fd815dfd476ce9f536c6921
|
refs/heads/main
| 2023-08-31T00:45:37.220628
| 2023-08-30T18:45:13
| 2023-08-30T18:45:13
| 71,305,435
| 844
| 142
|
Apache-2.0
| 2023-09-14T19:24:13
| 2016-10-19T01:07:26
|
Java
|
UTF-8
|
Python
| false
| false
| 8,096
|
py
|
UserV2_pb2_grpc.py
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from ..uac import UACService_pb2 as uac_dot_UACService__pb2
from ..uac import UserV2_pb2 as uac_dot_UserV2__pb2
class UserServiceV2Stub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.searchUsers = channel.unary_unary(
'/ai.verta.uac.UserServiceV2/searchUsers',
request_serializer=uac_dot_UserV2__pb2.SearchUsers.SerializeToString,
response_deserializer=uac_dot_UserV2__pb2.SearchUsers.Response.FromString,
)
self.addUser = channel.unary_unary(
'/ai.verta.uac.UserServiceV2/addUser',
request_serializer=uac_dot_UserV2__pb2.AddUserV2.SerializeToString,
response_deserializer=uac_dot_UserV2__pb2.AddUserV2.Response.FromString,
)
self.removeUser = channel.unary_unary(
'/ai.verta.uac.UserServiceV2/removeUser',
request_serializer=uac_dot_UserV2__pb2.RemoveUserV2.SerializeToString,
response_deserializer=uac_dot_UserV2__pb2.RemoveUserV2.Response.FromString,
)
self.addServiceAccount = channel.unary_unary(
'/ai.verta.uac.UserServiceV2/addServiceAccount',
request_serializer=uac_dot_UserV2__pb2.AddServiceAccount.SerializeToString,
response_deserializer=uac_dot_UserV2__pb2.AddServiceAccount.Response.FromString,
)
self.removeServiceAccount = channel.unary_unary(
'/ai.verta.uac.UserServiceV2/removeServiceAccount',
request_serializer=uac_dot_UserV2__pb2.RemoveServiceAccount.SerializeToString,
response_deserializer=uac_dot_UserV2__pb2.RemoveServiceAccount.Response.FromString,
)
self.getUser = channel.unary_unary(
'/ai.verta.uac.UserServiceV2/getUser',
request_serializer=uac_dot_UserV2__pb2.GetUserV2.SerializeToString,
response_deserializer=uac_dot_UserV2__pb2.GetUserV2.Response.FromString,
)
self.getCurrentUser = channel.unary_unary(
'/ai.verta.uac.UserServiceV2/getCurrentUser',
request_serializer=uac_dot_UACService__pb2.Empty.SerializeToString,
response_deserializer=uac_dot_UserV2__pb2.UserDetails.FromString,
)
self.userExists = channel.unary_unary(
'/ai.verta.uac.UserServiceV2/userExists',
request_serializer=uac_dot_UserV2__pb2.UserExists.SerializeToString,
response_deserializer=uac_dot_UserV2__pb2.UserExists.Response.FromString,
)
self.changeCurrentUserPassword = channel.unary_unary(
'/ai.verta.uac.UserServiceV2/changeCurrentUserPassword',
request_serializer=uac_dot_UserV2__pb2.ChangeCurrentUserPassword.SerializeToString,
response_deserializer=uac_dot_UACService__pb2.Empty.FromString,
)
class UserServiceV2Servicer(object):
# missing associated documentation comment in .proto file
pass
def searchUsers(self, request, context):
"""List for users inside an organization, returning details
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def addUser(self, request, context):
"""Adds the given user to the organization
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def removeUser(self, request, context):
"""Removes the given user to the organization
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def addServiceAccount(self, request, context):
"""Adds a service account to the organization
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def removeServiceAccount(self, request, context):
"""Removes a service account from the organization
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def getUser(self, request, context):
"""Get a user inside an organization, returning details
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def getCurrentUser(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def userExists(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def changeCurrentUserPassword(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_UserServiceV2Servicer_to_server(servicer, server):
rpc_method_handlers = {
'searchUsers': grpc.unary_unary_rpc_method_handler(
servicer.searchUsers,
request_deserializer=uac_dot_UserV2__pb2.SearchUsers.FromString,
response_serializer=uac_dot_UserV2__pb2.SearchUsers.Response.SerializeToString,
),
'addUser': grpc.unary_unary_rpc_method_handler(
servicer.addUser,
request_deserializer=uac_dot_UserV2__pb2.AddUserV2.FromString,
response_serializer=uac_dot_UserV2__pb2.AddUserV2.Response.SerializeToString,
),
'removeUser': grpc.unary_unary_rpc_method_handler(
servicer.removeUser,
request_deserializer=uac_dot_UserV2__pb2.RemoveUserV2.FromString,
response_serializer=uac_dot_UserV2__pb2.RemoveUserV2.Response.SerializeToString,
),
'addServiceAccount': grpc.unary_unary_rpc_method_handler(
servicer.addServiceAccount,
request_deserializer=uac_dot_UserV2__pb2.AddServiceAccount.FromString,
response_serializer=uac_dot_UserV2__pb2.AddServiceAccount.Response.SerializeToString,
),
'removeServiceAccount': grpc.unary_unary_rpc_method_handler(
servicer.removeServiceAccount,
request_deserializer=uac_dot_UserV2__pb2.RemoveServiceAccount.FromString,
response_serializer=uac_dot_UserV2__pb2.RemoveServiceAccount.Response.SerializeToString,
),
'getUser': grpc.unary_unary_rpc_method_handler(
servicer.getUser,
request_deserializer=uac_dot_UserV2__pb2.GetUserV2.FromString,
response_serializer=uac_dot_UserV2__pb2.GetUserV2.Response.SerializeToString,
),
'getCurrentUser': grpc.unary_unary_rpc_method_handler(
servicer.getCurrentUser,
request_deserializer=uac_dot_UACService__pb2.Empty.FromString,
response_serializer=uac_dot_UserV2__pb2.UserDetails.SerializeToString,
),
'userExists': grpc.unary_unary_rpc_method_handler(
servicer.userExists,
request_deserializer=uac_dot_UserV2__pb2.UserExists.FromString,
response_serializer=uac_dot_UserV2__pb2.UserExists.Response.SerializeToString,
),
'changeCurrentUserPassword': grpc.unary_unary_rpc_method_handler(
servicer.changeCurrentUserPassword,
request_deserializer=uac_dot_UserV2__pb2.ChangeCurrentUserPassword.FromString,
response_serializer=uac_dot_UACService__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'ai.verta.uac.UserServiceV2', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
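# --- Client usage sketch (editor's addition; the generated code above is unchanged) ---
# Shows how the stub class might be exercised. The target address is an
# assumption and a UserServiceV2 server must already be listening there; the
# empty SearchUsers request is only meant to illustrate the call shape.
if __name__ == '__main__':
    channel = grpc.insecure_channel('localhost:50051')
    stub = UserServiceV2Stub(channel)
    request = uac_dot_UserV2__pb2.SearchUsers()
    response = stub.searchUsers(request)
    print(response)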
|
5416610f0b954df25c6b3cc00c183a8d9791a565
|
f8dee139258b7d971bd1cfa16bd16e356537bbac
|
/Contents/Libraries/Shared/subliminal/exceptions.py
|
ed46be1a4da776cd286674a5c4eb438782901c0b
|
[
"MIT"
] |
permissive
|
pannal/Sub-Zero.bundle
|
79673016ae68d1f2e9886fd30b8763b73a8f6cf8
|
4ced7d8c8f9f5fb47d12410f87fa33d782e9f0f4
|
refs/heads/master
| 2023-07-27T23:04:32.925845
| 2023-07-09T13:07:38
| 2023-07-09T13:08:04
| 21,959,699
| 1,820
| 178
|
NOASSERTION
| 2022-11-28T03:23:13
| 2014-07-17T22:19:13
|
Python
|
UTF-8
|
Python
| false
| false
| 807
|
py
|
exceptions.py
|
# -*- coding: utf-8 -*-
class Error(Exception):
"""Base class for exceptions in subliminal."""
pass
class ProviderError(Error):
"""Exception raised by providers."""
pass
class ConfigurationError(ProviderError):
"""Exception raised by providers when badly configured."""
pass
class AuthenticationError(ProviderError):
"""Exception raised by providers when authentication failed."""
pass
class ServiceUnavailable(ProviderError):
"""Exception raised when status is '503 Service Unavailable'."""
pass
class DownloadLimitExceeded(ProviderError):
"""Exception raised by providers when download limit is exceeded."""
pass
class DownloadLimitPerDayExceeded(ProviderError):
"""Exception raised by providers when download limit is exceeded."""
pass
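# --- Handling sketch (editor's addition) ---
# Illustrates how a caller might distinguish the classes above. fake_download()
# is a made-up stand-in, not a subliminal API.
def fake_download(remaining_quota):
    if remaining_quota <= 0:
        raise DownloadLimitExceeded("quota used up")
    return "subtitle data"
if __name__ == "__main__":
    try:
        fake_download(0)
    except DownloadLimitExceeded as error:
        print("specific handler: %s" % error)
    except ProviderError as error:
        # any other provider-side failure is still caught via the base class
        print("generic provider failure: %s" % error)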
|
f1f0c5fe028fd8af001e9fa29ceafe092ffeaeb5
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/services/discovery/jobs/box/suggestcli.py
|
94605c720a0ce35c0a610295f27f7d578159ec19
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 3,641
|
py
|
suggestcli.py
|
# ---------------------------------------------------------------------
# Suggest CLI credentials check
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.services.discovery.jobs.base import DiscoveryCheck
from noc.core.service.client import open_sync_rpc
from noc.core.service.error import RPCError
from noc.core.script.scheme import SSH
from noc.core.text import safe_shadow
class SuggestCLICheck(DiscoveryCheck):
"""
Version discovery
"""
name = "suggest_cli"
required_script = "login"
def handler(self):
if not self.object.auth_profile or not self.object.auth_profile.enable_suggest:
return
if self.object.profile.is_generic:
self.logger.info("Profile is not detected properly. Skipping")
return
message = "Unknown"
for user, password, super_password in self.object.auth_profile.iter_cli():
result, message = self.check_login(user, password, super_password)
if result:
if self.object._suggest_snmp:
ro, rw, version = self.object._suggest_snmp # noqa
else:
ro, rw, version = None, None, None # noqa
self.set_credentials(
user=user,
password=password,
super_password=super_password,
snmp_ro=ro,
snmp_rw=rw,
)
return
self.logger.info("Failed to guess CLI credentials")
self.set_problem(
alarm_class="Discovery | Guess | CLI Credentials",
message="Failed to guess CLI credentials (%s)" % message,
fatal=True,
)
def check_login(self, user, password, super_password):
self.logger.debug("Checking %s/%s/%s", user, password, super_password)
self.logger.info(
"Checking %s/%s/%s",
safe_shadow(user),
safe_shadow(password),
safe_shadow(super_password),
)
try:
r = open_sync_rpc(
"activator", pool=self.object.pool.name, calling_service="discovery"
).script(
"%s.login" % self.object.profile.name,
{
"cli_protocol": "ssh" if self.object.scheme == SSH else "telnet",
"address": self.object.address,
"user": user,
"password": password,
"super_password": super_password,
"path": None,
"raise_privileges": self.object.to_raise_privileges,
"access_preference": self.object.get_access_preference(),
},
)
self.logger.info("Result: %s, %s", r, r["message"])
return bool(r["result"]), r["message"] # bool(False) == bool(None)
except RPCError as e:
self.logger.debug("RPC Error: %s", e)
return False, ""
def set_credentials(self, user, password, super_password, snmp_ro, snmp_rw):
self.logger.info("Setting credentials")
self.object.user = user
self.object.password = password
self.object.super_password = super_password
self.object.snmp_ro = snmp_ro
self.object.snmp_rw = snmp_rw
# Reset auth profile to continue operations with new credentials
self.object.auth_profile = None
self.object.save()
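# --- Pattern sketch (editor's addition) ---
# handler() above walks the auth profile's credential tuples and keeps the first
# set that logs in. The same pattern in isolation, with try_login() standing in
# for check_login(); both names here are hypothetical, framework-free stand-ins.
def guess_credentials(candidates, try_login):
    for user, password, super_password in candidates:
        ok, _message = try_login(user, password, super_password)
        if ok:
            return user, password, super_password
    return None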
|
7b6e6e1a59a0020072304fd1403e6f94820dedc0
|
fb369693686cbd93799f68bcd0b4fdcf4c65d49a
|
/zavod/zavod/exporters/senzing.py
|
3d53d842743cb66d7203593fbc102194a870bdfa
|
[
"MIT",
"CC-BY-NC-4.0"
] |
permissive
|
opensanctions/opensanctions
|
8a43c173bd9c1422b5ca3e2ec35bcac70f8f1573
|
229b59247e67ad0661abb0a6f7155a61042a32ea
|
refs/heads/main
| 2023-09-03T23:59:34.785846
| 2023-09-03T08:46:14
| 2023-09-03T08:46:14
| 47,451,451
| 155
| 32
|
MIT
| 2023-09-14T05:46:11
| 2015-12-05T10:19:57
|
Python
|
UTF-8
|
Python
| false
| false
| 880
|
py
|
senzing.py
|
from nomenklatura.senzing import senzing_record
from typing import cast, Dict, Any
from zavod.entity import Entity
from zavod.exporters.common import Exporter
from zavod.util import write_json
class SenzingExporter(Exporter):
TITLE = "Senzing entity format"
FILE_NAME = "senzing.json"
MIME_TYPE = "application/json+senzing"
def setup(self) -> None:
super().setup()
self.fh = open(self.path, "wb")
self.source_name = f"OS_{self.dataset.name.upper()}"
if self.dataset.name in ("all", "default"):
self.source_name = "OPENSANCTIONS"
def feed(self, entity: Entity) -> None:
record = senzing_record(self.source_name, entity, self.view)
if record is not None:
write_json(cast(Dict[str, Any], record), self.fh)
def finish(self) -> None:
self.fh.close()
super().finish()
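# --- Driver sketch (editor's addition) ---
# Shows the setup/feed/finish lifecycle the exporter above is written against.
# How a SenzingExporter instance and the entity stream are constructed is left
# out; they are assumed to come from the surrounding zavod machinery.
def run_export(exporter, entities):
    exporter.setup()
    try:
        for entity in entities:
            exporter.feed(entity)
    finally:
        # finish() must run even on failure so the output file handle is closed
        exporter.finish()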
|
c8156cef65ad2ab7a965a60c807f120206dd0960
|
650de196d47005cc53c9d040ddc76c386fe1f533
|
/flickr_api/upload.py
|
a6c965dd9652af6af7c8d382c71b5dda54825298
|
[] |
permissive
|
alexis-mignon/python-flickr-api
|
66a1e6823d658450f013c005771571c0254a551c
|
f515d503d7dd0c208a15d77c06e15c60684fda38
|
refs/heads/master
| 2023-08-22T07:29:56.158289
| 2023-07-26T00:12:49
| 2023-07-26T00:12:49
| 3,848,466
| 294
| 98
|
BSD-3-Clause
| 2023-07-26T00:12:50
| 2012-03-27T20:58:41
|
Python
|
UTF-8
|
Python
| false
| false
| 5,251
|
py
|
upload.py
|
"""
Upload API for Flickr.
It is kept separate because it requires different treatment from
the usual API.
Two functions are provided:
- upload
- replace (presently not working)
Author: Alexis Mignon (c)
email: alexis.mignon@gmail.com
Date: 06/08/2011
"""
from .flickrerrors import FlickrError, FlickrAPIError
from .objects import Photo, UploadTicket
from .method_call import get_timeout
from . import auth
import os
from xml.etree import ElementTree as ET
from six import text_type, binary_type, iteritems
import requests
UPLOAD_URL = "https://api.flickr.com/services/upload/"
REPLACE_URL = "https://api.flickr.com/services/replace/"
def format_dict(d):
d_ = {}
for k, v in iteritems(d):
if isinstance(v, bool):
v = int(v)
elif isinstance(v, text_type):
v = v.encode("utf8")
if isinstance(k, text_type):
k = k.encode("utf8")
        if not isinstance(v, binary_type):
            # bytes(int) would build a zero-filled buffer on Python 3, so go
            # through str() to get the textual form instead.
            v = str(v).encode("utf8")
        d_[k] = v
return d_
def post(url, auth_handler, args, photo_file, photo_file_data=None):
args = format_dict(args)
args["api_key"] = auth_handler.key
params = auth_handler.complete_parameters(url, args)
if photo_file_data is None:
photo_file_data = open(photo_file, "rb")
files = {
"photo": (os.path.basename(photo_file), photo_file_data.read())
}
resp = requests.post(url, params, files=files, timeout=get_timeout())
data = resp.content
if resp.status_code != 200:
raise FlickrError("HTTP Error %i: %s" % (resp.status_code, resp.text))
r = ET.fromstring(data)
if r.get("stat") != 'ok':
err = r[0]
raise FlickrAPIError(int(err.get("code")), err.get("msg"))
return r
def upload(**args):
"""
Authentication:
This method requires authentication with 'write' permission.
Arguments:
photo_file
The file to upload.
title (optional)
The title of the photo.
description (optional)
A description of the photo. May contain some limited HTML.
tags (optional)
A space-separated list of tags to apply to the photo.
is_public, is_friend, is_family (optional)
Set to "0" for no, "1" for yes. Specifies who can view the photo.
safety_level (optional)
Set to "1" for Safe, "2" for Moderate, or "3" for Restricted.
content_type (optional)
Set to "1" for Photo, "2" for Screenshot, or "3" for Other.
hidden (optional)
Set to "1" to keep the photo in global search results, "2" to hide
from public searches.
async
set to 1 for async mode, 0 for sync mode
        asynchronous (optional)
            Alias for 'async' on Python >= 3.7, where 'async' is a reserved keyword
"""
if "asynchronous" in args:
args["async"] = args["asynchronous"]
del args["asynchronous"]
if "async" not in args:
args["async"] = False
photo_file = args.pop("photo_file")
if 'photo_file_data' in args:
photo_file_data = args.pop("photo_file_data")
else:
photo_file_data = None
r = post(UPLOAD_URL, auth.AUTH_HANDLER, args, photo_file, photo_file_data)
t = r[0]
if t.tag == 'photoid':
return Photo(
id=t.text,
editurl='https://www.flickr.com/photos/upload/edit/?ids=' + t.text
)
elif t.tag == 'ticketid':
return UploadTicket(id=t.text)
else:
raise FlickrError("Unexpected tag: %s" % t.tag)
def replace(**args):
"""
Authentication:
This method requires authentication with 'write' permission.
For details of how to obtain authentication tokens and how to sign
calls, see the authentication api spec. Note that the 'photo' parameter
should not be included in the signature. All other POST parameters
should be included when generating the signature.
Arguments:
photo_file
The file to upload.
photo_id
The ID of the photo to replace.
async (optional)
Photos may be replaced in async mode, for applications that
don't want to wait around for an upload to complete, leaving
a socket connection open the whole time. Processing photos
asynchronously is recommended. Please consult the documentation
for details.
        asynchronous (optional)
            Alias for 'async' on Python >= 3.7, where 'async' is a reserved keyword
"""
if "asynchronous" in args:
args["async"] = args["asynchronous"]
del args["asynchronous"]
if "async" not in args:
args["async"] = False
if "photo" in args:
args["photo_id"] = args.pop("photo").id
photo_file = args.pop("photo_file")
if 'photo_file_data' in args:
photo_file_data = args.pop("photo_file_data")
else:
photo_file_data = None
r = post(REPLACE_URL, auth.AUTH_HANDLER, args, photo_file, photo_file_data)
t = r[0]
if t.tag == 'photoid':
return Photo(id=t.text)
elif t.tag == 'ticketid':
return UploadTicket(id=t.text)
else:
raise FlickrError("Unexpected tag: %s" % t.tag)
|
557db5d860dfadefe965df2b98b8f845974d7922
|
867364dc92d3236f5b42aa4fe82ee69d008d09e5
|
/insomniac/session_state.py
|
16112ff27db1e0f8eb2a6cd575573675eb8f796c
|
[
"MIT"
] |
permissive
|
alexal1/Insomniac
|
6acde5a6e4b4d50e4e0d4fb233fb2e0f98d52314
|
03e25aeaae5b38a0e47a4dfd705a3140ff2e8086
|
refs/heads/master
| 2023-09-03T16:56:23.546483
| 2022-09-03T14:21:08
| 2022-09-03T14:21:08
| 268,484,843
| 666
| 194
|
MIT
| 2022-03-01T23:12:28
| 2020-06-01T09:55:52
|
Python
|
UTF-8
|
Python
| false
| false
| 6,535
|
py
|
session_state.py
|
from abc import ABC
from insomniac.actions_types import LikeAction, InteractAction, FollowAction, GetProfileAction, ScrapeAction, \
UnfollowAction, RemoveMassFollowerAction, StoryWatchAction, CommentAction, DirectMessageAction, FilterAction, \
DirectMessageBackdateAction
from insomniac.storage import Storage, InsomniacStorage, SessionPhase
from insomniac.utils import *
class SessionState(ABC):
id = None
args = {}
startTime = None
finishTime = None
storage: Optional[Storage] = None
def __init__(self):
self.id = None
self.args = {}
self.startTime = None
self.finishTime = None
self.storage = None
def set_storage_layer(self, storage_instance):
self.storage = storage_instance
def start_session(self):
self.startTime = datetime.now()
self.start_session_impl()
print_timeless(COLOR_REPORT + "\n-------- START: " + str(self.startTime) + " --------" + COLOR_ENDC)
def start_session_impl(self):
raise NotImplementedError
def end_session(self):
if not self.is_started():
return
self.finishTime = datetime.now() # For metadata-in-memory only
if self.storage is not None:
self.storage.end_session(self.id)
print_timeless(COLOR_REPORT + "-------- FINISH: " + str(self.finishTime) + " --------" + COLOR_ENDC)
def is_started(self):
return self.startTime is not None
def is_finished(self):
return self.finishTime is not None
class InsomniacSessionState(SessionState):
SOURCE_NAME_TARGETS = "targets"
storage: Optional[InsomniacStorage] = None
app_id = None
app_version = None
my_username = None
my_followers_count = None
my_following_count = None
totalInteractions = {}
successfulInteractions = {}
totalFollowed = {}
totalComments = 0
totalDirectMessages = 0
totalLikes = 0
totalUnfollowed = 0
totalStoriesWatched = 0
removedMassFollowers = []
session_phase = SessionPhase.TASK_LOGIC
def __init__(self):
super().__init__()
self.app_id = None
self.app_version = None
self.my_username = None
self.my_followers_count = None
self.my_following_count = None
self.totalInteractions = {}
self.successfulInteractions = {}
self.totalFollowed = {}
self.totalScraped = {}
self.totalComments = 0
self.totalDirectMessages = 0
self.totalLikes = 0
self.totalGetProfile = 0
self.totalUnfollowed = 0
self.totalStoriesWatched = 0
self.removedMassFollowers = []
self.session_phase = SessionPhase.TASK_LOGIC
def start_session_impl(self):
session_id = self.storage.start_session(self.args, self.app_id, self.app_version,
self.my_followers_count, self.my_following_count)
if session_id is not None:
self.id = session_id
def start_warmap(self):
self.session_phase = SessionPhase.WARMUP
def end_warmap(self):
self.session_phase = SessionPhase.TASK_LOGIC
def add_action(self, action):
if type(action) == GetProfileAction:
self.totalGetProfile += 1
self.storage.log_get_profile_action(self.id, self.session_phase, action.user)
if type(action) == LikeAction:
self.totalLikes += 1
self.storage.log_like_action(self.id, self.session_phase, action.user, action.source_type, action.source_name)
if type(action) == FollowAction:
source_name = action.source_name if action.source_type is not None else self.SOURCE_NAME_TARGETS
if self.totalFollowed.get(source_name) is None:
self.totalFollowed[source_name] = 1
else:
self.totalFollowed[source_name] += 1
self.storage.log_follow_action(self.id, self.session_phase, action.user, action.source_type, action.source_name)
self.storage.update_follow_status(action.user, do_i_follow_him=True)
if type(action) == StoryWatchAction:
self.totalStoriesWatched += 1
self.storage.log_story_watch_action(self.id, self.session_phase, action.user, action.source_type, action.source_name)
if type(action) == CommentAction:
self.totalComments += 1
self.storage.log_comment_action(self.id, self.session_phase, action.user, action.comment, action.source_type, action.source_name)
if type(action) == DirectMessageAction:
self.totalDirectMessages += 1
self.storage.log_direct_message_action(self.id, self.session_phase, action.user, action.message)
if type(action) == DirectMessageBackdateAction:
old_time_ago = datetime(2000, 1, 1, 0, 0, 0)
self.storage.log_direct_message_action(self.id, self.session_phase, action.user, action.message, old_time_ago)
if type(action) == UnfollowAction:
self.totalUnfollowed += 1
self.storage.log_unfollow_action(self.id, self.session_phase, action.user)
self.storage.update_follow_status(action.user, do_i_follow_him=False)
if type(action) == ScrapeAction:
if self.totalScraped.get(action.source_name) is None:
self.totalScraped[action.source_name] = 1
else:
self.totalScraped[action.source_name] += 1
self.storage.log_scrape_action(self.id, self.session_phase, action.user, action.source_type, action.source_name)
self.storage.publish_scrapped_account(action.user)
if type(action) == FilterAction:
self.storage.log_filter_action(self.id, self.session_phase, action.user)
if type(action) == InteractAction:
source_name = action.source_name if action.source_type is not None else self.SOURCE_NAME_TARGETS
if self.totalInteractions.get(source_name) is None:
self.totalInteractions[source_name] = 1
else:
self.totalInteractions[source_name] += 1
if self.successfulInteractions.get(source_name) is None:
self.successfulInteractions[source_name] = 1 if action.succeed else 0
else:
if action.succeed:
self.successfulInteractions[source_name] += 1
if type(action) == RemoveMassFollowerAction:
self.removedMassFollowers.append(action.user)
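# Typical flow (sketch; constructor arguments of the *Action classes are
# defined in insomniac.actions_types and omitted here):
#
#     state = InsomniacSessionState()
#     state.set_storage_layer(storage)      # an InsomniacStorage instance
#     state.start_session()
#     state.add_action(LikeAction(...))     # updates counters and the storage log
#     state.end_session()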
|
ad7e571737d9369af2966c2a893614cf2510ff95
|
4bcc9806152542ab43fc2cf47c499424f200896c
|
/tensorflow/python/tpu/client/client.py
|
d86ba094536672f5bbba3fb321c8b8af640bc153
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
tensorflow/tensorflow
|
906276dbafcc70a941026aa5dc50425ef71ee282
|
a7f3934a67900720af3d3b15389551483bee50b8
|
refs/heads/master
| 2023-08-25T04:24:41.611870
| 2023-08-25T04:06:24
| 2023-08-25T04:14:08
| 45,717,250
| 208,740
| 109,943
|
Apache-2.0
| 2023-09-14T20:55:50
| 2015-11-07T01:19:20
|
C++
|
UTF-8
|
Python
| false
| false
| 14,785
|
py
|
client.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Cloud TPU Client."""
from concurrent import futures
import datetime
import json
import logging
import os
import time
import urllib
from absl import flags
_GOOGLE_API_CLIENT_INSTALLED = True
try:
from googleapiclient import discovery # pylint: disable=g-import-not-at-top
from oauth2client import client # pylint: disable=g-import-not-at-top
except ImportError:
_GOOGLE_API_CLIENT_INSTALLED = False
FLAGS = flags.FLAGS
flags.DEFINE_bool('runtime_oom_exit', True,
'Exit the script when the TPU runtime is OOM.')
flags.DEFINE_bool('hbm_oom_exit', True,
'Exit the script when the TPU HBM is OOM.')
_GKE_ENV_VARIABLE = 'KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS'
_DEFAULT_TPUCONFIG_VARIABLE = 'TPU_CONFIG'
_ENDPOINTS_SEPARATOR = ','
_DEFAULT_ENV_VARIABLE = 'TPU_NAME'
_DISCOVERY_SERVICE_URL_ENV_VARIABLE = 'TPU_API_DISCOVERY_URL'
_GCE_METADATA_URL_ENV_VARIABLE = 'GCE_METADATA_IP'
_GCE_METADATA_ENDPOINT_ENV_VARIABLE = 'GCE_METADATA_HOST'
_DEFAULT_ENDPOINT_PORT = '8470'
_OOM_EVENT_COOL_TIME_SEC = 90
_VERSION_SWITCHER_ENDPOINT = 'http://{}:8475/requestversion'
def _utcnow():
"""A wrapper function around datetime.datetime.utcnow.
  This function is created for unit-testing purposes. It's not easy to use
  StubOutWithMock with the datetime.datetime package.
Returns:
datetime.datetime
"""
return datetime.datetime.utcnow()
def _environment_discovery_url():
return os.environ.get(_DISCOVERY_SERVICE_URL_ENV_VARIABLE)
def _gce_metadata_endpoint():
endpoint = os.environ.get(_GCE_METADATA_ENDPOINT_ENV_VARIABLE)
if not endpoint:
endpoint = os.environ.get(
_GCE_METADATA_URL_ENV_VARIABLE, 'metadata.google.internal'
)
return 'http://' + endpoint
def _request_compute_metadata(path):
req = urllib.request.Request(
'%s/computeMetadata/v1/%s' % (_gce_metadata_endpoint(), path),
headers={'Metadata-Flavor': 'Google'})
resp = urllib.request.urlopen(req)
return _as_text(resp.read())
def _environment_var_to_network_endpoints(endpoints):
"""Yields a dict with ip address and port."""
for endpoint in endpoints.split(','):
grpc_prefix = 'grpc://'
if endpoint.startswith(grpc_prefix):
endpoint = endpoint.split(grpc_prefix)[1]
parts = endpoint.split(':')
ip_address = parts[0]
port = _DEFAULT_ENDPOINT_PORT
if len(parts) > 1:
port = parts[1]
yield {
'ipAddress': ip_address,
'port': port
}
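# Example (illustrative addresses):
#   list(_environment_var_to_network_endpoints('grpc://10.0.0.2:8470,10.0.0.3'))
#   -> [{'ipAddress': '10.0.0.2', 'port': '8470'},
#       {'ipAddress': '10.0.0.3', 'port': '8470'}]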
def _get_tpu_node_config():
tpu_config_env = os.environ.get(_DEFAULT_TPUCONFIG_VARIABLE)
if tpu_config_env:
return json.loads(tpu_config_env)
return None
def _get_tpu_name(tpu):
if tpu:
return tpu
for e in [_GKE_ENV_VARIABLE, _DEFAULT_ENV_VARIABLE]:
if e in os.environ:
return os.environ[e]
return None
def _as_text(s):
if isinstance(s, bytes):
return s.decode('utf-8')
return s
class Client:
"""Client for working with the Cloud TPU API.
  This client is intended to be used for resolving a TPU name to IP addresses.
  It's recommended to use this client as a context manager to utilize all of
  its functionality.
"""
def __init__(self,
tpu=None,
zone=None,
project=None,
credentials='default',
service=None,
discovery_url=None):
if isinstance(tpu, list):
if not tpu:
raise ValueError('At least one TPU must be specified.')
if len(tpu) != 1:
raise NotImplementedError(
'Using multiple TPUs in a single session is not yet implemented')
tpu = tpu[0]
tpu = _get_tpu_name(tpu)
if tpu is None:
tpu_node_config = _get_tpu_node_config()
if tpu_node_config:
tpu = tpu_node_config.get('tpu_node_name')
project = project or tpu_node_config.get('project')
zone = zone or tpu_node_config.get('zone')
else:
raise ValueError('Please provide a TPU Name to connect to.')
self._tpu = _as_text(tpu)
self._use_api = not self._tpu.startswith('grpc://')
self._service = service
self._credentials = None
self._project = None
self._zone = None
self._discovery_url = None
if self._use_api:
if credentials != 'default':
self._credentials = credentials
# Automatically detect project and zone if unspecified.
if project:
self._project = _as_text(project)
else:
self._project = _request_compute_metadata('project/project-id')
if zone:
self._zone = _as_text(zone)
else:
zone_path = _request_compute_metadata('instance/zone')
self._zone = zone_path.split('/')[-1]
self._discovery_url = _environment_discovery_url() or discovery_url
def _symptom_msg(self, msg):
"""Return the structured Symptom message."""
return 'Symptom: ' + msg
def _oom_event(self, symptoms):
"""Check if a runtime OOM event is reported."""
if not symptoms:
return False
for symptom in reversed(symptoms):
if symptom['symptomType'] != 'OUT_OF_MEMORY':
continue
oom_datetime_str = symptom['createTime'].split('.')[0]
oom_datetime = datetime.datetime.strptime(oom_datetime_str,
'%Y-%m-%dT%H:%M:%S')
time_diff = _utcnow() - oom_datetime
if time_diff < datetime.timedelta(seconds=_OOM_EVENT_COOL_TIME_SEC):
logging.warning(
self._symptom_msg(
'a recent runtime OOM has occurred ~{} seconds ago. The model '
'script will terminate automatically. To prevent future OOM '
'events, please consider reducing the model size. To disable this '
'behavior, set flag --runtime_oom_exit=false when starting the '
'script.'.format(time_diff.seconds)))
return True
return False
def _hbm_oom_event(self, symptoms):
"""Check if a HBM OOM event is reported."""
if not symptoms:
return False
for symptom in reversed(symptoms):
if symptom['symptomType'] != 'HBM_OUT_OF_MEMORY':
continue
oom_datetime_str = symptom['createTime'].split('.')[0]
oom_datetime = datetime.datetime.strptime(oom_datetime_str,
'%Y-%m-%dT%H:%M:%S')
time_diff = _utcnow() - oom_datetime
if time_diff < datetime.timedelta(seconds=_OOM_EVENT_COOL_TIME_SEC):
logging.warning(
self._symptom_msg(
'a recent HBM OOM has occurred ~{} seconds ago. The model '
'script will terminate automatically. To prevent future HBM OOM '
'events, please consider reducing the model size. To disable this '
'behavior, set flag --hbm_oom_exit=false when starting the '
'script.'.format(time_diff.seconds)))
return True
return False
def _tpu_service(self):
"""Creates a new Cloud TPU API object.
This works around an issue where the underlying HTTP connection sometimes
times out when the script has been running for too long. Other methods in
this object call this method to get a new API object whenever they need
to communicate with the Cloud API.
Raises:
RuntimeError: If the dependent Python packages are missing.
Returns:
A Google Cloud TPU API object.
"""
if self._service:
return self._service
if not _GOOGLE_API_CLIENT_INSTALLED:
raise RuntimeError('Missing runtime dependency on the Google API client. '
'Run `pip install cloud-tpu-client` to fix.')
credentials = self._credentials
if credentials is None or credentials == 'default':
credentials = client.GoogleCredentials.get_application_default()
if self._discovery_url:
return discovery.build(
'tpu',
'v1',
credentials=credentials,
discoveryServiceUrl=self._discovery_url,
cache_discovery=False)
else:
return discovery.build(
'tpu', 'v1', credentials=credentials, cache_discovery=False)
def _full_name(self):
"""Returns the full Cloud name for this TPU."""
return 'projects/%s/locations/%s/nodes/%s' % (
self._project, self._zone, self._tpu)
def _fetch_cloud_tpu_metadata(self):
"""Returns the TPU metadata object from the TPU Get API call."""
service = self._tpu_service()
try:
r = service.projects().locations().nodes().get(name=self._full_name())
return r.execute()
except Exception as e:
raise ValueError("Could not lookup TPU metadata from name '%s'. Please "
'doublecheck the tpu argument in the TPUClusterResolver '
'constructor. Exception: %s' % (self._tpu, e))
def _get_tpu_property(self, key):
if self._use_api:
metadata = self._fetch_cloud_tpu_metadata()
return metadata.get(key)
return None
def __enter__(self):
self._open = True
def __exit__(self, type, value, traceback): # pylint: disable=redefined-builtin
del type, value, traceback
def recoverable(self):
"""Returns true if the TPU is in a state where training should eventually resume.
    If false, the TPU is in an unrecoverable state and should be recreated.
"""
state = self.state()
symptoms = self.symptoms()
if state and state in ['TERMINATED', 'PREEMPTED']:
return False
elif FLAGS.runtime_oom_exit and self._oom_event(symptoms):
return False
elif FLAGS.hbm_oom_exit and self._hbm_oom_event(symptoms):
return False
return True
def symptoms(self):
"""Return Cloud TPU Symptoms of the TPU."""
return self._get_tpu_property('symptoms')
def state(self):
"""Return state of the TPU."""
return self._get_tpu_property('state')
def health(self):
"""Return health of the TPU."""
return self._get_tpu_property('health')
def runtime_version(self):
"""Return runtime version of the TPU."""
if not self._use_api:
# Fallback on getting version directly from TPU.
url = _VERSION_SWITCHER_ENDPOINT.format(
self.network_endpoints()[0]['ipAddress'])
try:
req = urllib.request.Request(url)
resp = urllib.request.urlopen(req)
version_details = json.loads(resp.read())
return version_details.get('currentVersion')
except urllib.error.HTTPError as e:
status_code = e.code
if status_code == 404:
return None
else:
raise e
return self._get_tpu_property('tensorflowVersion')
def accelerator_type(self):
"""Return accelerator type of the TPU."""
return self._get_tpu_property('acceleratorType')
def api_available(self):
"""Return if the Cloud TPU API is available, if not certain features will not work."""
return self._use_api
def name(self):
"""Return the name of the tpu, or the ip address if name is not provided."""
return self._tpu
def get_local_ip(self):
"""Return the local ip address of the Google Cloud VM the workload is running on."""
return _request_compute_metadata('instance/network-interfaces/0/ip')
def network_endpoints(self):
"""Return a list of tpu endpoints."""
if not self._use_api:
return list(_environment_var_to_network_endpoints(self._tpu))
response = self._fetch_cloud_tpu_metadata()
if response.get('state') != 'READY':
raise RuntimeError('TPU "%s" is not yet ready; state: "%s"' %
(self._tpu, response.get('state')))
if 'networkEndpoints' in response:
return response['networkEndpoints']
else:
return [{'ipAddress': response['ipAddress'], 'port': response['port']}]
def wait_for_healthy(self, timeout_s=1200, interval=30):
"""Wait for TPU to become healthy or raise error if timeout reached.
Args:
timeout_s (int): The timeout in seconds for waiting TPU to become healthy.
interval (int): The interval in seconds to poll the TPU for health.
Raises:
RuntimeError: If the TPU doesn't become healthy by the timeout.
"""
timeout = time.time() + timeout_s
while self.health() != 'HEALTHY':
logging.warning(
('Waiting for TPU "%s" with state "%s" '
'and health "%s" to become healthy'),
self.name(), self.state(), self.health())
if time.time() + interval > timeout:
raise RuntimeError(
'Timed out waiting for TPU "%s" to become healthy' % self.name())
time.sleep(interval)
logging.warning('TPU "%s" is healthy.', self.name())
def configure_tpu_version(self, version, restart_type='always'):
"""Configure TPU software version.
Args:
version (string): Version of software to configure the TPU with.
restart_type (string): Restart behaviour when switching versions,
defaults to always restart. Options are 'always', 'ifNeeded'.
"""
def configure_worker(worker):
"""Configure individual TPU worker.
Args:
worker: A dict with the field ipAddress where the configure request will
be sent.
"""
ip_address = worker['ipAddress']
url = (_VERSION_SWITCHER_ENDPOINT + '/{}?restartType={}').format(
ip_address, version, restart_type)
req = urllib.request.Request(url, data=b'')
try:
urllib.request.urlopen(req)
except urllib.error.HTTPError as e:
status_code = e.code
if status_code == 404:
raise Exception(
'Tensorflow version {} is not available on Cloud TPU, '
'try a previous nightly version or refer to '
'https://cloud.google.com/tpu/docs/release-notes for '
'the latest official version.'.format(version))
else:
raise Exception('Failed to configure worker {}'.format(ip_address))
workers = self.network_endpoints()
with futures.ThreadPoolExecutor(max_workers=len(workers)) as executor:
results = executor.map(configure_worker, workers)
for result in results:
if result:
result.result()
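# Minimal usage sketch (the TPU name, zone and project values are illustrative;
# the import path assumes the in-tree module):
#
#     from tensorflow.python.tpu.client import client as tpu_client
#
#     c = tpu_client.Client(tpu='my-tpu', zone='us-central1-b', project='my-project')
#     c.wait_for_healthy(timeout_s=600, interval=30)
#     endpoints = c.network_endpoints()   # [{'ipAddress': ..., 'port': ...}, ...]
#     if not c.recoverable():
#         raise RuntimeError('TPU should be recreated')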
|
56e4cd7cba30172917f4ec6fe2d4899b80ada03e
|
9eb4da8fe0eb56a0b0e4c4d660f52f52838c91da
|
/bumblebee_status/modules/core/nic.py
|
09fe4876c69807041e6583dfd4c819c9ef1e2599
|
[
"MIT"
] |
permissive
|
tobi-wan-kenobi/bumblebee-status
|
bf53b44341f4d84c4684675af3dcb8c675579f23
|
d03e6307f5e8c0b1c0451636ac9b1e84f3529a73
|
refs/heads/main
| 2023-08-31T11:52:12.140284
| 2023-07-21T12:18:17
| 2023-07-21T12:18:17
| 72,353,166
| 1,345
| 361
|
MIT
| 2023-09-13T19:25:17
| 2016-10-30T14:07:20
|
Python
|
UTF-8
|
Python
| false
| false
| 6,688
|
py
|
nic.py
|
# pylint: disable=C0111,R0903
"""Displays the name, IP address(es) and status of each available network interface.
Requires the following python module:
* netifaces
Requires the following executable:
* iw
    * (up to and including version 2.0.5: iwgetid)
Parameters:
* nic.exclude: Comma-separated list of interface prefixes (supporting regular expressions) to exclude (defaults to 'lo,virbr,docker,vboxnet,veth,br,.*:avahi')
* nic.include: Comma-separated list of interfaces to include
* nic.states: Comma-separated list of states to show (prefix with '^' to invert - i.e. ^down -> show all devices that are not in state down)
* nic.format: Format string (defaults to '{intf} {state} {ip} {ssid} {strength}')
* nic.strength_warning: Integer to set the threshold for warning state (defaults to 50)
* nic.strength_critical: Integer to set the threshold for critical state (defaults to 30)
"""
import re
import shutil
import netifaces
import subprocess
import core.module
import core.decorators
import util.cli
import util.format
class Module(core.module.Module):
@core.decorators.every(seconds=5)
def __init__(self, config, theme):
widgets = []
super().__init__(config, theme, widgets)
self._exclude = util.format.aslist(
self.parameter("exclude", "lo,virbr,docker,vboxnet,veth,br,.*:avahi")
)
self._include = util.format.aslist(self.parameter("include", ""))
self._states = {"include": [], "exclude": []}
for state in tuple(
filter(len, util.format.aslist(self.parameter("states", "")))
):
if state[0] == "^":
self._states["exclude"].append(state[1:])
else:
self._states["include"].append(state)
self._format = self.parameter("format", "{intf} {state} {ip} {ssid} {strength}")
self._strength_threshold_critical = self.parameter("strength_critical", 30)
self._strength_threshold_warning = self.parameter("strength_warning", 50)
# Limits for the accepted dBm values of wifi strength
self.__strength_dbm_lower_bound = -110
self.__strength_dbm_upper_bound = -30
self.iw = shutil.which("iw")
self._update_widgets(widgets)
def update(self):
self._update_widgets(self.widgets())
def state(self, widget):
states = []
if widget.get("state") == "down":
states.append("critical")
elif widget.get("state") != "up":
states.append("warning")
intf = widget.get("intf")
iftype = "wireless" if self._iswlan(intf) else "wired"
iftype = "tunnel" if self._istunnel(intf) else iftype
# "strength" is none if interface type is not wlan
strength = widget.get("strength")
if self._iswlan(intf) and strength:
if strength < self._strength_threshold_critical:
states.append("critical")
elif strength < self._strength_threshold_warning:
states.append("warning")
states.append("{}-{}".format(iftype, widget.get("state")))
return states
def _iswlan(self, intf):
# wifi, wlan, wlp, seems to work for me
if intf.startswith("w"):
return True
return False
def _istunnel(self, intf):
return intf.startswith("tun") or intf.startswith("wg")
def get_addresses(self, intf):
retval = []
try:
for ip in netifaces.ifaddresses(intf).get(netifaces.AF_INET, []):
if ip.get("addr", "") != "":
retval.append(ip.get("addr"))
except Exception:
return []
return retval
def _excluded(self, intf):
for e in self._exclude:
if re.match(e, intf):
return True
return False
def _update_widgets(self, widgets):
self.clear_widgets()
interfaces = []
for i in netifaces.interfaces():
if not self._excluded(i):
interfaces.append(i)
interfaces.extend([i for i in netifaces.interfaces() if i in self._include])
for intf in interfaces:
addr = []
state = "down"
for ip in self.get_addresses(intf):
addr.append(ip)
state = "up"
if len(self._states["exclude"]) > 0 and state in self._states["exclude"]:
continue
if (
len(self._states["include"]) > 0
and state not in self._states["include"]
):
continue
strength_dbm = self.get_strength_dbm(intf)
strength_percent = self.convert_strength_dbm_percent(strength_dbm)
widget = self.widget(intf)
if not widget:
widget = self.add_widget(name=intf)
            # join/split is used to collapse multiple whitespaces (e.g. when the SSID is not available)
widget.full_text(
" ".join(
self._format.format(
ip=", ".join(addr),
intf=intf,
state=state,
strength=str(strength_percent) + "%" if strength_percent else "",
ssid=self.get_ssid(intf),
).split()
)
)
widget.set("intf", intf)
widget.set("state", state)
widget.set("strength", strength_percent)
def get_ssid(self, intf):
if not self._iswlan(intf) or self._istunnel(intf) or not self.iw:
return ""
iw_info = util.cli.execute("{} dev {} info".format(self.iw, intf))
for line in iw_info.split("\n"):
match = re.match(r"^\s+ssid\s(.+)$", line)
if match:
return match.group(1)
return ""
def get_strength_dbm(self, intf):
if not self._iswlan(intf) or self._istunnel(intf) or not self.iw:
return None
with open("/proc/net/wireless", "r") as file:
for line in file:
if intf in line:
# Remove trailing . by slicing it off ;)
strength_dbm = line.split()[3][:-1]
return util.format.asint(strength_dbm,
minimum=self.__strength_dbm_lower_bound,
maximum=self.__strength_dbm_upper_bound)
return None
def convert_strength_dbm_percent(self, signal):
return int(100 * ((signal + 100) / 70.0)) if signal else None
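# Example invocation (illustrative; parameter names match the module docstring):
#
#     bumblebee-status -m nic \
#         -p nic.format="{intf} {state} {ip} {ssid} {strength}" \
#         -p nic.exclude="lo,docker" \
#         -p nic.states="^down"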
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
0183615e172b955ed93dd329bacb64a11deeddad
|
fdb9bdc6c4ab2f14ba71e544493706d5e275899f
|
/fhir/resources/tests/test_immunization.py
|
62ad32010ec9d813f137985fc4c0084efba5c123
|
[
"BSD-3-Clause"
] |
permissive
|
nazrulworld/fhir.resources
|
6ae8aea8180c611b0c5050759c6dcdf63e4cb061
|
1fd6ea476b27b3fcb8c4ef8f23bc51cf161e69e3
|
refs/heads/main
| 2023-08-30T18:27:27.277249
| 2023-07-03T19:57:06
| 2023-07-03T19:57:06
| 165,297,877
| 256
| 83
|
NOASSERTION
| 2023-08-24T15:34:05
| 2019-01-11T19:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 19,172
|
py
|
test_immunization.py
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Immunization
Release: R5
Version: 5.0.0
Build ID: 2aecd53
Last updated: 2023-03-26T15:21:02.749+11:00
"""
from pydantic.validators import bytes_validator # noqa: F401
from .. import fhirtypes # noqa: F401
from .. import immunization
def impl_immunization_1(inst):
assert inst.administeredProduct.concept.coding[0].code == "70461-0321-03"
assert (
inst.administeredProduct.concept.coding[0].system
== "urn:oid:2.16.840.1.113883.6.69"
)
assert inst.administeredProduct.concept.text == (
"Flucelvax (Influenza, injectable, MDCK, preservative free, " "quadrivalent)"
)
assert inst.doseQuantity.code == "mg"
assert inst.doseQuantity.system == "http://unitsofmeasure.org"
assert float(inst.doseQuantity.value) == float(5)
assert inst.encounter.reference == "Encounter/example"
assert inst.expirationDate == fhirtypes.Date.validate("2015-02-15")
assert inst.fundingSource.coding[0].code == "private"
assert inst.fundingSource.coding[0].system == (
"http://terminology.hl7.org/CodeSystem/immunization-funding-" "source"
)
assert inst.id == "example"
assert inst.identifier[0].system == "urn:ietf:rfc:3986"
assert inst.identifier[0].value == "urn:oid:1.3.6.1.4.1.21367.2005.3.7.1234"
assert inst.isSubpotent is True
assert inst.location.reference == "Location/1"
assert inst.lotNumber == "AAJN11K"
assert inst.manufacturer.reference.reference == "Organization/hl7"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.note[0].text == "Notes on adminstration of vaccine"
assert inst.occurrenceDateTime == fhirtypes.DateTime.validate("2013-01-10")
assert inst.patient.reference == "Patient/example"
assert inst.performer[0].actor.reference == "Practitioner/example"
assert inst.performer[0].function.coding[0].code == "OP"
assert (
inst.performer[0].function.coding[0].system
== "http://terminology.hl7.org/CodeSystem/v2-0443"
)
assert inst.performer[1].actor.reference == "Practitioner/example"
assert inst.performer[1].function.coding[0].code == "AP"
assert (
inst.performer[1].function.coding[0].system
== "http://terminology.hl7.org/CodeSystem/v2-0443"
)
assert inst.primarySource is True
assert inst.programEligibility[0].program.text == "VFC"
assert inst.programEligibility[0].programStatus.coding[0].code == "uninsured"
assert inst.programEligibility[0].programStatus.coding[0].system == (
"http://terminology.hl7.org/CodeSystem/immunization-program-" "eligibility"
)
assert inst.reason[0].concept.coding[0].code == "429060002"
assert inst.reason[0].concept.coding[0].system == "http://snomed.info/sct"
assert inst.route.coding[0].code == "IM"
assert inst.route.coding[0].display == "Injection, intramuscular"
assert inst.route.coding[0].system == (
"http://terminology.hl7.org/CodeSystem/v3-RouteOfAdministrati" "on"
)
assert inst.site.coding[0].code == "LA"
assert inst.site.coding[0].display == "left arm"
assert (
inst.site.coding[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActSite"
)
assert inst.status == "completed"
assert inst.text.status == "generated"
assert inst.vaccineCode.coding[0].code == "FLUCEL VAX"
assert inst.vaccineCode.coding[0].system == "urn:oid:1.2.36.1.2001.1005.17"
assert inst.vaccineCode.text == "Flucelvax (Influenza)"
def test_immunization_1(base_settings):
"""No. 1 tests collection for Immunization.
Test File: immunization-example.json
"""
filename = base_settings["unittest_data_dir"] / "immunization-example.json"
inst = immunization.Immunization.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Immunization" == inst.resource_type
impl_immunization_1(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Immunization" == data["resourceType"]
inst2 = immunization.Immunization(**data)
impl_immunization_1(inst2)
def impl_immunization_2(inst):
assert inst.id == "historical"
assert inst.identifier[0].system == "urn:ietf:rfc:3986"
assert inst.identifier[0].value == "urn:oid:1.3.6.1.4.1.21367.2005.3.7.1234"
assert inst.informationSource.concept.coding[0].code == "record"
assert (
inst.informationSource.concept.coding[0].system
== "http://terminology.hl7.org/CodeSystem/immunization-origin"
)
assert inst.informationSource.concept.text == "Written Record"
assert inst.location.reference == "Location/1"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.note[0].text == "Notes on adminstration of a historical vaccine"
assert inst.occurrenceString == "January 2012"
assert inst.patient.reference == "Patient/example"
assert inst.primarySource is False
assert inst.status == "completed"
assert inst.text.status == "generated"
assert inst.vaccineCode.coding[0].code == "GNFLU"
assert inst.vaccineCode.coding[0].system == "urn:oid:1.2.36.1.2001.1005.17"
assert inst.vaccineCode.text == "Influenza"
def test_immunization_2(base_settings):
"""No. 2 tests collection for Immunization.
Test File: immunization-example-historical.json
"""
filename = (
base_settings["unittest_data_dir"] / "immunization-example-historical.json"
)
inst = immunization.Immunization.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Immunization" == inst.resource_type
impl_immunization_2(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Immunization" == data["resourceType"]
inst2 = immunization.Immunization(**data)
impl_immunization_2(inst2)
def impl_immunization_3(inst):
assert inst.doseQuantity.code == "mg"
assert inst.doseQuantity.system == "http://unitsofmeasure.org"
assert float(inst.doseQuantity.value) == float(5)
assert inst.encounter.reference == "Encounter/example"
assert inst.expirationDate == fhirtypes.Date.validate("2023-01-21")
assert inst.id == "reaction"
assert inst.identifier[0].system == "urn:ietf:rfc:3986"
assert inst.identifier[0].value == "urn:oid:1.3.6.1.4.1.21367.2005.3.7.1234"
assert inst.isSubpotent is False
assert inst.location.reference == "Location/1"
assert inst.lotNumber == "PPL909K"
assert inst.manufacturer.reference.reference == "Organization/hl7"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.note[0].text == "Notes on adminstration of vaccine"
assert inst.occurrenceDateTime == fhirtypes.DateTime.validate("2021-09-12")
assert inst.patient.reference == "Patient/example"
assert inst.performer[0].actor.reference == "Practitioner/example"
assert inst.performer[0].function.coding[0].code == "OP"
assert (
inst.performer[0].function.coding[0].system
== "http://terminology.hl7.org/CodeSystem/v2-0443"
)
assert inst.performer[1].actor.reference == "Practitioner/example"
assert inst.performer[1].function.coding[0].code == "AP"
assert (
inst.performer[1].function.coding[0].system
== "http://terminology.hl7.org/CodeSystem/v2-0443"
)
assert inst.primarySource is True
assert inst.reaction[0].date == fhirtypes.DateTime.validate("2021-09-12")
assert inst.reaction[0].manifestation.reference.reference == "Observation/example2"
assert inst.reaction[0].reported is False
assert inst.reason[0].reference.reference == "Observation/example"
assert inst.route.coding[0].code == "IM"
assert inst.route.coding[0].display == "Injection, intramuscular"
assert inst.route.coding[0].system == (
"http://terminology.hl7.org/CodeSystem/v3-RouteOfAdministrati" "on"
)
assert inst.site.coding[0].code == "LA"
assert inst.site.coding[0].display == "left arm"
assert (
inst.site.coding[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActSite"
)
assert inst.status == "completed"
assert inst.text.status == "generated"
assert inst.vaccineCode.coding[0].code == "175"
assert inst.vaccineCode.coding[0].system == "http://hl7.org/fhir/sid/cvx"
assert inst.vaccineCode.text == "Rabies - IM Diploid cell culture"
def test_immunization_3(base_settings):
"""No. 3 tests collection for Immunization.
Test File: immunization-example-reaction.json
"""
filename = base_settings["unittest_data_dir"] / "immunization-example-reaction.json"
inst = immunization.Immunization.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Immunization" == inst.resource_type
impl_immunization_3(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Immunization" == data["resourceType"]
inst2 = immunization.Immunization(**data)
impl_immunization_3(inst2)
def impl_immunization_4(inst):
assert inst.doseQuantity.code == "mg"
assert inst.doseQuantity.system == "http://unitsofmeasure.org"
assert float(inst.doseQuantity.value) == float(5)
assert inst.encounter.reference == "Encounter/example"
assert inst.expirationDate == fhirtypes.Date.validate("2018-12-15")
assert inst.fundingSource.coding[0].code == "private"
assert inst.fundingSource.coding[0].system == (
"http://terminology.hl7.org/CodeSystem/immunization-funding-" "source"
)
assert inst.id == "protocol"
assert inst.identifier[0].system == "urn:ietf:rfc:3986"
assert inst.identifier[0].value == "urn:oid:1.3.6.1.4.1.21367.2005.3.7.1234"
assert inst.isSubpotent is False
assert inst.location.reference == "Location/1"
assert inst.lotNumber == "PT123F"
assert inst.manufacturer.reference.reference == "Organization/hl7"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.occurrenceDateTime == fhirtypes.DateTime.validate("2018-06-18")
assert inst.patient.reference == "Patient/example"
assert inst.performer[0].actor.reference == "Practitioner/example"
assert inst.performer[0].function.coding[0].code == "OP"
assert (
inst.performer[0].function.coding[0].system
== "http://terminology.hl7.org/CodeSystem/v2-0443"
)
assert inst.performer[1].actor.reference == "Practitioner/example"
assert inst.performer[1].function.coding[0].code == "AP"
assert (
inst.performer[1].function.coding[0].system
== "http://terminology.hl7.org/CodeSystem/v2-0443"
)
assert inst.primarySource is True
assert inst.programEligibility[0].program.text == "VFC"
assert inst.programEligibility[0].programStatus.coding[0].code == "ineligible"
assert inst.programEligibility[0].programStatus.coding[0].system == (
"http://terminology.hl7.org/CodeSystem/immunization-program-" "eligibility"
)
assert inst.protocolApplied[0].doseNumber == "1"
assert inst.protocolApplied[0].series == "2-dose"
assert inst.protocolApplied[0].targetDisease[0].coding[0].code == "40468003"
assert (
inst.protocolApplied[0].targetDisease[0].coding[0].system
== "http://snomed.info/sct"
)
assert inst.protocolApplied[1].doseNumber == "2"
assert inst.protocolApplied[1].series == "3-dose"
assert inst.protocolApplied[1].seriesDoses == "3"
assert inst.protocolApplied[1].targetDisease[0].coding[0].code == "66071002"
assert (
inst.protocolApplied[1].targetDisease[0].coding[0].system
== "http://snomed.info/sct"
)
assert inst.route.coding[0].code == "IM"
assert inst.route.coding[0].display == "Injection, intramuscular"
assert inst.route.coding[0].system == (
"http://terminology.hl7.org/CodeSystem/v3-RouteOfAdministrati" "on"
)
assert inst.site.coding[0].code == "LA"
assert inst.site.coding[0].display == "left arm"
assert (
inst.site.coding[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActSite"
)
assert inst.status == "completed"
assert inst.text.status == "generated"
assert inst.vaccineCode.coding[0].code == "104"
assert inst.vaccineCode.coding[0].system == "http://hl7.org/fhir/sid/cvx"
assert inst.vaccineCode.text == "Twinrix (HepA/HepB)"
def test_immunization_4(base_settings):
"""No. 4 tests collection for Immunization.
Test File: immunization-example-protocol.json
"""
filename = base_settings["unittest_data_dir"] / "immunization-example-protocol.json"
inst = immunization.Immunization.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Immunization" == inst.resource_type
impl_immunization_4(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Immunization" == data["resourceType"]
inst2 = immunization.Immunization(**data)
impl_immunization_4(inst2)
def impl_immunization_5(inst):
assert inst.id == "notGiven"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.occurrenceDateTime == fhirtypes.DateTime.validate("2013-01-10")
assert inst.patient.reference == "Patient/example"
assert inst.primarySource is True
assert inst.status == "not-done"
assert inst.statusReason.coding[0].code == "MEDPREC"
assert inst.statusReason.coding[0].display == "medical precaution"
assert (
inst.statusReason.coding[0].system
== "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.text.status == "generated"
assert inst.vaccineCode.coding[0].code == "01"
assert inst.vaccineCode.coding[0].display == "DTP"
assert inst.vaccineCode.coding[0].system == "http://hl7.org/fhir/sid/cvx"
def test_immunization_5(base_settings):
"""No. 5 tests collection for Immunization.
Test File: immunization-example-refused.json
"""
filename = base_settings["unittest_data_dir"] / "immunization-example-refused.json"
inst = immunization.Immunization.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Immunization" == inst.resource_type
impl_immunization_5(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Immunization" == data["resourceType"]
inst2 = immunization.Immunization(**data)
impl_immunization_5(inst2)
def impl_immunization_6(inst):
assert inst.doseQuantity.code == "ml"
assert inst.doseQuantity.system == "http://unitsofmeasure.org"
assert float(inst.doseQuantity.value) == float(0.5)
assert inst.encounter.reference == "Encounter/example"
assert inst.expirationDate == fhirtypes.Date.validate("2015-02-28")
assert inst.fundingSource.coding[0].code == "private"
assert inst.fundingSource.coding[0].system == (
"http://terminology.hl7.org/CodeSystem/immunization-funding-" "source"
)
assert inst.id == "subpotent"
assert inst.identifier[0].system == "urn:ietf:rfc:3986"
assert inst.identifier[0].value == "urn:oid:1.3.6.1.4.1.21367.2005.3.7.1234"
assert inst.isSubpotent is True
assert inst.location.reference == "Location/1"
assert inst.lotNumber == "AAJN11K"
assert inst.manufacturer.reference.reference == "Organization/hl7"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.note[0].text == "Notes on adminstration of vaccine"
assert inst.occurrenceDateTime == fhirtypes.DateTime.validate("2015-01-15")
assert inst.patient.reference == "Patient/example"
assert inst.performer[0].actor.reference == "Practitioner/example"
assert inst.performer[0].function.coding[0].code == "OP"
assert (
inst.performer[0].function.coding[0].system
== "http://terminology.hl7.org/CodeSystem/v2-0443"
)
assert inst.performer[1].actor.reference == "Practitioner/example"
assert inst.performer[1].function.coding[0].code == "AP"
assert (
inst.performer[1].function.coding[0].system
== "http://terminology.hl7.org/CodeSystem/v2-0443"
)
assert inst.primarySource is True
assert inst.programEligibility[0].program.text == "VFC"
assert inst.programEligibility[0].programStatus.coding[0].code == "uninsured"
assert inst.programEligibility[0].programStatus.coding[0].system == (
"http://terminology.hl7.org/CodeSystem/immunization-program-" "eligibility"
)
assert inst.route.coding[0].code == "IM"
assert inst.route.coding[0].display == "Injection, intramuscular"
assert inst.route.coding[0].system == (
"http://terminology.hl7.org/CodeSystem/v3-RouteOfAdministrati" "on"
)
assert inst.site.coding[0].code == "LT"
assert inst.site.coding[0].display == "left thigh"
assert (
inst.site.coding[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActSite"
)
assert inst.status == "completed"
assert inst.subpotentReason[0].coding[0].code == "partialdose"
assert inst.subpotentReason[0].coding[0].system == (
"http://terminology.hl7.org/CodeSystem/immunization-" "subpotent-reason"
)
assert inst.text.status == "generated"
assert inst.vaccineCode.coding[0].code == "GNHEP"
assert inst.vaccineCode.coding[0].system == "urn:oid:1.2.36.1.2001.1005.17"
assert inst.vaccineCode.text == "Hepatitis B"
def test_immunization_6(base_settings):
"""No. 6 tests collection for Immunization.
Test File: immunization-example-subpotent.json
"""
filename = (
base_settings["unittest_data_dir"] / "immunization-example-subpotent.json"
)
inst = immunization.Immunization.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Immunization" == inst.resource_type
impl_immunization_6(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Immunization" == data["resourceType"]
inst2 = immunization.Immunization(**data)
impl_immunization_6(inst2)
|
c1f8d008d757b877fc3f0967641c89dc3a7b9479
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/HLTrigger/Configuration/python/HLT_75e33/modules/hltElePixelHitTripletsClusterRemoverUnseeded_cfi.py
|
be35b5393cecd67d90cab48a8327285024fa2f94
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 312
|
py
|
hltElePixelHitTripletsClusterRemoverUnseeded_cfi.py
|
import FWCore.ParameterSet.Config as cms
hltElePixelHitTripletsClusterRemoverUnseeded = cms.EDProducer("SeedClusterRemoverPhase2",
phase2OTClusters = cms.InputTag("siPhase2Clusters"),
pixelClusters = cms.InputTag("siPixelClusters"),
trajectories = cms.InputTag("hltElePixelSeedsTripletsUnseeded")
)
|
466644740b6388ceedc45ea15237dbb549d7d78b
|
1742b6719b988e5519373002305e31d28b8bd691
|
/sdk/python/pulumi_aws/autoscaling/tag.py
|
64dab2c8490fccdf0fbb1647979ae4ec90d33a67
|
[
"MPL-2.0",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-aws
|
4f7fdb4a816c5ea357cff2c2e3b613c006e49f1a
|
42b0a0abdf6c14da248da22f8c4530af06e67b98
|
refs/heads/master
| 2023-08-03T23:08:34.520280
| 2023-08-01T18:09:58
| 2023-08-01T18:09:58
| 97,484,940
| 384
| 171
|
Apache-2.0
| 2023-09-14T14:48:40
| 2017-07-17T14:20:33
|
Java
|
UTF-8
|
Python
| false
| false
| 9,822
|
py
|
tag.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['TagArgs', 'Tag']
@pulumi.input_type
class TagArgs:
def __init__(__self__, *,
autoscaling_group_name: pulumi.Input[str],
tag: pulumi.Input['TagTagArgs']):
"""
The set of arguments for constructing a Tag resource.
:param pulumi.Input[str] autoscaling_group_name: Name of the Autoscaling Group to apply the tag to.
:param pulumi.Input['TagTagArgs'] tag: Tag to create. The `tag` block is documented below.
"""
pulumi.set(__self__, "autoscaling_group_name", autoscaling_group_name)
pulumi.set(__self__, "tag", tag)
@property
@pulumi.getter(name="autoscalingGroupName")
def autoscaling_group_name(self) -> pulumi.Input[str]:
"""
Name of the Autoscaling Group to apply the tag to.
"""
return pulumi.get(self, "autoscaling_group_name")
@autoscaling_group_name.setter
def autoscaling_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "autoscaling_group_name", value)
@property
@pulumi.getter
def tag(self) -> pulumi.Input['TagTagArgs']:
"""
Tag to create. The `tag` block is documented below.
"""
return pulumi.get(self, "tag")
@tag.setter
def tag(self, value: pulumi.Input['TagTagArgs']):
pulumi.set(self, "tag", value)
@pulumi.input_type
class _TagState:
def __init__(__self__, *,
autoscaling_group_name: Optional[pulumi.Input[str]] = None,
tag: Optional[pulumi.Input['TagTagArgs']] = None):
"""
Input properties used for looking up and filtering Tag resources.
:param pulumi.Input[str] autoscaling_group_name: Name of the Autoscaling Group to apply the tag to.
:param pulumi.Input['TagTagArgs'] tag: Tag to create. The `tag` block is documented below.
"""
if autoscaling_group_name is not None:
pulumi.set(__self__, "autoscaling_group_name", autoscaling_group_name)
if tag is not None:
pulumi.set(__self__, "tag", tag)
@property
@pulumi.getter(name="autoscalingGroupName")
def autoscaling_group_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the Autoscaling Group to apply the tag to.
"""
return pulumi.get(self, "autoscaling_group_name")
@autoscaling_group_name.setter
def autoscaling_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "autoscaling_group_name", value)
@property
@pulumi.getter
def tag(self) -> Optional[pulumi.Input['TagTagArgs']]:
"""
Tag to create. The `tag` block is documented below.
"""
return pulumi.get(self, "tag")
@tag.setter
def tag(self, value: Optional[pulumi.Input['TagTagArgs']]):
pulumi.set(self, "tag", value)
class Tag(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
autoscaling_group_name: Optional[pulumi.Input[str]] = None,
tag: Optional[pulumi.Input[pulumi.InputType['TagTagArgs']]] = None,
__props__=None):
"""
Manages an individual Autoscaling Group (ASG) tag. This resource should only be used in cases where ASGs are created outside the provider (e.g., ASGs implicitly created by EKS Node Groups).
> **NOTE:** This tagging resource should not be combined with the resource for managing the parent resource. For example, using `autoscaling.Group` and `autoscaling.Tag` to manage tags of the same ASG will cause a perpetual difference where the `autoscaling.Group` resource will try to remove the tag being added by the `autoscaling.Tag` resource.
> **NOTE:** This tagging resource does not use the provider `ignore_tags` configuration.
## Import
`aws_autoscaling_group_tag` can be imported by using the ASG name and key, separated by a comma (`,`), e.g.,
```sh
$ pulumi import aws:autoscaling/tag:Tag example asg-example,k8s.io/cluster-autoscaler/node-template/label/eks.amazonaws.com/capacityType
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] autoscaling_group_name: Name of the Autoscaling Group to apply the tag to.
:param pulumi.Input[pulumi.InputType['TagTagArgs']] tag: Tag to create. The `tag` block is documented below.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TagArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an individual Autoscaling Group (ASG) tag. This resource should only be used in cases where ASGs are created outside the provider (e.g., ASGs implicitly created by EKS Node Groups).
> **NOTE:** This tagging resource should not be combined with the resource for managing the parent resource. For example, using `autoscaling.Group` and `autoscaling.Tag` to manage tags of the same ASG will cause a perpetual difference where the `autoscaling.Group` resource will try to remove the tag being added by the `autoscaling.Tag` resource.
> **NOTE:** This tagging resource does not use the provider `ignore_tags` configuration.
## Import
`aws_autoscaling_group_tag` can be imported by using the ASG name and key, separated by a comma (`,`), e.g.,
```sh
$ pulumi import aws:autoscaling/tag:Tag example asg-example,k8s.io/cluster-autoscaler/node-template/label/eks.amazonaws.com/capacityType
```
:param str resource_name: The name of the resource.
:param TagArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TagArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
autoscaling_group_name: Optional[pulumi.Input[str]] = None,
tag: Optional[pulumi.Input[pulumi.InputType['TagTagArgs']]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TagArgs.__new__(TagArgs)
if autoscaling_group_name is None and not opts.urn:
raise TypeError("Missing required property 'autoscaling_group_name'")
__props__.__dict__["autoscaling_group_name"] = autoscaling_group_name
if tag is None and not opts.urn:
raise TypeError("Missing required property 'tag'")
__props__.__dict__["tag"] = tag
super(Tag, __self__).__init__(
'aws:autoscaling/tag:Tag',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
autoscaling_group_name: Optional[pulumi.Input[str]] = None,
tag: Optional[pulumi.Input[pulumi.InputType['TagTagArgs']]] = None) -> 'Tag':
"""
Get an existing Tag resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] autoscaling_group_name: Name of the Autoscaling Group to apply the tag to.
:param pulumi.Input[pulumi.InputType['TagTagArgs']] tag: Tag to create. The `tag` block is documented below.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _TagState.__new__(_TagState)
__props__.__dict__["autoscaling_group_name"] = autoscaling_group_name
__props__.__dict__["tag"] = tag
return Tag(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="autoscalingGroupName")
def autoscaling_group_name(self) -> pulumi.Output[str]:
"""
Name of the Autoscaling Group to apply the tag to.
"""
return pulumi.get(self, "autoscaling_group_name")
@property
@pulumi.getter
def tag(self) -> pulumi.Output['outputs.TagTag']:
"""
Tag to create. The `tag` block is documented below.
"""
return pulumi.get(self, "tag")
|
fb04329d141bcdd4d3ef877e21eb21a202262e5c
|
d41442e43c621465abac584fa4202e44876c097d
|
/benchmarks/src/submit/utils.py
|
cf77d6ee433d7847836d08e9bdf1214146542ef4
|
[
"MIT"
] |
permissive
|
It4innovations/hyperqueue
|
90904ba415c3bee9b93d470e89c26c4129e2f60d
|
d7312e96194e2ebb3bef04f7744dc212953c146b
|
refs/heads/main
| 2023-09-04T09:54:22.020632
| 2023-08-07T09:10:20
| 2023-08-08T11:46:13
| 349,152,473
| 171
| 18
|
MIT
| 2023-09-12T09:55:00
| 2021-03-18T16:57:34
|
Rust
|
UTF-8
|
Python
| false
| false
| 566
|
py
|
utils.py
|
from pathlib import Path
def generate_job_dir(workdir: Path) -> Path:
"""Tries to find a directory in `workdir` which name is an integer and return a large integer
padded. The returned name is padded by zeros."""
workdir.mkdir(parents=True, exist_ok=True)
ids = []
for item in workdir.iterdir():
if item.is_dir():
try:
ids.append(int(item.name))
except BaseException:
pass
max_id = max(ids or [0])
dir_name = f"{max_id + 1:03}"
return (workdir / dir_name).absolute()
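# A minimal usage sketch (the directory layout is illustrative only):
#     if workdir already contains directories "001" and "002", then
#     generate_job_dir(Path("workdir"))  # -> <abs path>/workdir/003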
|
6f02a800934ac4976046d3d3cd537b1a3314b687
|
549270020f6c8724e2ef1b12e38d11b025579f8d
|
/recipes/opusfile/all/conanfile.py
|
ec4750c742092bb38ca095f954368f08dce43273
|
[
"MIT"
] |
permissive
|
conan-io/conan-center-index
|
1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43
|
3b17e69bb4e5601a850b6e006e44775e690bac33
|
refs/heads/master
| 2023-08-31T11:34:45.403978
| 2023-08-31T11:13:23
| 2023-08-31T11:13:23
| 204,671,232
| 844
| 1,820
|
MIT
| 2023-09-14T21:22:42
| 2019-08-27T09:43:58
|
Python
|
UTF-8
|
Python
| false
| false
| 7,602
|
py
|
conanfile.py
|
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.apple import fix_apple_shared_install_name
from conan.tools.env import VirtualBuildEnv
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, replace_in_file, rm, rmdir
from conan.tools.gnu import Autotools, AutotoolsToolchain, PkgConfigDeps
from conan.tools.layout import basic_layout
from conan.tools.microsoft import is_msvc, MSBuild, MSBuildDeps, MSBuildToolchain
import os
required_conan_version = ">=1.54.0"
class OpusFileConan(ConanFile):
name = "opusfile"
description = "stand-alone decoder library for .opus streams"
topics = ("opus", "opusfile", "audio", "decoder", "decoding", "multimedia", "sound")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/xiph/opusfile"
license = "BSD-3-Clause"
package_type = "library"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"http": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"http": True,
}
@property
def _settings_build(self):
return getattr(self, "settings_build", self.settings)
@property
def _msbuild_configuration(self):
return "Debug" if self.settings.build_type == "Debug" else "Release"
def export_sources(self):
export_conandata_patches(self)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
self.settings.rm_safe("compiler.cppstd")
self.settings.rm_safe("compiler.libcxx")
def layout(self):
basic_layout(self, src_folder="src")
def requirements(self):
self.requires("ogg/1.3.5", transitive_headers=True)
self.requires("opus/1.3.1", transitive_headers=True)
if self.options.http:
self.requires("openssl/[>=1.1 <4]")
def validate(self):
if is_msvc(self) and self.options.shared:
raise ConanInvalidConfiguration(f"{self.ref} doesn't support building as shared with Visual Studio")
def build_requirements(self):
if not is_msvc(self):
self.tool_requires("libtool/2.4.7")
if not self.conf.get("tools.gnu:pkg_config", check_type=str):
self.tool_requires("pkgconf/1.9.3")
if self._settings_build.os == "Windows":
self.win_bash = True
if not self.conf.get("tools.microsoft.bash:path", check_type=str):
self.tool_requires("msys2/cci.latest")
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
if is_msvc(self):
tc = MSBuildToolchain(self)
tc.configuration = self._msbuild_configuration
tc.properties["WholeProgramOptimization"] = "false"
tc.generate()
deps = MSBuildDeps(self)
deps.configuration = self._msbuild_configuration
deps.generate()
else:
VirtualBuildEnv(self).generate()
tc = AutotoolsToolchain(self)
yes_no = lambda v: "yes" if v else "no"
tc.configure_args.extend([
f"--enable-http={yes_no(self.options.http)}",
"--disable-examples",
])
tc.generate()
PkgConfigDeps(self).generate()
def build(self):
apply_conandata_patches(self)
if is_msvc(self):
sln_folder = os.path.join(self.source_folder, "win32", "VS2015")
vcxproj = os.path.join(sln_folder, "opusfile.vcxproj")
if not self.options.http:
replace_in_file(self, vcxproj, "OP_ENABLE_HTTP;", "")
#==============================
# TODO: to remove once https://github.com/conan-io/conan/pull/12817 available in conan client
replace_in_file(
self, vcxproj,
"<WholeProgramOptimization>true</WholeProgramOptimization>",
"",
)
replace_in_file(
self, vcxproj,
"<PlatformToolset>v140</PlatformToolset>",
f"<PlatformToolset>{MSBuildToolchain(self).toolset}</PlatformToolset>",
)
import_conan_generators = ""
for props_file in [MSBuildToolchain.filename, "conandeps.props"]:
props_path = os.path.join(self.generators_folder, props_file)
if os.path.exists(props_path):
import_conan_generators += f"<Import Project=\"{props_path}\" />"
replace_in_file(
self, vcxproj,
"<Import Project=\"$(VCTargetsPath)\\Microsoft.Cpp.targets\" />",
f"{import_conan_generators}<Import Project=\"$(VCTargetsPath)\\Microsoft.Cpp.targets\" />",
)
#==============================
msbuild = MSBuild(self)
msbuild.build_type = self._msbuild_configuration
msbuild.platform = "Win32" if self.settings.arch == "x86" else msbuild.platform
msbuild.build(os.path.join(sln_folder, "opusfile.sln"), targets=["opusfile"])
else:
autotools = Autotools(self)
autotools.autoreconf()
autotools.configure()
autotools.make()
def package(self):
copy(self, "COPYING", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
if is_msvc(self):
include_folder = os.path.join(self.source_folder, "include")
copy(self, "*", src=include_folder, dst=os.path.join(self.package_folder, "include", "opus"))
copy(self, "*.dll", src=self.source_folder, dst=os.path.join(self.package_folder, "bin"), keep_path=False)
copy(self, "*.lib", src=self.source_folder, dst=os.path.join(self.package_folder, "lib"), keep_path=False)
else:
autotools = Autotools(self)
autotools.install()
rmdir(self, os.path.join(self.package_folder, "share"))
rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
rm(self, "*.la", os.path.join(self.package_folder, "lib"))
fix_apple_shared_install_name(self)
def package_info(self):
self.cpp_info.components["libopusfile"].set_property("pkg_config_name", "opusfile")
self.cpp_info.components["libopusfile"].libs = ["opusfile"]
self.cpp_info.components["libopusfile"].includedirs.append(os.path.join("include", "opus"))
self.cpp_info.components["libopusfile"].requires = ["ogg::ogg", "opus::opus"]
if self.settings.os in ("FreeBSD", "Linux"):
self.cpp_info.components["libopusfile"].system_libs = ["m", "dl", "pthread"]
if is_msvc(self):
if self.options.http:
self.cpp_info.components["libopusfile"].requires.append("openssl::openssl")
else:
self.cpp_info.set_property("pkg_config_name", "opusfile-do-not-use")
self.cpp_info.components["opusurl"].set_property("pkg_config_name", "opusurl")
self.cpp_info.components["opusurl"].libs = ["opusurl"]
self.cpp_info.components["opusurl"].requires = ["libopusfile"]
if self.options.http:
self.cpp_info.components["opusurl"].requires.append("openssl::openssl")
|
4e34917d0dc80f05bf6187763687f8a35951550b
|
2ae0b8d95d439ccfd55ea7933ad4a2994ad0f6c5
|
/tools/mo/unit_tests/mo/front/caffe/proposal_ext_test.py
|
3452996b921619444599e7eaae51efdf5e552764
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/openvino
|
38ea745a247887a4e14580dbc9fc68005e2149f9
|
e4bed7a31c9f00d8afbfcabee3f64f55496ae56a
|
refs/heads/master
| 2023-08-18T03:47:44.572979
| 2023-08-17T21:24:59
| 2023-08-17T21:24:59
| 153,097,643
| 3,953
| 1,492
|
Apache-2.0
| 2023-09-14T21:42:24
| 2018-10-15T10:54:40
|
C++
|
UTF-8
|
Python
| false
| false
| 1,817
|
py
|
proposal_ext_test.py
|
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
from unittest.mock import patch
from openvino.tools.mo.front.caffe.proposal_ext import ProposalFrontExtractor
from openvino.tools.mo.ops.proposal import ProposalOp
from openvino.tools.mo.ops.op import Op
from unit_tests.utils.extractors import FakeMultiParam
from unit_tests.utils.graph import FakeNode
class FakeProposalProtoLayer:
def __init__(self, val):
self.proposal_param = val
class TestProposalExt(unittest.TestCase):
@classmethod
def setUpClass(cls):
Op.registered_ops['Proposal'] = ProposalOp
def test_proposal_no_pb_no_ml(self):
self.assertRaises(AttributeError, ProposalFrontExtractor.extract, None)
@patch('openvino.tools.mo.front.caffe.proposal_ext.merge_attrs')
def test_proposal_ext_ideal_numbers(self, merge_attrs):
params = {
'feat_stride': 1,
'base_size': 16,
'min_size': 16,
'ratio': 1,
'scale': 2,
'pre_nms_topn': 6000,
'post_nms_topn': 300,
'nms_thresh': 0.7
}
merge_attrs.return_value = {
**params
}
fake_pl = FakeProposalProtoLayer(FakeMultiParam(params))
fake_node = FakeNode(fake_pl, None)
ProposalFrontExtractor.extract(fake_node)
exp_res = {
'type': "Proposal",
'feat_stride': 1,
'base_size': 16,
'min_size': 16,
'ratio': 1,
'scale': 2,
'pre_nms_topn': 6000,
'post_nms_topn': 300,
'nms_thresh': 0.7,
'infer': ProposalOp.proposal_infer
}
for key in exp_res.keys():
self.assertEqual(fake_node[key], exp_res[key])
|
0c8ea3facb71c3df6e039e050c387c2eafd05a8b
|
a411a55762de11dc2c9d913ff33d2f1477ac02cf
|
/orc8r/cloud/deploy/orc8r_deployer/docker/root/scripts/utils/common.py
|
af62838cd6f4154cda9d3cd57d19315a76147229
|
[
"BSD-3-Clause"
] |
permissive
|
magma/magma
|
0dc48c1513d9968bd05fb7589f302c192b7c0f94
|
0e1d895dfe625681229e181fbc2dbad83e13c5cb
|
refs/heads/master
| 2023-09-04T09:31:56.140395
| 2023-08-29T13:54:49
| 2023-08-29T13:54:49
| 170,803,235
| 1,219
| 525
|
NOASSERTION
| 2023-09-07T17:45:42
| 2019-02-15T04:46:24
|
C++
|
UTF-8
|
Python
| false
| false
| 2,055
|
py
|
common.py
|
"""
Copyright 2021 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import os
import pathlib
import subprocess
import sys
import click
import yaml
def init():
constants = None
try:
with open("/root/config.yml") as f:
constants = yaml.load(f, Loader=yaml.FullLoader)
except OSError:
print("Failed opening config.yml file")
dirnames = (constants["config_dir"], constants["secret_dir"])
for dirname in dirnames:
pathlib.Path(dirname).mkdir(parents=True, exist_ok=True)
return constants
def execute_command(cmd: list[str], cwd=None, env=None) -> int:
"""Execute command and `return error code
Args:
cmd (list[str]): list describing command to be run
Returns:
int: return code for the executed command
"""
if env:
env.update(os.environ)
with subprocess.Popen(cmd, stdout=subprocess.PIPE, cwd=cwd, env=env) as p:
for output in p.stdout:
click.echo(output, nl=False)
return p.wait()
return 1
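# A short usage sketch (command and variables are illustrative only):
#     rc = execute_command(["ls", "-l"], cwd="/tmp", env={"FOO": "bar"})
#     # streams the command's stdout through click.echo and returns its exit code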
def run_command(cmd: list[str]) -> subprocess.CompletedProcess:
"""Run command and capture output with string encoding
Args:
cmd (list[str]): list describing command to be run
Returns:
subprocess.CompletedProcess: return value from run
"""
return subprocess.run(cmd, encoding='utf-8', capture_output=True)
def get_json(fn: str) -> dict:
try:
with open(fn) as f:
return json.load(f)
except OSError:
pass
return {}
def put_json(fn: str, cfgs: dict):
with open(fn, 'w') as outfile:
json.dump(cfgs, outfile)
|
9e32aef88522f28f5ce81bcfcfa855e8f4ecafcf
|
ae31542273a142210a1ff30fb76ed9d45d38eba9
|
/gpMgmt/bin/gpcheckcat_modules/repair_missing_extraneous.py
|
49eada7c55487a7f11f7030d05a82f2148bf4506
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"PostgreSQL",
"OpenSSL",
"LicenseRef-scancode-stream-benchmark",
"ISC",
"LicenseRef-scancode-openssl",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-ssleay-windows",
"BSD-2-Clause",
"Python-2.0"
] |
permissive
|
greenplum-db/gpdb
|
8334837bceb2d5d51a684500793d11b190117c6a
|
2c0f8f0fb24a2d7a7da114dc80f5f5a2712fca50
|
refs/heads/main
| 2023-08-22T02:03:03.806269
| 2023-08-21T22:59:53
| 2023-08-22T01:17:10
| 44,781,140
| 6,417
| 2,082
|
Apache-2.0
| 2023-09-14T20:33:42
| 2015-10-23T00:25:17
|
C
|
UTF-8
|
Python
| false
| false
| 2,990
|
py
|
repair_missing_extraneous.py
|
#!/usr/bin/env python3
from gppylib.utils import escapeDoubleQuoteInSQLString
class RepairMissingExtraneous:
def __init__(self, catalog_table_obj, issues, pk_name):
self.catalog_table_obj = catalog_table_obj
catalog_name = self.catalog_table_obj.getTableName()
self._escaped_catalog_name = escapeDoubleQuoteInSQLString(catalog_name)
self._issues = issues
self._pk_name = pk_name
def _generate_delete_sql_for_oid(self, pk_name, oids):
escaped_pk_name = escapeDoubleQuoteInSQLString(pk_name)
delete_sql = 'BEGIN;set allow_system_table_mods=true;delete from {0} where {1} in ({2});COMMIT;'
return delete_sql.format(self._escaped_catalog_name, escaped_pk_name, ','.join(str(oid) for oid in oids))
def _generate_delete_sql_for_pkeys(self, pk_names):
delete_sql = 'BEGIN;set allow_system_table_mods=true;'
for issue in self._issues:
delete_issue_sql = 'delete from {0} where '
for pk, issue_col in zip(pk_names, issue):
operator = " and " if pk != pk_names[-1] else ";"
add_on = "{pk} = '{col}'{operator}".format(pk=pk,
col=str(issue_col),
operator=operator)
delete_issue_sql += add_on
delete_issue_sql = delete_issue_sql.format(self._escaped_catalog_name)
delete_sql += delete_issue_sql
delete_sql += 'COMMIT;'
return delete_sql
def get_delete_sql(self, oids):
if self.catalog_table_obj.tableHasConsistentOids():
pk_name = 'oid' if self._pk_name is None else self._pk_name
return self._generate_delete_sql_for_oid(pk_name=pk_name, oids=oids)
pk_names = tuple(self.catalog_table_obj.getPrimaryKey())
return self._generate_delete_sql_for_pkeys(pk_names=pk_names)
def get_segment_to_oid_mapping(self, all_seg_ids):
if not self._issues:
return
# issues look like this
# [(49401, "extra", [1,2]),
# (49401, "extra", [1,2])]
# OR
# [(49401, 'cmax', "extra", [1,2]),
# (49401, 'cmax', "extra", [1,2])]
all_seg_ids = set([seg_id for seg_id in all_seg_ids])
oids_to_segment_mapping = {}
for issue in self._issues:
oid = issue[0]
issue_type = issue[-2]
seg_ids = issue[-1]
            # if an oid is missing from a segment(s), then it is considered to be extra
# on all the other segments/coordinator
if issue_type == "missing":
seg_ids = all_seg_ids - set(seg_ids)
for seg_id in seg_ids:
if seg_id not in oids_to_segment_mapping:
oids_to_segment_mapping[seg_id] = set()
oids_to_segment_mapping[seg_id].add(oid)
return oids_to_segment_mapping
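# Illustrative input/output for get_segment_to_oid_mapping (all values made up):
#     issues = [(49401, "extra", [1, 2]), (49402, "missing", [1])]
#     all_seg_ids = [-1, 0, 1, 2]
#     -> {1: {49401}, 2: {49401, 49402}, -1: {49402}, 0: {49402}}
# (the "missing" oid becomes extra on every segment except the ones it is missing from)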
|
2a8db5833f6dcdf784987cf5f709092a1764c29c
|
fa3f6d4e9169fb95f828013d179d03accdff381b
|
/grr/client/grr_response_client/client_utils.py
|
a5bc162d75f08abd2544fa12730336cb1b993188
|
[
"Apache-2.0"
] |
permissive
|
google/grr
|
c51a2bd251ed2f7adae538541990a2cc01fdcc8c
|
44c0eb8c938302098ef7efae8cfd6b90bcfbb2d6
|
refs/heads/master
| 2023-09-05T20:02:36.823914
| 2023-07-26T09:34:09
| 2023-07-26T09:34:09
| 14,909,673
| 4,683
| 927
|
Apache-2.0
| 2023-07-26T09:34:10
| 2013-12-04T00:17:53
|
Python
|
UTF-8
|
Python
| false
| false
| 4,729
|
py
|
client_utils.py
|
#!/usr/bin/env python
"""Client utilities."""
import logging
import os
import sys
from typing import Text
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.util import filesystem
# pylint: disable=g-import-not-at-top
if sys.platform == "win32":
from grr_response_client import client_utils_windows as _client_utils
elif sys.platform == "darwin":
from grr_response_client import client_utils_osx as _client_utils
else:
from grr_response_client import client_utils_linux as _client_utils
# pylint: enable=g-import-not-at-top
# pylint: disable=g-bad-name
CanonicalPathToLocalPath = _client_utils.CanonicalPathToLocalPath
FindProxies = _client_utils.FindProxies
GetExtAttrs = _client_utils.GetExtAttrs
GetRawDevice = _client_utils.GetRawDevice
KeepAlive = _client_utils.KeepAlive
LocalPathToCanonicalPath = _client_utils.LocalPathToCanonicalPath
MemoryRegions = _client_utils.MemoryRegions
OpenProcessForMemoryAccess = _client_utils.OpenProcessForMemoryAccess
TransactionLog = _client_utils.TransactionLog
VerifyFileOwner = _client_utils.VerifyFileOwner
CreateProcessFromSerializedFileDescriptor = _client_utils.CreateProcessFromSerializedFileDescriptor
# pylint: enable=g-bad-name
def StatEntryFromPath(
path: Text,
pathspec: rdf_paths.PathSpec,
ext_attrs: bool = True,
follow_symlink: bool = True,
) -> rdf_client_fs.StatEntry:
"""Builds a stat entry object from a given path.
Args:
path: A path (string value) to stat.
pathspec: A `PathSpec` corresponding to the `path`.
ext_attrs: Whether to include extended file attributes in the result.
follow_symlink: Whether links should be followed.
Returns:
`StatEntry` object.
"""
try:
stat = filesystem.Stat.FromPath(path, follow_symlink=follow_symlink)
except (IOError, OSError) as error:
logging.error("Failed to obtain stat for '%s': %s", pathspec, error)
return rdf_client_fs.StatEntry(pathspec=pathspec)
return StatEntryFromStat(stat, pathspec, ext_attrs=ext_attrs)
def StatEntryFromStat(stat: filesystem.Stat,
pathspec: rdf_paths.PathSpec,
ext_attrs: bool = True) -> rdf_client_fs.StatEntry:
"""Build a stat entry object from a given stat object.
Args:
stat: A `Stat` object.
pathspec: A `PathSpec` from which `stat` was obtained.
ext_attrs: Whether to include extended file attributes in the result.
Returns:
`StatEntry` object.
"""
result = rdf_client_fs.StatEntry(pathspec=pathspec)
for attr in _STAT_ATTRS:
value = getattr(stat.GetRaw(), attr, None)
if value is None:
continue
# TODO(hanuszczak): Why are we doing this?
value = int(value)
if value < 0:
value &= 0xFFFFFFFF
setattr(result, attr, value)
result.st_flags_linux = stat.GetLinuxFlags()
result.st_flags_osx = stat.GetOsxFlags()
if ext_attrs:
# TODO(hanuszczak): Can we somehow incorporate extended attribute getter to
# the `Stat` class? That would make the code a lot prettier but would force
# `utils` to depend on `xattrs`.
result.ext_attrs = list(GetExtAttrs(stat.GetPath()))
if stat.GetSymlinkTarget() is not None:
result.symlink = stat.GetSymlinkTarget()
return result
def StatEntryFromStatPathSpec(stat: filesystem.Stat,
ext_attrs: bool) -> rdf_client_fs.StatEntry:
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS,
path=LocalPathToCanonicalPath(stat.GetPath()),
path_options=rdf_paths.PathSpec.Options.CASE_LITERAL)
return StatEntryFromStat(stat, pathspec, ext_attrs=ext_attrs)
def StatResultFromStatEntry(
stat_entry: rdf_client_fs.StatEntry) -> os.stat_result:
"""Returns a `os.stat_result` with most information from `StatEntry`.
  This is a lossy conversion: only the first 10 stat_result fields are
  populated, because the os.stat_result constructor is inflexible.
Args:
stat_entry: An instance of rdf_client_fs.StatEntry.
Returns:
An instance of `os.stat_result` with basic fields populated.
"""
values = []
for attr in _STAT_ATTRS[:10]:
values.append(stat_entry.Get(attr))
return os.stat_result(values)
# It is important that the first 10 names are in the order that the stat_result
# constructor accepts. Only this way, a stat_result can be created from a
# StatEntry. See https://docs.python.org/3/library/os.html#os.stat_result
_STAT_ATTRS = [
"st_mode",
"st_ino",
"st_dev",
"st_nlink",
"st_uid",
"st_gid",
"st_size",
"st_atime",
"st_mtime",
"st_ctime",
"st_blocks",
"st_blksize",
"st_rdev",
]
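# Rough sketch of the conversion above (field values are illustrative):
#     entry = rdf_client_fs.StatEntry(st_mode=33188, st_size=1024)
#     st = StatResultFromStatEntry(entry)
#     # only the first 10 attributes (st_mode .. st_ctime) survive; st_blocks,
#     # st_blksize and st_rdev are dropped by the os.stat_result constructor call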
|
f0c00f619309b52eeb31d4e8ad67702f584c1d52
|
ce99bd11ca505967277f4689c621479c1987698e
|
/tools/ge-stats.py
|
e2737a0a143c29d182ab77868c2c956022fe61c2
|
[] |
no_license
|
n64decomp/007
|
5951258890f15431f273e1503674c5e0402c66e0
|
c46751089ddc18b12ef7a45b6a3e03de2054c422
|
refs/heads/master
| 2022-11-08T23:34:54.021033
| 2022-10-29T14:41:01
| 2022-10-29T14:41:01
| 241,212,109
| 359
| 48
| null | 2020-11-21T23:30:31
| 2020-02-17T21:31:00
|
C
|
UTF-8
|
Python
| false
| false
| 29,994
|
py
|
ge-stats.py
|
import datetime
import errno
import functools
import getopt
import os
import re
import shlex
import socket
import subprocess
import sys
import time
"""
re-writing the original ge-stats py. - Ben Burns, Dec 18, 2021
The general approach is to scan for source files in the specified directories (*.s, *.c).
Each file is evaluated, and a list of assembly function definitions is generated.
The map file is then parsed, giving a list of all known functions.
Stats are then computed for asm functions out of all known functions.
The logic to determine whether a function has an assembly definition is rather simple.
It doesn't handle any c preprocessor macros, so the results are not guaranteed to
be accurate. If a line begins with the text "glabel" and is only followed by one
word, that is assumed to be an assembly function definition. For the purposes
of this script, any assembly definition for any ROM version will count against
the total.
"""
# lower case
__supported_versions = ['us', 'jp', 'eu']
# any function read from .map file exceeding this size will throw an exception
__invalid_func_length_size = 1000000
# attempt to filter out extra glabels that are not function definitions. This is for jump table entries
__re_jpt_label = re.compile(r"\.L[0-9a-fA-F]{8}")
# report output paths
__report_dir = "tools" + os.path.sep + "report"
__report_bin = __report_dir + os.path.sep + "report"
__report_template = __report_dir + os.path.sep + "template.html"
__report_out_us = __report_dir + os.path.sep + "index.html"
__report_out_jp = __report_dir + os.path.sep + "JPN.htm"
__report_out_eu = __report_dir + os.path.sep + "EU.htm"
# default fallback if recently modified timestamps don't work
__mtime_fallback_filename = 'ge.u.z64'
class FunctionInfo:
"""
Info about a function
"""
def __init__(self, name: str):
# function name
self.name = name
# flag to indicate if an asm function definition exists
self.nonmatching = False
# length of function as computed from map file
self.length = 0
# parent SourceFileContent
self.parent = None
def __str__(self):
return self.name
def __repr__(self):
return self.name
class SourceFileContent:
"""
Information about a source file
"""
def __init__(self, path: str):
# path to file
self.path = path
# list of all asm function definitions found in the file
self.asm_functions = set()
# list of FunctionInfo as parsed from map file
self.all_functions = []
if path.endswith(".c") or path.endswith(".s") or path.endswith(".o"):
self.path_without_ext = path[:-2]
else:
self.path_without_ext = path
# reference to SearchDir
self.parent = None
# last modified time, in seconds since epoch
self.mtime = 0
def __str__(self):
return self.path
def __repr__(self):
return self.path
def __eq__(self, other):
if not isinstance(other, SourceFileContent):
# don't attempt to compare against unrelated types
return NotImplemented
return self.path == other.path
class SearchDir:
"""
    container to define search parameters.
"""
def __init__(self, path: str, recurse: bool, ignore = None, completed = None):
"""
default constructor
:param str path: folder path, relative to root where script is run. Should not
contain leading or trailing slash.
:param bool recurse: whether or not to recursively search contents of path.
:param ignore: optional files to ignore. List of strings. This is an explicit list of
files with any necessary folder prefix. Do not include leading slash. Wildcards
are not supported.
:param completed: optional files to mark as completed. List of strings. This is an
explicit list of files with any necessary folder prefix. Do not include leading
slash. Wildcards are not supported.
"""
self.path = path
self.recurse = not not recurse
# (config option) ignored files won't be added to source files
if ignore is None:
ignore = []
self.ignore = ignore
# (config option) completed files won't have asm definitions added
if completed is None:
completed = []
self.completed = completed
# (runtime stat) count of functions found in/under this directory
self.function_count = 0
# (runtime stat) length in bytes for all functions found
self.function_byte_count = 0
# (runtime stat) count of non matching functions found in/under this directory
self.nonmatching_count = 0
# (runtime stat) length in bytes for all non matching functions found
self.nonmatching_byte_count = 0
# (runtime stat) count of found files
self.file_count = 0
# (runtime stat) count of found files with no asm definitions
self.completed_file_count = 0
        # fix path separator if this is run on windows os (visual only, it will still run...)
self.path = self.path.replace('/', os.path.sep)
def __str__(self):
return self.path
def __repr__(self):
return self.path
def __eq__(self, other):
if not isinstance(other, SearchDir):
# don't attempt to compare against unrelated types
return NotImplemented
return self.path == other.path
class StatResults:
def __init__(self):
# "now" starting point, modification times must be before this datetime.
# Only applies to git log, OS modified time is unaffected.
self.now = datetime.datetime.now()
self.search_dirs = []
# reference to SourceFileContent with highest mtime
self.last_modified_file = None
# status flag. Used to determine if last modified timestamps are valid.
self.last_mtime_valid = False
# count of all functions
self.total_function_count = 0
# length in bytes for all functions
self.total_function_byte_count = 0
# count of non matching functions
self.total_nonmatching_count = 0
# length in bytes for all non matching functions
self.total_nonmatching_byte_count = 0
# count of found files with no asm definitions
self.total_completed_file_count = 0
# list of SourceFileContent
self.source_files = []
def mtime_os(file, now):
print('using os')
return int(os.path.getmtime(file))
def mtime_git(file, now):
print('using git')
try:
date_str = now.strftime('%Y-%m-%dT%H:%M:%S%z')
result = subprocess.run(['git', 'log', '-1', '--format=\"%ct\"', '--date=local', '--before=\"' + date_str + '\"', '--', file], stdout=subprocess.PIPE, universal_newlines=True)
except:
print ('fatal error reading git log history, maybe use OS modified time resolver, --mtime_os option. File: "' + file + '", date_str: "' + date_str + '"')
sys.exit(7)
log_out = result.stdout.strip().replace('"', '')
if not log_out:
return 0
return int(log_out)
def process_source_files(search, stats: StatResults, mtime_resolver):
stats.search_dirs = search
# regular expression: a line that starts with text "glabel", followed by
# a single space, followed by (captured) anything that is not whitespace
# one or more times, followed by end of line.
p1 = re.compile(r"^glabel (\S+)$")
# iterate each search location, looking inside source files for asm function definitions
for s in search:
if not os.path.isdir(s.path):
print('fatal: directory not found:', s.path)
sys.exit(3)
for root, dir_names, filenames in os.walk(s.path):
for filename in filenames:
file = os.path.join(root, filename)
if not (filename.lower().endswith(".c") or filename.lower().endswith(".s")):
continue
if filename in s.ignore:
continue
#print(file)
sfc = SourceFileContent(file)
sfc.asm_functions = set()
sfc.parent = s
sfc.mtime = mtime_resolver(file, stats.now)
import datetime
x = datetime.datetime.fromtimestamp(sfc.mtime)
print(sfc.path, ' = ', x.strftime('%c'))
# The `completed` list is manually configured to specify which files should not be
# counted against the total. This is done by simply not adding asm function
# definitions into the asm_functions property.
if filename not in s.completed:
with open(file) as fp:
# The "in" operator will be faster than regex here, but will include stuff
# we don't want. Do an initial pass with "in", then refine results with
# regex.
# The container is a hashset for cases where the function is
# declared multiple times (e.g., different versions)
file_glabel_lines = [line for line in fp.readlines() if "glabel" in line]
for line in file_glabel_lines:
r1 = p1.findall(line)
if r1 and len(r1) == 1 and not re.match(__re_jpt_label, r1[0]):
sfc.asm_functions.add(r1[0])
stats.source_files.append(sfc)
if not s.recurse:
break
return stats
def apply_build_map(stats: StatResults, version: str):
"""
This iterates over the build map file.
It scans for the start of a `.text` definition indicating a new file.
Then it parses each following line to get function names within the file
until a new section is encountered.
"""
version_code = version[0]
map_file_path = 'build' + os.path.sep + 'ge007.' + version_code + '.map'
infile = False
prevromaddr = 0
# regex:
# a line that starts with text " .text", followed by any whitespace (at least one), then a 64 bit hex address.
# the lower 32 bits of the address are captured in a group. Followed by any whitespace (at least one),
# followed by a hex address (capturing everything after "0x"), followed by any whitespace (at least one).
# It then matches a build path, based on version, capturing everything after the version and slash
# up to the file extension, followed by end of line.
p1 = re.compile(r"^ \.text\s+0x00000000([0-9a-f]{8})\s+0x([0-9a-f]+)\s+build\/" + re.escape(version_code) + r"\/([^\s]+)$")
# regex to match subsequent lines after the above regex matches. This will
# match any whitespace, then 64 bit address (capturing the lower 32 bits), then
# any whitespace (at least one), then capture the remaining non-whitespace-text until end of line (in capture group).
# That last capture group is the function name.
p2 = re.compile(r"^\s+0x00000000([0-9a-f]{8})\s+(\S+)$")
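    # Illustrative .map lines the two regexes above are meant to match (addresses
    # and paths are made up, shown here for the "us" version code):
    #   " .text 0x0000000070001050 0x9f0 build/u/src/game/chraction.o"  -> matches p1
    #   "       0x0000000070001050        chraction_init"               -> matches p2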
current_sfc = None
current_fi = None
with open(map_file_path, 'r') as map_file:
lines = map_file.readlines()
for line in lines:
m1 = p1.findall(line)
m2 = p2.findall(line)
# m1 match means this is a new .text section (new file)
if m1:
filestart = int(m1[0][0], 16)
filelen = int(m1[0][1], 16)
# normalize paths
segment = (m1[0][2]).replace('/', os.path.sep)
infile = True
prevromaddr = 0
prevfuncname = None
current_fi = None
if segment.endswith(".c") or segment.endswith(".s") or segment.endswith(".o"):
segment_without_ext = segment[:-2]
else:
segment_without_ext = segment
# get the SourceFileContent container for this file listing
current_sfc = next((x for x in stats.source_files if x.path_without_ext == segment_without_ext), None)
# if there's no existing reference, that means this function is listed
# in the map file but not found when searching directories previously.
# In that case, this file/function should be ignored.
if current_sfc is None:
infile = False
elif infile and m2:
romaddr = int(m2[0][0], 16)
funcname = m2[0][1]
if re.match(__re_jpt_label, funcname):
continue
if current_sfc is None:
print('invalid state: current_sfc not set')
sys.exit(4)
if current_fi is not None:
current_fi.length = romaddr - prevromaddr
if current_fi.length > __invalid_func_length_size:
raise ValueError("function length too large: " + str(funcname) + " len = " + str(current_fi.length))
# get the function info for this function, or create a new one
current_fi = next((x for x in current_sfc.all_functions if x.name == funcname), None)
if current_fi is None:
current_fi = FunctionInfo(funcname)
current_fi.parent = current_sfc
# check to see if there was an asm definition for this function
if funcname in current_sfc.asm_functions:
current_fi.nonmatching = True
current_sfc.all_functions.append(current_fi)
prevromaddr = romaddr
prevfuncname = funcname
elif infile:
infile = False
# this is a one line definition in the map file
if current_fi is None:
current_fi = FunctionInfo(funcname)
current_fi.parent = current_sfc
# check to see if there was an asm definition for this function
if funcname in current_sfc.asm_functions:
current_fi.nonmatching = True
# since this is only one entry, use the entire size
current_fi.length = filelen
current_sfc.all_functions.append(current_fi)
else:
current_fi.length = (filestart + filelen) - prevromaddr
if current_fi.length > __invalid_func_length_size:
raise ValueError("function length too large: " + str(funcname) + " len = " + str(current_fi.length))
def percent_complete(num, den):
if not (den > 0):
return 1
elif num == den:
return 1
frac = num / den
if frac <= 0:
return 0
elif frac > 0 and frac < 0.01:
return 0.01
elif frac >= 0.01 and frac <= 0.99:
return frac
elif frac >= 0.99 and frac < 1:
return 0.99
raise ValueError("Can't compute percent complete: " + str(num) + " / " + str(den))
def generate_default_stats(stats: StatResults):
"""
Iterate each file, and each function in each file, to compute
the total bytes/functions/files in each searchdir, and sum
the totals for the entire search.
"""
# track unique modification times
seen_mtime = set()
for file in stats.source_files:
# if this file comes from the .map file and does not have the
# parent association setup then it should be ignored.
if file.parent is None:
continue
if stats.last_modified_file is None or stats.last_modified_file.mtime < file.mtime:
stats.last_modified_file = file
seen_mtime.add(file.mtime)
for func in file.all_functions:
file.parent.function_count += 1
file.parent.function_byte_count += func.length
stats.total_function_count += 1
stats.total_function_byte_count += func.length
if func.nonmatching:
file.parent.nonmatching_count += 1
file.parent.nonmatching_byte_count += func.length
stats.total_nonmatching_count += 1
stats.total_nonmatching_byte_count += func.length
file.parent.file_count += 1
if len(file.asm_functions) == 0:
file.parent.completed_file_count += 1
stats.total_completed_file_count += 1
# More hacks to check for valid modification times.
# 10 is an arbitrary number, but this says that if there are
# at least 10 unique modification times, assume the timestamps
    # are valid. Otherwise this will fall back to show the default file.
if len(seen_mtime) > 10:
stats.last_mtime_valid = True
# If it wasn't possible to determine recently modified file (i.e., github actions)
# then fallback to "ge.u.z64"
if not stats.last_mtime_valid:
stats.last_modified_file = SourceFileContent(__mtime_fallback_filename)
def print_default_stats(stats: StatResults, version):
print()
print('~> Decomp Statistics for ' + version.upper() +' version')
print('__________________________________________________________________')
print()
print('FILES')
print('\trecently modified:')
if stats.last_mtime_valid:
recent_files = sorted(stats.source_files, key=lambda x: x.mtime, reverse=True)[:5]
for rf in recent_files:
print('\t\t' + rf.path)
else:
print('\t\t' + __mtime_fallback_filename)
print()
for s in stats.search_dirs:
print('\t{:12}\t{:5,} / {:5,} \t{:6.2f}%'.format(s.path, int(s.completed_file_count), int(s.file_count), (percent_complete(s.completed_file_count, s.file_count) * 100)))
print()
print('\ttotal \t{:5,} / {:5,} \t{:6.2f}%'.format(int(stats.total_completed_file_count), len(stats.source_files), (percent_complete(stats.total_completed_file_count, len(stats.source_files)) * 100)))
print('__________________________________________________________________')
print()
print('FUNCTIONS')
for s in stats.search_dirs:
d_completed_func_count = s.function_count - s.nonmatching_count
print('\t{:12}\t{:7,} / {:7,} \t{:6.2f}%'.format(s.path, int(d_completed_func_count), int(s.function_count), (percent_complete(d_completed_func_count, s.function_count) * 100)))
print()
    total_completed_func_count = stats.total_function_count - stats.total_nonmatching_count
    print('\ttotal \t{:7,} / {:7,} \t{:6.2f}%'.format(int(total_completed_func_count), int(stats.total_function_count), (percent_complete(total_completed_func_count, stats.total_function_count) * 100)))
print('__________________________________________________________________')
print()
print('BYTES')
for s in stats.search_dirs:
d_completed_byte_count = s.function_byte_count - s.nonmatching_byte_count
print('\t{:12}\t{:11,} / {:11,} \t{:6.2f}%'.format(s.path, int(d_completed_byte_count), int(s.function_byte_count), (percent_complete(d_completed_byte_count, s.function_byte_count) * 100)))
print()
completed_byte_count = stats.total_function_byte_count - stats.total_nonmatching_byte_count
print('\ttotal \t{:11,} / {:11,} \t{:6.2f}%'.format(int(completed_byte_count), int(stats.total_function_byte_count), (percent_complete(completed_byte_count, stats.total_function_byte_count) * 100)))
print('__________________________________________________________________')
print()
def print_non_matching_stats(stats: StatResults, version):
"""
List all non matching functions. Sort by byte length.
Print as csv of filename, function name, function length.
"""
functions = []
for file in stats.source_files:
for func in file.all_functions:
if func.nonmatching:
functions.append(func)
functions.sort(key=lambda x: x.length, reverse=True)
try:
print("filename, function, length")
for f in functions:
print(f.parent.path + ", " + f.name + ", " + str(f.length))
except socket.error as e:
if e.errno == errno.EPIPE:
sys.exit(0)
raise
def bytes_to_words(b):
return (int(b) >> 2) << 2
def generate_report(stats: StatResults, version):
"""
Send values over to the c report app.
Arguments must be added in the following order:
note: changing from "libultra" to "libultrare"
SRC_DIR: int, size in 32-bit words of completed functions, in src/ directory
SRC_DIR_MAX: int, size in 32-bit words of all tracked functions, in src/ directory
GAME_DIR: int, size in 32-bit words of completed functions, in src/game/ directory
GAME_DIR_MAX: int, size in 32-bit words of all tracked functions, in src/game/ directory
INFLATE_DIR: int, size in 32-bit words of completed functions, in src/inflate/ directory
INFLATE_DIR_MAX: int, size in 32-bit words of all tracked functions, in src/inflate/ directory
LIBULTRA_DIR: int, size in 32-bit words of completed functions, in src/libultrare/ directory
LIBULTRA_DIR_MAX: int, size in 32-bit words of all tracked functions, in src/libultrare/ directory
DECOMPILED_WORDS: int, size in 32-bit words of all completed functions
DECOMPILED_WORDS_MAX: int, size in 32-bit words of all tracked functions
DECOMPILED_FILES: int, count of all fully completed files
DECOMPILED_FILES_MAX: int, count of all tracked files
HTML_TEMPLATE: string: path to html template input file
HTML_OUTPUT: string: path to generated html file
LAST_MODIFIED_FILE: string: name of last modified file
LOG_LEVEL: int (enum): LOG_MIN = 0, LOG_DEF, LOG_MAX
"""
sd = next((x for x in stats.search_dirs if x.path == 'src'), None)
if sd is None:
raise ValueError('Could not find "src" SearchDir')
src_dir = bytes_to_words(sd.function_byte_count - sd.nonmatching_byte_count)
src_dir_max = bytes_to_words(sd.function_byte_count)
sd = next((x for x in stats.search_dirs if x.path == 'src/game'), None)
if sd is None:
raise ValueError('Could not find "src/game" SearchDir')
game_dir = bytes_to_words(sd.function_byte_count - sd.nonmatching_byte_count)
game_dir_max = bytes_to_words(sd.function_byte_count)
sd = next((x for x in stats.search_dirs if x.path == 'src/inflate'), None)
if sd is None:
raise ValueError('Could not find "src/inflate" SearchDir')
inflate_dir = bytes_to_words(sd.function_byte_count - sd.nonmatching_byte_count)
inflate_dir_max = bytes_to_words(sd.function_byte_count)
# note: libultrare
sd = next((x for x in stats.search_dirs if x.path == 'src/libultrare'), None)
if sd is None:
raise ValueError('Could not find "src/libultrare" SearchDir')
libultrare_dir = bytes_to_words(sd.function_byte_count - sd.nonmatching_byte_count)
libultrare_dir_max = bytes_to_words(sd.function_byte_count)
decompiled_words = bytes_to_words(stats.total_function_byte_count - stats.total_nonmatching_byte_count)
decompiled_words_max = bytes_to_words(stats.total_function_byte_count)
decompiled_files = stats.total_completed_file_count
decompiled_files_max = len(stats.source_files)
if version == 'us':
html_output = __report_out_us
elif version == 'jp':
html_output = __report_out_jp
elif version == 'eu':
html_output = __report_out_eu
else:
print('fatal: version', version, 'not supported! Supported versions are: ', ', '.join(__supported_versions))
sys.exit(2)
printstring = __report_bin + ' ' + \
str(src_dir) + ' ' + \
str(src_dir_max) + ' ' + \
str(game_dir) + ' ' + \
str(game_dir_max) + ' ' + \
str(inflate_dir) + ' ' + \
str(inflate_dir_max) + ' ' + \
str(libultrare_dir) + ' ' + \
str(libultrare_dir_max) + ' ' + \
str(decompiled_words) + ' ' + \
str(decompiled_words_max) + ' ' + \
str(decompiled_files) + ' ' + \
str(decompiled_files_max) + ' ' + \
__report_template + ' ' + \
html_output + ' ' + \
stats.last_modified_file.path + ' ' + \
'0'
subprocess.Popen(printstring.split())
def print_help():
print('Usage: python3 ge-stats2.py [OPTION]')
print('Generate Decompilation Stats for Goldeneye 007')
print('Example: python3 ge-stats2.py --version us')
print(' ')
print(' This script expects to be run from the root directory. There must be')
print(' a map file in ./build/ge007.u.map (or equivalent version). Source files')
print(' must be located in a folder in the root directory.')
print(' ')
print(' This script does not attempt to parse preprocessor definitions and')
print(' ignores version differences in source files. For the purposes of these')
print(' stats, any version with an asm definition of a function will count')
print(' against completion. However, a version is still required to specify')
print(' which build map to reference against.')
print(' ')
print('Options:')
print(' -v, --version=CODE generate decomp stats for version CODE (us, jp, or eu). Default is us')
print(' -r, --report generate html report using report tool')
print(' -n, --non_matching print csv of all non matching function definitions')
print(' --mtime_os use OS last modified time instead of git log')
print(' -h, --help display this help text and exit')
def main():
# default to US version
version = 'us'
run_report = False
print_method = 'default'
mtime_use_os = False
if (len(sys.argv) > 1):
arguments, values = getopt.getopt(sys.argv[1:], "hv:rn", ["help", "version=", "report", "non_matching", "mtime_os"])
for current_argument, current_value in arguments:
if current_argument in ("-h", "--help"):
print_help()
sys.exit()
elif current_argument in ("-v", "--version"):
version = current_value.lower()
elif current_argument in ("-r", "--report"):
run_report = True
elif current_argument in ("-n", "--non_matching"):
print_method = 'non_matching'
elif current_argument in ("--mtime_os"):
mtime_use_os = True
if version not in __supported_versions:
print('fatal: version', version, 'not supported! Supported versions are: ', ', '.join(__supported_versions))
sys.exit(2)
if run_report:
if not os.path.isfile(__report_bin):
print('fatal: file not found: ' + __report_bin)
sys.exit(6)
# if this is just getting status on nonmatching files, ignore modified time lookup.
if print_method == 'non_matching' and not run_report:
mtime_use_os = True
print(subprocess.run(['git', 'diff', 'origin/master', '--name-only', '"@{10 minutes ago}"']).stdout)
# Default to using git log to get the file's modified date.
# Git log will be much slower, but cloning a new repo (i.e., github actions online)
# will reset all the modified timestamps to the same value, so will need to
# resolve the last modified time from git history.
mtime_resolver = mtime_os
if not mtime_use_os:
if os.path.isdir('.git'):
mtime_resolver = mtime_git
else:
# ok, do one last check (for github actions)
try:
result = subprocess.run(['git', 'log', '-1', '--format=\"%ct\"', '--', 'readme.md'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
if result.returncode == 0:
mtime_resolver = mtime_git
except:
pass
# files to count as complete, in src/ directory
src_completed_list = [
'_start.s',
'aspboot.s',
'bootcode.s',
'getra.s',
'gspboot.s',
'osMapTLB.s',
'rom_header.s',
'rspboot.s',
'tlb_hardwire.s',
'tlb_resolve.s']
search = []
search.append(SearchDir('src', False, completed=src_completed_list))
search.append(SearchDir('src/game', False))
search.append(SearchDir('src/inflate', False))
search.append(SearchDir('src/libultrare', True))
stats = StatResults()
process_source_files(search, stats, mtime_resolver)
apply_build_map(stats, version)
generate_default_stats(stats)
if print_method == 'default':
print_default_stats(stats, version)
elif print_method == 'non_matching':
print_non_matching_stats(stats, version)
if run_report:
generate_report(stats, version)
if __name__ == '__main__':
#start_time = time.time()
main()
#print("--- %s seconds ---" % (time.time() - start_time))
|
b7ea3ee01ef000f3a414034b05391a9e6b5ed6c3
|
6c37d1d2437a08e43b13d621d4a8da4da7135b3a
|
/yt_dlp/extractor/camwithher.py
|
a0b3749edfcf1bd2c795c256b4d2069bae640f75
|
[
"Unlicense",
"GPL-2.0-or-later",
"MPL-2.0",
"BSD-3-Clause",
"GPL-3.0-or-later",
"LGPL-2.1-only",
"BSD-2-Clause",
"MIT"
] |
permissive
|
yt-dlp/yt-dlp
|
be040bde10cc40258c879c75ab30215686352824
|
d3d81cc98f554d0adb87d24bfd6fabaaa803944d
|
refs/heads/master
| 2023-09-05T21:15:21.050538
| 2023-09-05T20:35:23
| 2023-09-05T20:35:23
| 307,260,205
| 52,742
| 5,376
|
Unlicense
| 2023-09-14T05:22:08
| 2020-10-26T04:22:55
|
Python
|
UTF-8
|
Python
| false
| false
| 3,188
|
py
|
camwithher.py
|
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_duration,
unified_strdate,
)
class CamWithHerIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?camwithher\.tv/view_video\.php\?.*\bviewkey=(?P<id>\w+)'
_TESTS = [{
'url': 'http://camwithher.tv/view_video.php?viewkey=6e9a24e2c0e842e1f177&page=&viewtype=&category=',
'info_dict': {
'id': '5644',
'ext': 'flv',
'title': 'Periscope Tease',
'description': 'In the clouds teasing on periscope to my favorite song',
'duration': 240,
'view_count': int,
'comment_count': int,
'uploader': 'MileenaK',
'upload_date': '20160322',
'age_limit': 18,
},
'params': {
'skip_download': True,
}
}, {
'url': 'http://camwithher.tv/view_video.php?viewkey=6dfd8b7c97531a459937',
'only_matching': True,
}, {
'url': 'http://camwithher.tv/view_video.php?page=&viewkey=6e9a24e2c0e842e1f177&viewtype=&category=',
'only_matching': True,
}, {
'url': 'http://camwithher.tv/view_video.php?viewkey=b6c3b5bea9515d1a1fc4&page=&viewtype=&category=mv',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
flv_id = self._html_search_regex(
r'<a[^>]+href=["\']/download/\?v=(\d+)', webpage, 'video id')
# Video URL construction algorithm is reverse-engineered from cwhplayer.swf
rtmp_url = 'rtmp://camwithher.tv/clipshare/%s' % (
('mp4:%s.mp4' % flv_id) if int(flv_id) > 2010 else flv_id)
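        # For illustration only: with the flv_id "5644" from the test data above this
        # yields "rtmp://camwithher.tv/clipshare/mp4:5644.mp4"; ids <= 2010 are used
        # verbatim as the stream name instead.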
title = self._html_search_regex(
r'<div[^>]+style="float:left"[^>]*>\s*<h2>(.+?)</h2>', webpage, 'title')
description = self._html_search_regex(
r'>Description:</span>(.+?)</div>', webpage, 'description', default=None)
runtime = self._search_regex(
r'Runtime\s*:\s*(.+?) \|', webpage, 'duration', default=None)
if runtime:
runtime = re.sub(r'[\s-]', '', runtime)
duration = parse_duration(runtime)
view_count = int_or_none(self._search_regex(
r'Views\s*:\s*(\d+)', webpage, 'view count', default=None))
comment_count = int_or_none(self._search_regex(
r'Comments\s*:\s*(\d+)', webpage, 'comment count', default=None))
uploader = self._search_regex(
r'Added by\s*:\s*<a[^>]+>([^<]+)</a>', webpage, 'uploader', default=None)
upload_date = unified_strdate(self._search_regex(
r'Added on\s*:\s*([\d-]+)', webpage, 'upload date', default=None))
return {
'id': flv_id,
'url': rtmp_url,
'ext': 'flv',
'no_resume': True,
'title': title,
'description': description,
'duration': duration,
'view_count': view_count,
'comment_count': comment_count,
'uploader': uploader,
'upload_date': upload_date,
'age_limit': 18
}
|
1603181fcc28fc23bc46c44112a80124f67d4deb
|
66a9c25cf0c53e2c3029b423018b856103d709d4
|
/sleekxmpp/plugins/xep_0202/stanza.py
|
b6ccc960163ebc4d171ff357a7c08008fe1f49b8
|
[
"MIT",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
fritzy/SleekXMPP
|
1b02d3e2b22efeb6bf3f8f487e6c0343b9b85baf
|
cc1d470397de768ffcc41d2ed5ac3118d19f09f5
|
refs/heads/develop
| 2020-05-22T04:14:58.568822
| 2020-02-18T22:54:57
| 2020-02-18T22:54:57
| 463,405
| 658
| 254
|
NOASSERTION
| 2023-06-27T20:05:54
| 2010-01-08T05:54:45
|
Python
|
UTF-8
|
Python
| false
| false
| 3,689
|
py
|
stanza.py
|
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import logging
import datetime as dt
from sleekxmpp.xmlstream import ElementBase
from sleekxmpp.plugins import xep_0082
from sleekxmpp.thirdparty import tzutc, tzoffset
class EntityTime(ElementBase):
"""
The <time> element represents the local time for an XMPP agent.
The time is expressed in UTC to make synchronization easier
between entities, but the offset for the local timezone is also
included.
Example <time> stanzas:
<iq type="result">
<time xmlns="urn:xmpp:time">
<utc>2011-07-03T11:37:12.234569</utc>
<tzo>-07:00</tzo>
</time>
</iq>
Stanza Interface:
time -- The local time for the entity (updates utc and tzo).
utc -- The UTC equivalent to local time.
tzo -- The local timezone offset from UTC.
Methods:
get_time -- Return local time datetime object.
set_time -- Set UTC and TZO fields.
del_time -- Remove both UTC and TZO fields.
get_utc -- Return datetime object of UTC time.
set_utc -- Set the UTC time.
get_tzo -- Return tzinfo object.
set_tzo -- Set the local timezone offset.
"""
name = 'time'
namespace = 'urn:xmpp:time'
plugin_attrib = 'entity_time'
interfaces = set(('tzo', 'utc', 'time'))
sub_interfaces = interfaces
def set_time(self, value):
"""
Set both the UTC and TZO fields given a time object.
Arguments:
value -- A datetime object or properly formatted
string equivalent.
"""
date = value
if not isinstance(value, dt.datetime):
date = xep_0082.parse(value)
self['utc'] = date
self['tzo'] = date.tzinfo
def get_time(self):
"""
Return the entity's local time based on the UTC and TZO data.
"""
date = self['utc']
tz = self['tzo']
return date.astimezone(tz)
def del_time(self):
"""Remove both the UTC and TZO fields."""
del self['utc']
del self['tzo']
def get_tzo(self):
"""
Return the timezone offset from UTC as a tzinfo object.
"""
tzo = self._get_sub_text('tzo')
if tzo == '':
tzo = 'Z'
time = xep_0082.parse('00:00:00%s' % tzo)
return time.tzinfo
def set_tzo(self, value):
"""
Set the timezone offset from UTC.
Arguments:
value -- Either a tzinfo object or the number of
seconds (positive or negative) to offset.
"""
time = xep_0082.time(offset=value)
if xep_0082.parse(time).tzinfo == tzutc():
self._set_sub_text('tzo', 'Z')
else:
self._set_sub_text('tzo', time[-6:])
def get_utc(self):
"""
Return the time in UTC as a datetime object.
"""
value = self._get_sub_text('utc')
if value == '':
return xep_0082.parse(xep_0082.datetime())
return xep_0082.parse('%sZ' % value)
def set_utc(self, value):
"""
Set the time in UTC.
Arguments:
value -- A datetime object or properly formatted
string equivalent.
"""
date = value
if not isinstance(value, dt.datetime):
date = xep_0082.parse(value)
date = date.astimezone(tzutc())
value = xep_0082.format_datetime(date)[:-1]
self._set_sub_text('utc', value)
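# A minimal usage sketch of the stanza above (times are illustrative):
#     t = EntityTime()
#     t['time'] = dt.datetime(2011, 7, 3, 11, 37, 12, tzinfo=tzoffset(None, -7 * 3600))
#     t['utc']   # -> the same instant converted to UTC
#     t['tzo']   # -> tzinfo for the -07:00 offset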
|
ccb84d677c8d515738221a64137603edbd382ad5
|
1270c0406445c3327b0ccaeff109048b23520c60
|
/scan_to_paperless/process_utils.py
|
22dc93b9cf94dab9f4d70370674ea1fce7213115
|
[
"BSD-2-Clause"
] |
permissive
|
sbrunner/scan-to-paperless
|
740f22f25149afc5cc01d0e827f03311fb4bc27c
|
ecaf35426a812ec3ea5cb12e04596d09fdadb9d3
|
refs/heads/test
| 2023-08-31T13:23:16.032719
| 2023-08-30T17:36:56
| 2023-08-31T07:10:26
| 155,273,175
| 154
| 15
|
BSD-2-Clause
| 2023-09-14T17:58:51
| 2018-10-29T20:00:45
|
Python
|
UTF-8
|
Python
| false
| false
| 12,373
|
py
|
process_utils.py
|
"""Utility functions and context used in the process."""
import logging
import math
import os
from typing import TYPE_CHECKING, Any, Callable, Optional, Union, cast
import cv2
import numpy as np
from PIL import Image
import scan_to_paperless
import scan_to_paperless.jupyter_utils
import scan_to_paperless.status
from scan_to_paperless import process_schema as schema
if TYPE_CHECKING:
NpNdarrayInt = np.ndarray[np.uint8, Any]
else:
NpNdarrayInt = np.ndarray
_LOG = logging.getLogger(__name__)
def rotate_image(
image: NpNdarrayInt, angle: float, background: Union[int, tuple[int, int, int]]
) -> NpNdarrayInt:
"""Rotate the image."""
old_width, old_height = image.shape[:2]
angle_radian = math.radians(angle)
width = abs(np.sin(angle_radian) * old_height) + abs(np.cos(angle_radian) * old_width)
height = abs(np.sin(angle_radian) * old_width) + abs(np.cos(angle_radian) * old_height)
image_center: tuple[Any, ...] = tuple(np.array(image.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
rot_mat[1, 2] += (width - old_width) / 2
rot_mat[0, 2] += (height - old_height) / 2
return cast(
NpNdarrayInt,
cv2.warpAffine(image, rot_mat, (int(round(height)), int(round(width))), borderValue=background),
)
def crop_image( # pylint: disable=too-many-arguments
image: NpNdarrayInt,
x: int,
y: int,
width: int,
height: int,
background: Union[tuple[int], tuple[int, int, int]],
) -> NpNdarrayInt:
"""Crop the image."""
matrix: NpNdarrayInt = np.array([[1.0, 0.0, -x], [0.0, 1.0, -y]])
return cast(
NpNdarrayInt,
cv2.warpAffine(image, matrix, (int(round(width)), int(round(height))), borderValue=background),
)
class Context: # pylint: disable=too-many-instance-attributes
"""All the context of the current image with his mask."""
def __init__( # pylint: disable=too-many-arguments
self,
config: schema.Configuration,
step: schema.Step,
config_file_name: Optional[str] = None,
root_folder: Optional[str] = None,
image_name: Optional[str] = None,
) -> None:
"""Initialize."""
self.config = config
self.step = step
self.config_file_name = config_file_name
self.root_folder = root_folder
self.image_name = image_name
self.image: Optional[NpNdarrayInt] = None
self.mask: Optional[NpNdarrayInt] = None
self.get_index: Callable[
[NpNdarrayInt], Optional[tuple[np.ndarray[Any, np.dtype[np.signedinteger[Any]]], ...]]
] = lambda image: np.ix_(
            np.arange(0, image.shape[0]),
np.arange(0, image.shape[1]),
np.arange(0, image.shape[2]),
)
self.process_count = self.step.get("process_count", 0)
def _get_default_mask_file(self, default_file_name: str) -> str:
if not self.root_folder:
return ""
mask_file = os.path.join(self.root_folder, default_file_name)
if not os.path.exists(mask_file):
base_folder = os.path.dirname(self.root_folder)
assert base_folder
mask_file = os.path.join(base_folder, default_file_name)
if not os.path.exists(mask_file):
return ""
return mask_file
def _get_mask(
self,
auto_mask_config: Optional[schema.AutoMask],
config_section: str,
mask_file: Optional[str] = None,
) -> Optional[NpNdarrayInt]:
"""Init the mask."""
if auto_mask_config is not None:
hsv = cv2.cvtColor(self.image, cv2.COLOR_BGR2HSV)
lower_val = np.array(
auto_mask_config.setdefault("lower_hsv_color", schema.LOWER_HSV_COLOR_DEFAULT)
)
upper_val = np.array(
auto_mask_config.setdefault("upper_hsv_color", schema.UPPER_HSV_COLOR_DEFAULT)
)
mask = cv2.inRange(hsv, lower_val, upper_val)
de_noise_size = auto_mask_config.setdefault("de_noise_size", schema.DE_NOISE_SIZE_DEFAULT)
mask = cv2.copyMakeBorder(
mask,
de_noise_size,
de_noise_size,
de_noise_size,
de_noise_size,
cv2.BORDER_REPLICATE,
)
if auto_mask_config.setdefault("de_noise_morphology", schema.DE_NOISE_MORPHOLOGY_DEFAULT):
mask = cv2.morphologyEx(
mask,
cv2.MORPH_CLOSE,
cv2.getStructuringElement(cv2.MORPH_RECT, (de_noise_size, de_noise_size)),
)
else:
blur = cv2.blur(
mask,
(de_noise_size, de_noise_size),
)
_, mask = cv2.threshold(
blur,
auto_mask_config.setdefault("de_noise_level", schema.DE_NOISE_LEVEL_DEFAULT),
255,
cv2.THRESH_BINARY,
)
inverse_mask = auto_mask_config.setdefault("inverse_mask", schema.INVERSE_MASK_DEFAULT)
if not inverse_mask:
mask = cv2.bitwise_not(mask)
buffer_size = auto_mask_config.setdefault("buffer_size", schema.BUFFER_SIZE_DEFAULT)
blur = cv2.blur(mask, (buffer_size, buffer_size))
_, mask = cv2.threshold(
blur,
auto_mask_config.setdefault("buffer_level", schema.BUFFER_LEVEL_DEFAULT),
255,
cv2.THRESH_BINARY,
)
mask = mask[de_noise_size:-de_noise_size, de_noise_size:-de_noise_size]
if self.root_folder:
if mask_file and os.path.exists(mask_file):
mask = cv2.add(
mask,
cv2.bitwise_not(
cv2.resize(
cv2.imread(mask_file, cv2.IMREAD_GRAYSCALE),
(mask.shape[1], mask.shape[0]),
)
),
)
final_mask = cv2.bitwise_not(mask)
if os.environ.get("PROGRESS", "FALSE") == "TRUE" and self.root_folder:
self.save_progress_images(config_section.replace("_", "-"), final_mask)
elif self.root_folder and mask_file:
final_mask = cv2.imread(mask_file, cv2.IMREAD_GRAYSCALE)
if self.image is not None and final_mask is not None:
return cast(NpNdarrayInt, cv2.resize(final_mask, (self.image.shape[1], self.image.shape[0])))
return cast(NpNdarrayInt, final_mask)
def init_mask(self) -> None:
"""Init the mask image used to mask the image on the crop and skew calculation."""
mask_config = self.config["args"].setdefault(
"mask", cast(schema.MaskOperation, schema.MASK_OPERATION_DEFAULT)
)
self.mask = (
self._get_mask(
mask_config.setdefault("auto_mask", {}),
"mask",
mask_config.setdefault("additional_filename", self._get_default_mask_file("mask.png")),
)
if mask_config.setdefault("enabled", schema.MASK_ENABLED_DEFAULT)
else None
)
def get_background_color(self) -> tuple[int, int, int]:
"""Get the background color."""
return cast(
tuple[int, int, int],
self.config["args"].setdefault("background_color", schema.BACKGROUND_COLOR_DEFAULT),
)
def do_initial_cut(self) -> None:
"""Definitively mask the original image."""
cut_config = self.config["args"].get("cut", cast(schema.CutOperation, schema.CUT_OPERATION_DEFAULT))
if cut_config.get("enabled", schema.CROP_ENABLED_DEFAULT):
assert self.image is not None
mask = self._get_mask(
cut_config.setdefault("auto_mask", {}),
"auto_cut",
cut_config.setdefault("additional_filename", self._get_default_mask_file("cut.png")),
)
self.image[mask == 0] = self.get_background_color()
def get_process_count(self) -> int:
"""Get the step number."""
try:
return self.process_count
finally:
self.process_count += 1
def get_masked(self) -> NpNdarrayInt:
"""Get the mask."""
if self.image is None:
raise scan_to_paperless.ScanToPaperlessException("The image is None")
if self.mask is None:
return self.image.copy()
image = self.image.copy()
image[self.mask == 0] = self.get_background_color()
return image
def crop(self, x: int, y: int, width: int, height: int) -> None:
"""Crop the image."""
if self.image is None:
raise scan_to_paperless.ScanToPaperlessException("The image is None")
self.image = crop_image(self.image, x, y, width, height, self.get_background_color())
if self.mask is not None:
self.mask = crop_image(self.mask, x, y, width, height, (0,))
def rotate(self, angle: float) -> None:
"""Rotate the image."""
if self.image is None:
raise scan_to_paperless.ScanToPaperlessException("The image is None")
self.image = rotate_image(self.image, angle, self.get_background_color())
if self.mask is not None:
self.mask = rotate_image(self.mask, angle, 0)
def get_px_value(self, value: Union[int, float]) -> float:
"""Get the value in px."""
return value / 10 / 2.51 * self.config["args"].setdefault("dpi", schema.DPI_DEFAULT)
def is_progress(self) -> bool:
"""Return we want to have the intermediate files."""
return os.environ.get("PROGRESS", "FALSE") == "TRUE" or self.config.setdefault(
"progress", schema.PROGRESS_DEFAULT
)
def save_progress_images(
self,
name: str,
image: Optional[NpNdarrayInt] = None,
image_prefix: str = "",
process_count: Optional[int] = None,
force: bool = False,
) -> Optional[str]:
"""Save the intermediate images."""
if scan_to_paperless.jupyter_utils.is_ipython():
if image is None:
return None
from IPython.display import display # pylint: disable=import-outside-toplevel
display(Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))) # type: ignore[no-untyped-call]
return None
if process_count is None:
process_count = self.get_process_count()
if (self.is_progress() or force) and self.image_name is not None and self.root_folder is not None:
name = f"{process_count}-{name}" if self.is_progress() else name
dest_folder = os.path.join(self.root_folder, name)
if not os.path.exists(dest_folder):
os.makedirs(dest_folder)
dest_image = os.path.join(dest_folder, image_prefix + self.image_name)
if image is not None:
try:
cv2.imwrite(dest_image, image)
return dest_image
except Exception as exception:
print(exception)
else:
try:
cv2.imwrite(dest_image, self.image)
except Exception as exception:
print(exception)
dest_image = os.path.join(dest_folder, "mask-" + self.image_name)
try:
dest_image = os.path.join(dest_folder, "masked-" + self.image_name)
except Exception as exception:
print(exception)
try:
cv2.imwrite(dest_image, self.get_masked())
except Exception as exception:
print(exception)
return None
def display_image(self, image: NpNdarrayInt) -> None:
"""Display the image."""
if scan_to_paperless.jupyter_utils.is_ipython():
from IPython.display import display # pylint: disable=import-outside-toplevel
display(Image.fromarray(cv2.cvtColor(image[self.get_index(image)], cv2.COLOR_BGR2RGB))) # type: ignore[no-untyped-call]
|
3cb02c6240674af8ebca5ddf57b5a4adccd28b23
|
e7bf1ff05319acc59bba5af5890041bd82c3e197
|
/mne/io/nirx/_localized_abbr.py
|
c12133ef994a363df352752ceb6ab466b2e20eb3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mne-tools/mne-python
|
7e8d7e945dfbbee6432a4955cf050fa823f2d34b
|
f44636f00666b8eb869417960926d01690ff4f42
|
refs/heads/main
| 2023-09-04T03:05:37.402100
| 2023-09-03T14:15:18
| 2023-09-03T14:15:18
| 1,301,584
| 2,437
| 1,418
|
BSD-3-Clause
| 2023-09-14T19:23:38
| 2011-01-28T03:31:13
|
Python
|
UTF-8
|
Python
| false
| false
| 3,950
|
py
|
_localized_abbr.py
|
"""Localizations for meas_date extraction."""
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
# This file was generated on 2021/01/31 on an Ubuntu system.
# When getting "unsupported locale setting" on Ubuntu (e.g., with localepurge),
# use "sudo locale-gen de_DE" etc. then "sudo update-locale".
"""
import datetime
import locale
print('_localized_abbr = {')
for loc in ('en_US.utf8', 'de_DE', 'fr_FR', 'it_IT'):
print(f' {repr(loc)}: {{')
print(' "month": {', end='')
month_abbr = set()
for month in range(1, 13): # Month as locale’s abbreviated name
locale.setlocale(locale.LC_TIME, "en_US.utf8")
dt = datetime.datetime(year=2000, month=month, day=1)
val = dt.strftime("%b").lower()
locale.setlocale(locale.LC_TIME, loc)
key = dt.strftime("%b").lower()
month_abbr.add(key)
print(f'{repr(key)}: {repr(val)}, ', end='')
print('}, # noqa')
print(' "weekday": {', end='')
weekday_abbr = set()
for day in range(1, 8): # Weekday as locale’s abbreviated name.
locale.setlocale(locale.LC_TIME, "en_US.utf8")
dt = datetime.datetime(year=2000, month=1, day=day)
val = dt.strftime("%a").lower()
locale.setlocale(locale.LC_TIME, loc)
key = dt.strftime("%a").lower()
assert key not in weekday_abbr, key
weekday_abbr.add(key)
print(f'{repr(key)}: {repr(val)}, ', end='')
print('}, # noqa')
print(' },')
print('}\n')
"""
_localized_abbr = {
"en_US.utf8": {
"month": {
"jan": "jan",
"feb": "feb",
"mar": "mar",
"apr": "apr",
"may": "may",
"jun": "jun",
"jul": "jul",
"aug": "aug",
"sep": "sep",
"oct": "oct",
"nov": "nov",
"dec": "dec",
}, # noqa
"weekday": {
"sat": "sat",
"sun": "sun",
"mon": "mon",
"tue": "tue",
"wed": "wed",
"thu": "thu",
"fri": "fri",
}, # noqa
},
"de_DE": {
"month": {
"jan": "jan",
"feb": "feb",
"mär": "mar",
"apr": "apr",
"mai": "may",
"jun": "jun",
"jul": "jul",
"aug": "aug",
"sep": "sep",
"okt": "oct",
"nov": "nov",
"dez": "dec",
}, # noqa
"weekday": {
"sa": "sat",
"so": "sun",
"mo": "mon",
"di": "tue",
"mi": "wed",
"do": "thu",
"fr": "fri",
}, # noqa
},
"fr_FR": {
"month": {
"janv.": "jan",
"févr.": "feb",
"mars": "mar",
"avril": "apr",
"mai": "may",
"juin": "jun",
"juil.": "jul",
"août": "aug",
"sept.": "sep",
"oct.": "oct",
"nov.": "nov",
"déc.": "dec",
}, # noqa
"weekday": {
"sam.": "sat",
"dim.": "sun",
"lun.": "mon",
"mar.": "tue",
"mer.": "wed",
"jeu.": "thu",
"ven.": "fri",
}, # noqa
},
"it_IT": {
"month": {
"gen": "jan",
"feb": "feb",
"mar": "mar",
"apr": "apr",
"mag": "may",
"giu": "jun",
"lug": "jul",
"ago": "aug",
"set": "sep",
"ott": "oct",
"nov": "nov",
"dic": "dec",
}, # noqa
"weekday": {
"sab": "sat",
"dom": "sun",
"lun": "mon",
"mar": "tue",
"mer": "wed",
"gio": "thu",
"ven": "fri",
}, # noqa
},
}
|
4ed982b6b03e21c50ed89858d1dd34d517aa8593
|
091a6200be74bf6577c86f623665bcc24e16b02b
|
/Matrix_Portal_Flow_Viewer/flow_runner.py
|
50321430cc4512de10bef470fdafcc44cc875e1b
|
[
"MIT"
] |
permissive
|
adafruit/Adafruit_Learning_System_Guides
|
b5f7bce40a16da64e7a79d4b39de032f2cca41d4
|
5eaa7a15a437c533b89f359a25983e24bb6b5438
|
refs/heads/main
| 2023-09-05T18:31:41.621956
| 2023-09-05T15:36:09
| 2023-09-05T15:36:09
| 105,065,494
| 937
| 937
|
MIT
| 2023-09-12T18:48:53
| 2017-09-27T20:22:44
|
C
|
UTF-8
|
Python
| false
| false
| 989
|
py
|
flow_runner.py
|
# SPDX-FileCopyrightText: 2020 Carter Nelson for Adafruit Industries
#
# SPDX-License-Identifier: MIT
#======
# NOTE: Run this on your PC, not the Matrix Portal.
#======
import sys
import numpy as np
from PIL import Image
from ecoulements import systeme
# load geometry
grid = np.where(np.asarray(Image.open(sys.argv[1])), 1, 0)
# add inlet / outlet flows
inlet = np.array([2] * grid.shape[0])
outlet = np.array([3] * grid.shape[0])
grid = np.hstack((inlet[:, None], grid, outlet[:, None]))
# add upper/ lower walls
wall = np.array([0] * grid.shape[1])
grid = np.vstack((wall, grid, wall))
# solve
_, VX, VY, _ = systeme.sol(grid)
# save results to file
OUTFILE = "flow_solution.py"
with open(OUTFILE , "w") as fp:
fp.write("nan = None\n")
fp.write("solution = {\n")
fp.write('"VX":\n')
fp.write(str(VX[1:-1, 1:-1].tolist()))
fp.write(',\n"VY":\n')
fp.write(str(VY[1:-1, 1:-1].tolist()))
fp.write("\n}\n")
# done
print("DONE! Results saved to", OUTFILE)
|
7c2f026f2fa651c1203c38ec109c737f9c1b5046
|
ed6c9903832748003a9208112be9c3db184954dc
|
/doc/LectureNotes/_build/jupyter_execute/resamplingmethods.py
|
bf919c97544bdb61fe00b35c15f8310103d2ff19
|
[
"CC0-1.0"
] |
permissive
|
CompPhysics/ComputationalPhysics2
|
ece24a2b16d7ed391c759765e228ddb484408757
|
73b364a3fc7df6e23e43876f1d5b6305a9f893cf
|
refs/heads/gh-pages
| 2023-08-31T05:09:39.554926
| 2023-05-07T01:51:00
| 2023-05-07T01:51:00
| 28,933,001
| 128
| 65
|
CC0-1.0
| 2022-03-03T21:50:27
| 2015-01-07T20:38:21
| null |
UTF-8
|
Python
| false
| false
| 27,314
|
py
|
resamplingmethods.py
|
#!/usr/bin/env python
# coding: utf-8
# # Resampling Techniques, Bootstrap and Blocking
#
#
#
# ## Why resampling methods ?
# **Statistical analysis.**
#
# * Our simulations can be treated as *computer experiments*. This is particularly the case for Monte Carlo methods
#
# * The results can be analysed with the same statistical tools as we would use analysing experimental data.
#
# * As in all experiments, we are looking for expectation values and an estimate of how accurate they are, i.e., possible sources for errors.
#
#
#
#
# ## Statistical analysis
# * As in other experiments, many numerical experiments have two classes of errors:
#
# * Statistical errors
#
# * Systematical errors
#
#
# * Statistical errors can be estimated using standard tools from statistics
#
# * Systematical errors are method specific and must be treated differently from case to case.
#
#
#
#
#
#
#
# ## Statistics, wrapping up from last week
# Let us analyze the problem by splitting up the correlation term into
# partial sums of the form:
# $$
# f_d = \frac{1}{n-d}\sum_{k=1}^{n-d}(x_k - \bar x_n)(x_{k+d} - \bar x_n)
# $$
# The correlation term of the error can now be rewritten in terms of
# $f_d$
# $$
# \frac{2}{n}\sum_{k<l} (x_k - \bar x_n)(x_l - \bar x_n) =
# 2\sum_{d=1}^{n-1} f_d
# $$
# The value of $f_d$ reflects the correlation between measurements
# separated by the distance $d$ in the sample samples. Notice that for
# $d=0$, $f$ is just the sample variance, $\mathrm{var}(x)$. If we divide $f_d$
# by $\mathrm{var}(x)$, we arrive at the so called *autocorrelation function*
# $$
# \kappa_d = \frac{f_d}{\mathrm{var}(x)}
# $$
# which gives us a useful measure of pairwise correlations
# starting always at $1$ for $d=0$.
#
#
#
#
#
# ## Statistics, final expression
# The sample error can now be
# written in terms of the autocorrelation function:
# $$
# \mathrm{err}_X^2 =
# \frac{1}{n}\mathrm{var}(x)+\frac{2}{n}\cdot\mathrm{var}(x)\sum_{d=1}^{n-1}
# \frac{f_d}{\mathrm{var}(x)}\nonumber
# $$
# $$
# =
# \left(1+2\sum_{d=1}^{n-1}\kappa_d\right)\frac{1}{n}\mathrm{var}(x)\nonumber
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto1"></div>
#
# $$
# \begin{equation}
# =\frac{\tau}{n}\cdot\mathrm{var}(x)
# \label{_auto1} \tag{1}
# \end{equation}
# $$
# and we see that $\mathrm{err}_X$ can be expressed in terms the
# uncorrelated sample variance times a correction factor $\tau$ which
# accounts for the correlation between measurements. We call this
# correction factor the *autocorrelation time*:
# <!-- Equation labels as ordinary links -->
# <div id="eq:autocorrelation_time"></div>
#
# $$
# \begin{equation}
# \tau = 1+2\sum_{d=1}^{n-1}\kappa_d
# \label{eq:autocorrelation_time} \tag{2}
# \end{equation}
# $$
# ## Statistics, effective number of correlations
# For a correlation free experiment, $\tau$
# equals 1.
#
# We can interpret a sequential
# correlation as an effective reduction of the number of measurements by
# a factor $\tau$. The effective number of measurements becomes:
# $$
# n_\mathrm{eff} = \frac{n}{\tau}
# $$
# To neglect the autocorrelation time $\tau$ will always cause our
# simple uncorrelated estimate of $\mathrm{err}_X^2\approx \mathrm{var}(x)/n$ to
# be less than the true sample error. The estimate of the error will be
# too *good*. On the other hand, the calculation of the full
# autocorrelation time poses an efficiency problem if the set of
# measurements is very large.
#
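#
# As an illustrative sketch (not part of the original notes; the helper name
# `autocorrelation_time` is my own), we can estimate the autocorrelation function
# $\kappa_d$ and the autocorrelation time $\tau$ of Eq. (2) directly from a set of
# samples. The sum over $d$ is truncated, since $f_d$ becomes very noisy as $d$
# approaches $n$.
# In[ ]:
import numpy as np
def autocorrelation_time(x, max_lag=None):
    """Estimate kappa_d and tau = 1 + 2*sum_d kappa_d from the samples x."""
    x = np.asarray(x, dtype=float)
    n = len(x)
    if max_lag is None:
        max_lag = n // 2  # truncate the sum; f_d is too noisy for d close to n
    xbar = x.mean()
    var = x.var()
    kappa = np.empty(max_lag)
    for d in range(1, max_lag + 1):
        f_d = np.mean((x[: n - d] - xbar) * (x[d:] - xbar))  # partial sum f_d
        kappa[d - 1] = f_d / var
    tau = 1.0 + 2.0 * kappa.sum()
    return kappa, tau
# For uncorrelated samples tau should be close to 1, so that err_X^2 ~ var(x)/n.
kappa, tau = autocorrelation_time(np.random.randn(2**12), max_lag=100)
print("estimated autocorrelation time:", tau)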
#
#
#
#
#
#
#
#
# ## Can we understand this? Time Auto-correlation Function
#
# The so-called time-displacement autocorrelation $\phi(t)$ for a quantity $\mathbf{M}$ is given by
# $$
# \phi(t) = \int dt' \left[\mathbf{M}(t')-\langle \mathbf{M} \rangle\right]\left[\mathbf{M}(t'+t)-\langle \mathbf{M} \rangle\right],
# $$
# which can be rewritten as
# $$
# \phi(t) = \int dt' \left[\mathbf{M}(t')\mathbf{M}(t'+t)-\langle \mathbf{M} \rangle^2\right],
# $$
# where $\langle \mathbf{M} \rangle$ is the average value and
# $\mathbf{M}(t)$ its instantaneous value. We can discretize this function as follows, where we used our
# set of computed values $\mathbf{M}(t)$ for a set of discretized times (our Monte Carlo cycles corresponding to moving all electrons?)
# <!-- Equation labels as ordinary links -->
# <div id="eq:phitf"></div>
#
# $$
# \phi(t) = \frac{1}{t_{\mathrm{max}}-t}\sum_{t'=0}^{t_{\mathrm{max}}-t}\mathbf{M}(t')\mathbf{M}(t'+t)
# -\frac{1}{t_{\mathrm{max}}-t}\sum_{t'=0}^{t_{\mathrm{max}}-t}\mathbf{M}(t')\times
# \frac{1}{t_{\mathrm{max}}-t}\sum_{t'=0}^{t_{\mathrm{max}}-t}\mathbf{M}(t'+t).
# \label{eq:phitf} \tag{3}
# $$
# ## Time Auto-correlation Function
#
# One should be careful with times close to $t_{\mathrm{max}}$, since the upper limit of the sums
# becomes small and we end up integrating over a rather small time interval. This means that the statistical
# error in $\phi(t)$ due to the random nature of the fluctuations in $\mathbf{M}(t)$ can become large.
#
# One should therefore choose $t \ll t_{\mathrm{max}}$.
#
# Note that the variable $\mathbf{M}$ can be any expectation values of interest.
#
#
#
# The time-correlation function gives a measure of the correlation between the various values of the variable
# at a time $t'$ and a time $t'+t$. If we multiply the values of $\mathbf{M}$ at these two different times,
# we will get a positive contribution if they are fluctuating in the same direction, or a negative value
# if they fluctuate in the opposite direction. If we then integrate over time, or use the discretized version of, the time correlation function $\phi(t)$ should take a non-zero value if the fluctuations are
# correlated, else it should gradually go to zero. For times a long way apart
# the different values of $\mathbf{M}$ are most likely
# uncorrelated and $\phi(t)$ should be zero.
#
#
#
#
#
#
#
# ## Time Auto-correlation Function
# We can derive the correlation time by observing that our Metropolis algorithm is based on a random
# walk in the space of all possible spin configurations.
# Our probability
# distribution function $\mathbf{\hat{w}}(t)$ after a given number of time steps $t$ could be written as
# $$
# \mathbf{\hat{w}}(t) = \mathbf{\hat{W}^t\hat{w}}(0),
# $$
# with $\mathbf{\hat{w}}(0)$ the distribution at $t=0$ and $\mathbf{\hat{W}}$ representing the
# transition probability matrix.
# We can always expand $\mathbf{\hat{w}}(0)$ in terms of the right eigenvectors
# $\mathbf{\hat{v}}_i$ of $\mathbf{\hat{W}}$ as
# $$
# \mathbf{\hat{w}}(0) = \sum_i\alpha_i\mathbf{\hat{v}}_i,
# $$
# resulting in
# $$
# \mathbf{\hat{w}}(t) = \mathbf{\hat{W}}^t\mathbf{\hat{w}}(0)=\mathbf{\hat{W}}^t\sum_i\alpha_i\mathbf{\hat{v}}_i=
# \sum_i\lambda_i^t\alpha_i\mathbf{\hat{v}}_i,
# $$
# with $\lambda_i$ the $i^{\mathrm{th}}$ eigenvalue corresponding to
# the eigenvector $\mathbf{\hat{v}}_i$.
#
#
#
#
#
#
#
# ## Time Auto-correlation Function
# If we assume that $\lambda_0$ is the largest eigenvalue, we see that in the limit $t\rightarrow \infty$,
# $\mathbf{\hat{w}}(t)$ becomes proportional to the corresponding eigenvector
# $\mathbf{\hat{v}}_0$. This is our steady state or final distribution.
#
# We can relate this property to an observable like the mean energy.
# With the probabilty $\mathbf{\hat{w}}(t)$ (which in our case is the squared trial wave function) we
# can write the expectation values as
# $$
# \langle \mathbf{M}(t) \rangle = \sum_{\mu} \mathbf{\hat{w}}(t)_{\mu}\mathbf{M}_{\mu},
# $$
# or as the scalar of a vector product
# $$
# \langle \mathbf{M}(t) \rangle = \mathbf{\hat{w}}(t)\mathbf{m},
# $$
# with $\mathbf{m}$ being the vector whose elements are the values of $\mathbf{M}_{\mu}$ in its
# various microstates $\mu$.
#
#
#
#
#
# ## Time Auto-correlation Function
#
#
# We rewrite this relation as
# $$
# \langle \mathbf{M}(t) \rangle = \mathbf{\hat{w}}(t)\mathbf{m}=\sum_i\lambda_i^t\alpha_i\mathbf{\hat{v}}_i\mathbf{m}_i.
# $$
# If we define $m_i=\mathbf{\hat{v}}_i\mathbf{m}$ as the expectation value of
# $\mathbf{M}$ in the $i^{\mathrm{th}}$ eigenstate we can rewrite the last equation as
# $$
# \langle \mathbf{M}(t) \rangle = \sum_i\lambda_i^t\alpha_im_i.
# $$
# Since we have that in the limit $t\rightarrow \infty$ the mean value is dominated by the
# the largest eigenvalue $\lambda_0$, we can rewrite the last equation as
# $$
# \langle \mathbf{M}(t) \rangle = \langle \mathbf{M}(\infty) \rangle+\sum_{i\ne 0}\lambda_i^t\alpha_im_i.
# $$
# We define the quantity
# $$
# \tau_i=-\frac{1}{log\lambda_i},
# $$
# and rewrite the last expectation value as
# <!-- Equation labels as ordinary links -->
# <div id="eq:finalmeanm"></div>
#
# $$
# \langle \mathbf{M}(t) \rangle = \langle \mathbf{M}(\infty) \rangle+\sum_{i\ne 0}\alpha_im_ie^{-t/\tau_i}.
# \label{eq:finalmeanm} \tag{4}
# $$
# ## Time Auto-correlation Function
#
# The quantities $\tau_i$ are the correlation times for the system. They control also the auto-correlation function
# discussed above. The longest correlation time is obviously given by the second largest
# eigenvalue $\tau_1$, which normally defines the correlation time discussed above. For large times, this is the
# only correlation time that survives. If higher eigenvalues of the transition matrix are well separated from
# $\lambda_1$ and we simulate long enough, $\tau_1$ may well define the correlation time.
# In other cases we may not be able to extract a reliable result for $\tau_1$.
# Coming back to the time correlation function $\phi(t)$ we can present a more general definition in terms
# of the mean magnetizations $ \langle \mathbf{M}(t) \rangle$. Recalling that the mean value is equal
# to $ \langle \mathbf{M}(\infty) \rangle$ we arrive at the expectation values
# $$
# \phi(t) =\langle \mathbf{M}(0)-\mathbf{M}(\infty)\rangle \langle \mathbf{M}(t)-\mathbf{M}(\infty)\rangle,
# $$
# resulting in
# $$
# \phi(t) =\sum_{i,j\ne 0}m_i\alpha_im_j\alpha_je^{-t/\tau_i},
# $$
# which is appropriate for all times.
#
#
#
#
#
#
# ## Correlation Time
#
# If the correlation function decays exponentially
# $$
# \phi (t) \sim \exp{(-t/\tau)}
# $$
# then the exponential correlation time can be computed as the average
# $$
# \tau_{\mathrm{exp}} = -\langle \frac{t}{log|\frac{\phi(t)}{\phi(0)}|} \rangle.
# $$
# If the decay is exponential, then
# $$
# \int_0^{\infty} dt \phi(t) = \int_0^{\infty} dt \phi(0)\exp{(-t/\tau)} = \tau \phi(0),
# $$
# which suggests another measure of correlation
# $$
# \tau_{\mathrm{int}} = \sum_k \frac{\phi(k)}{\phi(0)},
# $$
# called the integrated correlation time.
#
#
#
#
#
#
#
#
#
# ## Resampling methods: Jackknife and Bootstrap
#
# Two famous
# resampling methods are the **independent bootstrap** and **the jackknife**.
#
# The jackknife is a special case of the independent bootstrap. Still, the jackknife was made
# popular prior to the independent bootstrap. And as the popularity of
# the independent bootstrap soared, new variants, such as **the dependent bootstrap**, were developed.
#
# The Jackknife and independent bootstrap work for
# independent, identically distributed random variables.
# If these conditions are not
# satisfied, the methods will fail. Yet, it should be said that if the data are
# independent, identically distributed, and we only want to estimate the
# variance of $\overline{X}$ (which often is the case), then there is no
# need for bootstrapping.
#
#
# ## Resampling methods: Jackknife
#
# The Jackknife works by making many replicas of the estimator $\widehat{\theta}$.
# The jackknife is a resampling method; as we explained, resampling works by scrambling the data in some way. When using the jackknife, this is done by systematically leaving out one observation from the vector of observed values $\hat{x} = (x_1,x_2,\cdots,x_n)$.
# Let $\hat{x}_i$ denote the vector
# $$
# \hat{x}_i = (x_1,x_2,\cdots,x_{i-1},x_{i+1},\cdots,x_n),
# $$
# which equals the vector $\hat{x}$ with the exception that observation
# number $i$ is left out. Using this notation, define
# $\widehat{\theta}_i$ to be the estimator
# $\widehat{\theta}$ computed using $\hat{x}_i$.
#
#
# ## Resampling methods: Jackknife estimator
#
# To get an estimate for the bias and
# standard error of $\widehat{\theta}$, use the following
# estimators for each component of $\widehat{\theta}$
# $$
# \widehat{\mathrm{Bias}}(\widehat \theta,\theta) = (n-1)\left( - \widehat{\theta} + \frac{1}{n}\sum_{i=1}^{n} \widehat \theta_i \right) \qquad \text{and} \qquad \widehat{\sigma}^2_{\widehat{\theta} } = \frac{n-1}{n}\sum_{i=1}^{n}( \widehat{\theta}_i - \frac{1}{n}\sum_{j=1}^{n}\widehat \theta_j )^2.
# $$
# ## Jackknife code example
# In[1]:
from numpy import *
from numpy.random import randint, randn
from time import time
def jackknife(data, stat):
n = len(data);t = zeros(n); inds = arange(n); t0 = time()
## 'jackknifing' by leaving out an observation for each i
for i in range(n):
t[i] = stat(delete(data,i) )
# analysis
print("Runtime: %g sec" % (time()-t0)); print("Jackknife Statistics :")
print("original bias std. error")
print("%8g %14g %15g" % (stat(data),(n-1)*mean(t)/n, (n*var(t))**.5))
return t
# Returns mean of data samples
def stat(data):
return mean(data)
mu, sigma = 100, 15
datapoints = 10000
x = mu + sigma*random.randn(datapoints)
# jackknife returns the data sample
t = jackknife(x, stat)
# ## Resampling methods: Bootstrap
# Bootstrapping is a nonparametric approach to statistical inference
# that substitutes computation for more traditional distributional
# assumptions and asymptotic results. Bootstrapping offers a number of
# advantages:
# 1. The bootstrap is quite general, although there are some cases in which it fails.
#
# 2. Because it does not require distributional assumptions (such as normally distributed errors), the bootstrap can provide more accurate inferences when the data are not well behaved or when the sample size is small.
#
# 3. It is possible to apply the bootstrap to statistics with sampling distributions that are difficult to derive, even asymptotically.
#
# 4. It is relatively simple to apply the bootstrap to complex data-collection plans (such as stratified and clustered samples).
#
#
#
#
#
# ## Resampling methods: Bootstrap background
#
# Since $\widehat{\theta} = \widehat{\theta}(\hat{X})$ is a function of random variables,
# $\widehat{\theta}$ itself must be a random variable. Thus it has
# a pdf, call this function $p(\hat{t})$. The aim of the bootstrap is to
# estimate $p(\hat{t})$ by the relative frequency of
# $\widehat{\theta}$. You can think of this as using a histogram
# in the place of $p(\hat{t})$. If the relative frequency closely
# resembles $p(\hat{t})$, then using numerics, it is straightforward to
# estimate all the interesting parameters of $p(\hat{t})$ using point
# estimators.
#
#
#
# ## Resampling methods: More Bootstrap background
#
# In the case that $\widehat{\theta}$ has
# more than one component, and the components are independent, we use the
# same estimator on each component separately. If the probability
# density function of $X_i$, $p(x)$, had been known, then it would have
# been straightforward to do this by:
# 1. Drawing lots of numbers from $p(x)$, suppose we call one such set of numbers $(X_1^*, X_2^*, \cdots, X_n^*)$.
#
# 2. Then using these numbers, we could compute a replica of $\widehat{\theta}$ called $\widehat{\theta}^*$.
#
# By repeated use of (1) and (2), many
# estimates of $\widehat{\theta}$ could have been obtained. The
# idea is to use the relative frequency of $\widehat{\theta}^*$
# (think of a histogram) as an estimate of $p(\hat{t})$.
#
#
# ## Resampling methods: Bootstrap approach
#
# But
# unless there is enough information available about the process that
# generated $X_1,X_2,\cdots,X_n$, $p(x)$ is in general
# unknown. Therefore, [Efron in 1979](https://projecteuclid.org/euclid.aos/1176344552) asked the
# question: What if we replace $p(x)$ by the relative frequency
# of the observation $X_i$; if we draw observations in accordance with
# the relative frequency of the observations, will we obtain the same
# result in some asymptotic sense? The answer is yes.
#
#
# Instead of generating the histogram for the relative
# frequency of the observation $X_i$, just draw the values
# $(X_1^*,X_2^*,\cdots,X_n^*)$ with replacement from the vector
# $\hat{X}$.
#
#
# ## Resampling methods: Bootstrap steps
#
# The independent bootstrap works like this:
#
# 1. Draw with replacement $n$ numbers for the observed variables $\hat{x} = (x_1,x_2,\cdots,x_n)$.
#
# 2. Define a vector $\hat{x}^*$ containing the values which were drawn from $\hat{x}$.
#
# 3. Using the vector $\hat{x}^*$ compute $\widehat{\theta}^*$ by evaluating $\widehat \theta$ under the observations $\hat{x}^*$.
#
# 4. Repeat this process $k$ times.
#
# When you are done, you can draw a histogram of the relative frequency of $\widehat \theta^*$. This is your estimate of the probability distribution $p(t)$. Using this probability distribution you can estimate any statistics thereof. In principle you never draw the histogram of the relative frequency of $\widehat{\theta}^*$. Instead you use the estimators corresponding to the statistic of interest. For example, if you are interested in estimating the variance of $\widehat \theta$, apply the estimator $\widehat \sigma^2$ to the values $\widehat \theta ^*$.
#
#
#
# ## Code example for the Bootstrap method
#
# The following code starts with a Gaussian distribution with mean value $\mu =100$ and variance $\sigma=15$. We use this to generate the data used in the bootstrap analysis. The bootstrap analysis returns a data set after a given number of bootstrap operations (as many as we have data points). This data set consists of estimated mean values for each bootstrap operation. The histogram generated by the bootstrap method shows that the distribution for these mean values is also a Gaussian, centered around the mean value $\mu=100$ but with standard deviation $\sigma/\sqrt{n}$, where $n$ is the number of bootstrap samples (in this case the same as the number of original data points). The value of the standard deviation is what we expect from the central limit theorem.
# In[2]:
get_ipython().run_line_magic('matplotlib', 'inline')
from numpy import *
from numpy.random import randint, randn
from time import time
from scipy.stats import norm
import matplotlib.pyplot as plt
# Returns mean of bootstrap samples
def stat(data):
return mean(data)
# Bootstrap algorithm
def bootstrap(data, statistic, R):
t = zeros(R); n = len(data); inds = arange(n); t0 = time()
# non-parametric bootstrap
for i in range(R):
t[i] = statistic(data[randint(0,n,n)])
# analysis
print("Runtime: %g sec" % (time()-t0)); print("Bootstrap Statistics :")
print("original bias std. error")
print("%8g %8g %14g %15g" % (statistic(data), std(data), mean(t), std(t)))
return t
mu, sigma = 100, 15
datapoints = 10000
x = mu + sigma*random.randn(datapoints)
# bootstrap returns the data sample
t = bootstrap(x, stat, datapoints)
# the histogram of the bootstrapped data
n, binsboot, patches = plt.hist(t, bins=50, density='true',histtype='bar', color='red', alpha=0.75)
# add a 'best fit' line
y = norm.pdf( binsboot, mean(t), std(t))
lt = plt.plot(binsboot, y, 'r--', linewidth=1)
plt.xlabel('Smarts')
plt.ylabel('Probability')
plt.axis([99.5, 100.6, 0, 3.0])
plt.grid(True)
plt.show()
# ## Resampling methods: Blocking
#
# The blocking method was made popular by [Flyvbjerg and Pedersen (1989)](https://aip.scitation.org/doi/10.1063/1.457480)
# and has become one of the standard ways to estimate
# $V(\widehat{\theta})$ for exactly one $\widehat{\theta}$, namely
# $\widehat{\theta} = \overline{X}$.
#
# Assume $n = 2^d$ for some integer $d>1$ and $X_1,X_2,\cdots, X_n$ is a stationary time series to begin with.
# Moreover, assume that the time series is asymptotically uncorrelated. We switch to vector notation by arranging $X_1,X_2,\cdots,X_n$ in an $n$-tuple. Define:
# $$
# \begin{align*}
# \hat{X} = (X_1,X_2,\cdots,X_n).
# \end{align*}
# $$
# The strength of the blocking method is when the number of
# observations, $n$ is large. For large $n$, the complexity of dependent
# bootstrapping scales poorly, but the blocking method does not,
# moreover, it becomes more accurate the larger $n$ is.
#
#
# ## Blocking Transformations
# We now define
# blocking transformations. The idea is to take the mean of subsequent
# pair of elements from $\vec{X}$ and form a new vector
# $\vec{X}_1$. Continuing in the same way by taking the mean of
# subsequent pairs of elements of $\vec{X}_1$ we obtain $\vec{X}_2$, and
# so on.
# Define $\vec{X}_i$ recursively by:
# $$
# (\vec{X}_0)_k \equiv (\vec{X})_k \nonumber
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto2"></div>
#
# $$
# \begin{equation}
# (\vec{X}_{i+1})_k \equiv \frac{1}{2}\Big( (\vec{X}_i)_{2k-1} +
# (\vec{X}_i)_{2k} \Big) \qquad \text{for all} \qquad 1 \leq i \leq d-1
# \label{_auto2} \tag{5}
# \end{equation}
# $$
# The quantity $\vec{X}_k$ is
# subject to $k$ **blocking transformations**. We now have $d$ vectors
# $\vec{X}_0, \vec{X}_1,\cdots,\vec X_{d-1}$ containing the subsequent
# averages of observations. It turns out that if the components of
# $\vec{X}$ form a stationary time series, then the components of
# $\vec{X}_i$ form a stationary time series for all $0 \leq i \leq d-1$.
#
# We can then compute the autocovariance, the variance, sample mean, and
# number of observations for each $i$.
# Let $\gamma_i, \sigma_i^2,
# \overline{X}_i$ denote the autocovariance, variance and average of the
# elements of $\vec{X}_i$ and let $n_i$ be the number of elements of
# $\vec{X}_i$. It follows by induction that $n_i = n/2^i$.
#
#
# ## Blocking Transformations
#
# Using the
# definition of the blocking transformation and the distributive
# property of the covariance, it is clear that since $h =|i-j|$
# we can define
# $$
# \gamma_{k+1}(h) = cov\left( ({X}_{k+1})_{i}, ({X}_{k+1})_{j} \right) \nonumber
# $$
# $$
# = \frac{1}{4}cov\left( ({X}_{k})_{2i-1} + ({X}_{k})_{2i}, ({X}_{k})_{2j-1} + ({X}_{k})_{2j} \right) \nonumber
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto3"></div>
#
# $$
# \begin{equation}
# = \frac{1}{2}\gamma_{k}(2h) + \frac{1}{2}\gamma_k(2h+1) \qquad \text{if } h = 0
# \label{_auto3} \tag{6}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto4"></div>
#
# $$
# \begin{equation}
# = \frac{1}{4}\gamma_k(2h-1) + \frac{1}{2}\gamma_k(2h) + \frac{1}{4}\gamma_k(2h+1) \qquad \text{otherwise}
# \label{_auto4} \tag{7}
# \end{equation}
# $$
# Since the quantity $\hat{X}$ is asymptotically uncorrelated by assumption, $\hat{X}_k$ is also asymptotically uncorrelated. Let us turn our attention to the variance of the sample mean $V(\overline{X})$.
#
#
# ## Blocking Transformations, getting there
# We have
# <!-- Equation labels as ordinary links -->
# <div id="_auto5"></div>
#
# $$
# \begin{equation}
# V(\overline{X}_k) = \frac{\sigma_k^2}{n_k} + \underbrace{\frac{2}{n_k} \sum_{h=1}^{n_k-1}\left( 1 - \frac{h}{n_k} \right)\gamma_k(h)}_{\equiv e_k} = \frac{\sigma^2_k}{n_k} + e_k \quad \text{if} \quad \gamma_k(0) = \sigma_k^2.
# \label{_auto5} \tag{8}
# \end{equation}
# $$
# The term $e_k$ is called the **truncation error**:
# <!-- Equation labels as ordinary links -->
# <div id="_auto6"></div>
#
# $$
# \begin{equation}
# e_k = \frac{2}{n_k} \sum_{h=1}^{n_k-1}\left( 1 - \frac{h}{n_k} \right)\gamma_k(h).
# \label{_auto6} \tag{9}
# \end{equation}
# $$
# We can show that $V(\overline{X}_i) = V(\overline{X}_j)$ for all $0 \leq i \leq d-1$ and $0 \leq j \leq d-1$.
#
#
# ## Blocking Transformations, final expressions
#
# We can then wrap up
# $$
# n_{j+1} \overline{X}_{j+1} = \sum_{i=1}^{n_{j+1}} (\hat{X}_{j+1})_i = \frac{1}{2}\sum_{i=1}^{n_{j}/2} (\hat{X}_{j})_{2i-1} + (\hat{X}_{j})_{2i} \nonumber
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto7"></div>
#
# $$
# \begin{equation}
# = \frac{1}{2}\left[ (\hat{X}_j)_1 + (\hat{X}_j)_2 + \cdots + (\hat{X}_j)_{n_j} \right] = \underbrace{\frac{n_j}{2}}_{=n_{j+1}} \overline{X}_j = n_{j+1}\overline{X}_j.
# \label{_auto7} \tag{10}
# \end{equation}
# $$
# By repeated use of this equation we get $V(\overline{X}_i) = V(\overline{X}_0) = V(\overline{X})$ for all $0 \leq i \leq d-1$. This has the consequence that
# <!-- Equation labels as ordinary links -->
# <div id="eq:convergence"></div>
#
# $$
# \begin{equation}
# V(\overline{X}) = \frac{\sigma_k^2}{n_k} + e_k \qquad \text{for all} \qquad 0 \leq k \leq d-1. \label{eq:convergence} \tag{11}
# \end{equation}
# $$
# Flyvbjerg and Petersen demonstrated that the sequence
# $\{e_k\}_{k=0}^{d-1}$ is decreasing, and conjectured that the term
# $e_k$ can be made as small as we would like by making $k$ (and hence
# $d$) sufficiently large. That the sequence is decreasing was later proved in the Master of Science thesis of Marius Jonsson (UiO, 2018).
# It means we can apply blocking transformations until
# $e_k$ is sufficiently small, and then estimate $V(\overline{X})$ by
# $\widehat{\sigma}^2_k/n_k$.
#
#
# For an elegant solution and proof of the blocking method, see the recent article of [Marius Jonsson (former MSc student of the Computational Physics group)](https://journals.aps.org/pre/abstract/10.1103/PhysRevE.98.043304).
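#
# A minimal blocking sketch (illustrative, not part of the original notes; the helper
# name `blocking_estimates` is my own). It applies the blocking transformation of
# Eq. (5) repeatedly and reports $\sigma_k^2/n_k$ at each level $k$; once these
# estimates reach a plateau, the truncation error $e_k$ is negligible and the plateau
# value is the blocking estimate of $V(\overline{X})$, cf. Eq. (11).
# In[ ]:
import numpy as np
def blocking_estimates(x):
    """Return sigma_k^2 / n_k for each blocking level k."""
    x = np.asarray(x, dtype=float)
    d = int(np.log2(len(x)))  # assume n = 2^d; drop any excess samples
    x = x[: 2**d]
    estimates = []
    for _ in range(d - 1):
        estimates.append(x.var() / len(x))  # sigma_k^2 / n_k, ignoring e_k
        x = 0.5 * (x[0::2] + x[1::2])  # (X_{k+1})_i = ((X_k)_{2i-1} + (X_k)_{2i}) / 2
    return np.array(estimates)
# Toy example: an AR(1) series is correlated, so the naive estimate sigma_0^2/n_0 is
# too small; the blocking estimates grow with k and then level off.
rng = np.random.default_rng(0)
noise = rng.normal(size=2**14)
x = np.empty_like(noise)
x[0] = noise[0]
for t in range(1, len(x)):
    x[t] = 0.9 * x[t - 1] + noise[t]
print(blocking_estimates(x))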
|
91b0bc69f69bf7cd77443d43d161c60d2fa11920
|
80f94bea418d7956df1ba19d4d6a1d7715a94ade
|
/lib/galaxy/datatypes/hdf5.py
|
87e8a70d82d11d1529f64d2b27aa4a6efc00c5bc
|
[
"CC-BY-2.5",
"MIT",
"CC-BY-3.0",
"AFL-3.0"
] |
permissive
|
galaxyproject/galaxy
|
5748409eb6693b1611f289d164f85e20c3237495
|
b9ae7a16ba0465995e880ae9701b7e87226b9bab
|
refs/heads/dev
| 2023-08-28T22:35:51.248138
| 2023-08-26T08:02:33
| 2023-08-26T08:02:33
| 31,211,061
| 1,277
| 1,137
|
NOASSERTION
| 2023-09-14T19:39:01
| 2015-02-23T14:18:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,671
|
py
|
hdf5.py
|
"""Composite datatype for the HDF5SummarizedExperiment R data object.
This datatype was created for use with the iSEE interactive tool.
"""
from typing import Optional
from galaxy.datatypes.data import Data
from galaxy.datatypes.metadata import MetadataElement
from galaxy.datatypes.protocols import (
HasExtraFilesAndMetadata,
HasMetadata,
)
class HDF5SummarizedExperiment(Data):
"""Composite datatype to represent HDF5SummarizedExperiment objects.
A lightweight shell file `se.rds` is read into memory by R, and provides an
interface to the much larger `assays.h5` files which contains the
experiment data.
Within R, the HDF5SummarizedExperiment object is conventionally referenced
by the parent directory name of these two files.
In Galaxy tool commands, the parent directory can be accessed through
`param_name.extra_files_path`.
"""
MetadataElement(
name="base_name",
desc="SummarisedExperiment object name",
default="HDF5 SE object",
readonly=True,
set_in_upload=True,
)
file_ext = "rdata.se"
composite_type = "auto_primary_file"
allow_datatype_change = False
def __init__(self, **kwd):
"""Construct object from input files."""
Data.__init__(self, **kwd)
self.add_composite_file(
"se.rds",
is_binary=True,
description="Summarized experiment RDS object",
)
self.add_composite_file(
"assays.h5",
is_binary=True,
description="Summarized experiment data array",
)
def init_meta(self, dataset: HasMetadata, copy_from: Optional[HasMetadata] = None) -> None:
"""Override parent init metadata."""
Data.init_meta(self, dataset, copy_from=copy_from)
def generate_primary_file(self, dataset: HasExtraFilesAndMetadata) -> str:
"""Generate primary file to represent dataset."""
return f"""
<html>
<head>
<title> Files for Composite Dataset ({self.file_ext})</title>
</head>
<p/>
This composite dataset is composed of the following files:
</p>
<ul>
<li><a href="se.rds">se.rds</a>
<li><a href="array.h5">array.h5</a>
</ul>
</html>
"""
def sniff(self, filename: str) -> bool:
"""
Returns false and the user must manually set.
"""
return False
def get_mime(self) -> str:
"""Return the mime type of the datatype."""
return "text/html"
|
40df4ed8b312eef982cea0f207525a6d1b25507f
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/tools/azure-sdk-tools/tests/integration/conftest.py
|
a2aec165281921ca9d3c70ac7d369e53a6f82129
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 143
|
py
|
conftest.py
|
import pytest
from devtools_testutils import test_proxy
@pytest.fixture(scope="session", autouse=True)
def start_proxy(test_proxy):
return
|
ec95d6bf801e10596716796670987c898f4f3266
|
5be3a03ca2d74e0263338af3cc91d2cf1b7f75d9
|
/beautifulsoup/beautifuldemo.py
|
bc4595b1cdf364ba464c3cfb4051ea22ca7b2257
|
[] |
no_license
|
secondtonone1/python-
|
d969a1bbc5c8c2b27a0bb75f91159da6d1ce83f7
|
19a66d70c20293875ad29a868f42a9b3a5826422
|
refs/heads/master
| 2022-11-05T00:31:53.300764
| 2022-09-26T03:10:11
| 2022-09-26T03:10:11
| 98,652,204
| 177
| 154
| null | 2022-10-30T08:55:38
| 2017-07-28T13:29:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,661
|
py
|
beautifuldemo.py
|
#-*-coding:utf-8-*-
import requests
import re
import time
from lxml import etree
from bs4 import BeautifulSoup
if __name__ == "__main__":
#html = '''div id="sslct_menu" class="cl p_pop" style="display: none;">
#<span class="sslct_btn" onClick="extstyle('')" title="默认"><i></i></span></div>
#<ul id="myitem_menu" class="p_pop" style="display: none;">
#<li><a href="https://www.aisinei.org/forum.php?mod=guide&view=my">帖子</a></li>
#<li><a href="https://www.aisinei.org/home.php?mod=space&do=favorite&view=me">收藏</a></li>'''
#bs = BeautifulSoup(html)
#print(bs.prettify())
# s =BeautifulSoup('test.html','lxml')
#print(s.prettify())
html2 = ''' <li class="bus_postbd item masonry_brick">
<div class="bus_vtem">
<a href="https://www.aisinei.org/thread-17846-1-1.html" title="XIUREN秀人网 2018.11.13 NO.1228 猫宝 [50+1P]" class="preview" target="_blank">
hello world
<img src="https://i.asnpic.win/block/a4/a42e6c63ef1ae20a914699f183d5204b.jpg" width="250" height="375" alt="XIUREN秀人网 2018.11.13 NO.1228 猫宝 [50+1P]"/>
ss2<span class="bus_listag">XIUREN秀人网</span>
</a>
<a href="https://www.aisinei.org/thread-17846-1-1.html" title="XIUREN秀人网 2018.11.13 NO.1228 猫宝 [50+1P]" target="_blank">
<div class="lv-face"><img src="https://www.aisinei.org/uc_server/avatar.php?uid=2&size=small" alt="发布组小乐"/></div>
<div class="t">XIUREN秀人网 2018.11.13 NO.1228 猫宝 [50</div>
<div class="i"><span><i class="bus_showicon bus_showicon_v"></i>6402</span><span><i class="bus_showicon bus_showicon_r"></i>1</span></div>
</a>
</div>
</li> '''
s2 = BeautifulSoup(html2,'lxml')
#print(s2.li)
#print(s2.a)
#print(s2.a.name)
#print(s2.a.attrs)
#print(s2.a.string)
#print(s2.a.text)
#print(s2.div.contents)
#print(s2.div.children)
#print(s2.div.contents[0])
#for i in s2.div.children:
#print(i)
#print(s2.div)
#print(s2.a["href"])
#print(s2.a.get("href"))
'''
# descendant nodes
print(s2.div.descendants)
# ancestor nodes
print(s2.div.parents)
# direct parent node
print(s2.div.parent)
# next sibling node
print(s2.a.next_sibling)
# previous sibling node
print(s2.a.previous_sibling)
print(s2.find('a'))
print(s2.find_all('a'))
print(s2.find_all(re.compile("^div")))
print(s2.find_all(["div","li"]))
'''
# select all 'a' tag nodes
print(s2.select('a'))
# select nodes with class bus_vtem
print(s2.select('.bus_vtem'))
# select the node with id ps
print(s2.select('#ps'))
|
b0879469dc89212e7ee8840527677c7727525c0c
|
0b7c3acec16b87843c963b3e1bbe0e6b61073b47
|
/chatlog/base/constant.py
|
e6acc3f9b75e8fdae222de19225d73ec27023ba1
|
[
"Apache-2.0"
] |
permissive
|
DingHanyang/chatLog
|
8ab3f2db41484d33f41573ce9f6ffaf99f2c7e7d
|
2ac503faa3fd48019e5ddffbc5d37e54f8f30f25
|
refs/heads/master
| 2023-06-22T21:17:49.227116
| 2023-04-17T02:22:23
| 2023-04-17T02:22:23
| 94,080,895
| 149
| 36
|
Apache-2.0
| 2023-06-11T14:29:13
| 2017-06-12T09:50:52
|
Python
|
UTF-8
|
Python
| false
| false
| 316
|
py
|
constant.py
|
# re
JUDGE_TIME_RE = '^(((20[0-3][0-9]-(0[13578]|1[02])-(0[1-9]|[12][0-9]|3[01]))|(20[0-3][0-9]-(0[2469]|11)-(0[1-9]|[12][' \
'0-9]|30))) (20|21|22|23|[0-9]|[0-1][0-9]):[0-5][0-9]:[0-5][0-9])'
JUDGE_ID_RE = '[(][1-9]\d{4,}[)]$|[<][A-Za-z\d]+([-_.][A-Za-z\d]+)*@([A-Za-z\d]+[-.])+[A-Za-z\d]{2,4}[>]$'
|
05f81d32b97994f25b7d5dcb48050e29c918c6bf
|
b4c904a288393ec372b3b9feb91960c102d201f3
|
/aleph/model/mapping.py
|
4d052ba9755fa7ac47833d8269d59f94c75e3d41
|
[
"MIT"
] |
permissive
|
alephdata/aleph
|
31909b3e5678f4a72721a239d761aafb734ffcd4
|
0bcdac81768058cc4055b1aa1ad7069026569f5e
|
refs/heads/develop
| 2023-09-05T07:28:01.863600
| 2023-09-04T18:19:22
| 2023-09-04T18:19:22
| 23,404,128
| 1,481
| 270
|
MIT
| 2023-09-13T12:56:37
| 2014-08-27T20:21:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,787
|
py
|
mapping.py
|
import logging
from datetime import datetime
from normality import stringify
from sqlalchemy.dialects.postgresql import JSONB
from aleph.core import db
from aleph.model import Role, Collection
from aleph.model.entityset import EntitySet
from aleph.model.common import iso_text, DatedModel, Status
from aleph.model.common import ENTITY_ID_LEN
log = logging.getLogger(__name__)
class Mapping(db.Model, DatedModel):
"""A mapping to load entities from a table"""
__tablename__ = "mapping"
id = db.Column(db.Integer, primary_key=True)
query = db.Column("query", JSONB)
role_id = db.Column(db.Integer, db.ForeignKey("role.id"), index=True)
role = db.relationship(Role, backref=db.backref("mappings", lazy="dynamic")) # noqa
collection_id = db.Column(db.Integer, db.ForeignKey("collection.id"), index=True)
collection = db.relationship(
Collection, backref=db.backref("mappings", lazy="dynamic")
)
entityset_id = db.Column(
db.String(ENTITY_ID_LEN), db.ForeignKey("entityset.id"), nullable=True
)
entityset = db.relationship(
EntitySet, backref=db.backref("mappings", lazy="dynamic")
)
table_id = db.Column(db.String(ENTITY_ID_LEN), index=True)
disabled = db.Column(db.Boolean, nullable=True)
last_run_status = db.Column(db.Unicode, nullable=True, default=Status.DEFAULT)
last_run_err_msg = db.Column(db.Unicode, nullable=True)
def get_proxy_context(self):
"""Metadata to be added to each generated entity."""
return {
"created_at": iso_text(self.created_at),
"updated_at": iso_text(self.updated_at),
"role_id": self.role_id,
"mutable": True,
}
def update(self, query=None, table_id=None, entityset_id=None):
if query:
self.query = query
if table_id:
self.table_id = table_id
self.entityset_id = entityset_id
self.updated_at = datetime.utcnow()
db.session.add(self)
def set_status(self, status, error=None):
self.last_run_status = status
self.last_run_err_msg = error
self.updated_at = datetime.utcnow()
db.session.add(self)
def to_dict(self):
data = self.to_dict_dates()
data.update(
{
"id": stringify(self.id),
"query": dict(self.query),
"role_id": stringify(self.role_id),
"collection_id": stringify(self.collection_id),
"entityset_id": stringify(self.entityset_id),
"table_id": self.table_id,
"last_run_status": Status.LABEL.get(self.last_run_status),
"last_run_err_msg": self.last_run_err_msg,
}
)
return data
@classmethod
def by_collection(cls, collection_id, table_id=None):
q = cls.all().filter(cls.collection_id == collection_id)
if table_id is not None:
q = q.filter(cls.table_id == table_id)
return q
@classmethod
def delete_by_collection(cls, collection_id):
pq = db.session.query(cls)
pq = pq.filter(cls.collection_id == collection_id)
pq.delete(synchronize_session=False)
@classmethod
def delete_by_table(cls, entity_id):
pq = db.session.query(cls)
pq = pq.filter(cls.table_id == entity_id)
pq.delete(synchronize_session=False)
@classmethod
def create(cls, query, table_id, collection, role_id, entityset_id=None):
mapping = cls()
mapping.role_id = role_id
mapping.collection_id = collection.id
mapping.update(query, table_id, entityset_id)
return mapping
def __repr__(self):
return "<Mapping(%r, %r)>" % (self.id, self.table_id)
|
1f65708184bb80589abd9cb08d31d9f3349350d8
|
afbae26b958b5ef20548402a65002dcc8e55b66a
|
/release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/Hub.py
|
1e7605848d728ab79a64922d336195cd6ed8e3d3
|
[
"MIT"
] |
permissive
|
gtalarico/ironpython-stubs
|
d875cb8932c7644f807dc6fde9dd513d159e4f5c
|
c7f6a6cb197e3949e40a4880a0b2a44e72d0a940
|
refs/heads/master
| 2023-07-12T01:43:47.295560
| 2022-05-23T18:12:06
| 2022-05-23T18:12:06
| 95,340,553
| 235
| 88
|
NOASSERTION
| 2023-07-05T06:36:28
| 2017-06-25T05:30:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,641
|
py
|
Hub.py
|
class Hub(Element,IDisposable):
""" Represents a connection between two or more Autodesk Revit Elements. """
def Dispose(self):
""" Dispose(self: Element,A_0: bool) """
pass
def getBoundingBox(self,*args):
""" getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
pass
def GetHubConnectorManager(self):
"""
GetHubConnectorManager(self: Hub) -> ConnectorManager
Retrieves the ConnectorManager of the Hub.
Returns: The ConnectorManager.
"""
pass
def GetOrigin(self):
"""
GetOrigin(self: Hub) -> XYZ
Retrieves position of a Hub if such position is a 3D point.
Returns: The origin.
"""
pass
def HasOrigin(self):
"""
HasOrigin(self: Hub) -> bool
Provides information if Hub has a specific location at point in 3D space.
Returns: True if the Hub has a specific location at point in 3D space.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: Element,disposing: bool) """
pass
def setElementType(self,*args):
""" setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
|
ad706c8dff7a923bc8324b25b38ac217ba387942
|
6f797bae522927214b4c4065d88b92d6fff127e0
|
/kur/backend/pytorch/modules.py
|
9b168998e1286fe046ecee3b6a0d0176b93b9fd5
|
[
"Apache-2.0",
"Python-2.0"
] |
permissive
|
deepgram/kur
|
5a3c6b5dba462327ccb134dcde53bf60ee4bf1fd
|
fd0c120e50815c1e5be64e5dde964dcd47234556
|
refs/heads/master
| 2023-08-17T11:38:47.613445
| 2020-11-04T19:09:50
| 2020-11-04T19:09:50
| 74,182,569
| 873
| 139
|
Apache-2.0
| 2023-01-28T21:50:24
| 2016-11-19T02:42:09
|
Python
|
UTF-8
|
Python
| false
| false
| 14,729
|
py
|
modules.py
|
"""
Copyright 2017 Deepgram
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import logging
from collections import OrderedDict
# pylint: disable=import-error
import torch
import torch.nn as nn
from torch.autograd import Variable
# pylint: enable=import-error
import numpy
logger = logging.getLogger(__name__)
###############################################################################
class BYOM(nn.Module): # pylint: disable=too-few-public-methods
""" Bring-Your-Own-Module for PyTorch.
This is just a simple PyTorch module that allows you to build up your
module in pieces (using `setattr` to register parameters, and `func` to
perform the forward pass). It gives you the benefits of a Module with
the flexibility of Kur's graph interpretation.
"""
###########################################################################
def __init__(self):
""" Creates a new Module.
"""
super().__init__()
self.func = None
###########################################################################
def forward(self, *inputs):
""" Performs the forward pass.
"""
assert self.func is not None
return self.func(self, *inputs)
###############################################################################
class Layer:
""" Holds a PyTorch "layer". This is important because PyTorch copies the
Modules for each GPU; that is, you cannot hold on to Module references,
or even layer references, since they may change. This Layer class
abstracts that away by dynamically grabbing the correct layer instance
during the forward pass.
"""
###########################################################################
def __init__(self, name, func=None):
""" Creates a new layer.
"""
self.name = name
self.func = func
###########################################################################
def __call__(self, module, *x):
""" Grab the instantiated layer and evaluate it.
"""
operation = getattr(module, self.name)
try:
if self.func:
return self.func(operation, *x)
return operation(*x)
except:
logger.error('Failed to apply layer: %s', self.name)
for i, X in enumerate(x):
logger.error(' Input shape #%d: %s', i+1, list(X.size()))
raise
###########################################################################
@staticmethod
def resolve(value):
""" Convenience function for calling either Layers or standard PyTorch
operations.
"""
if isinstance(value, Layer):
return value
elif hasattr(value, 'pure') and value.pure:
return value
return lambda _, *args: value(*args)
###############################################################################
class TorchModel:
""" This holds the Torch graph and provides convenience functions for using
it.
"""
DATA_CAST = {
'int' : lambda x: x.int(),
'long' : lambda x: x.long(),
'float' : lambda x: x.float(),
'double' : lambda x: x.double()
}
###########################################################################
def __init__(self, gpu=None):
""" Creates a new model.
"""
self.model = BYOM()
self.inputs = []
self.outputs = None
self.layer_map = {}
self.gpu = gpu
self._reuse = False
self.final_model = None
self.info = []
###########################################################################
@property
def allow_reuse(self):
""" Getter for the `allow_reuse` property, which, if enabled, causes
`add_layer` calls to reuse existing layer when possible, rather
than raising an exception about duplicate names.
"""
return self._reuse
###########################################################################
@allow_reuse.setter
def allow_reuse(self, value):
self._reuse = value
###########################################################################
def set_outputs(self, outputs):
""" Sets the model outputs.
# Arguments
outputs: an OrderedDict, or a list of `(name, layer)` tuples. Each
`name` (or key of the OrderedDict) is the name of a layer, and
each `layer` is a function as returned by `add_operation` or
`add_layer`, once that function has been applied to other
layers.
"""
self.outputs = OrderedDict(outputs)
self.model.func = self.add_operation(bundle)(
*self.outputs.values()
)
self.final_model = self.parallelize()
###########################################################################
def parallelize(self):
""" Applies any parallelism requested.
"""
if not self.gpu:
return self.model
if isinstance(self.gpu, bool):
devices = None
else:
devices = self.gpu
return nn.DataParallel(self.model, devices).cuda()
###########################################################################
def to_torch(self, tensor, *, location=None, data_type=None):
""" Creates a Torch tensor from an array.
# Arguments
tensor: one of tuple, list, numpy.ndarray, torch.Tensor
"""
#if self.gpu:
# if numpy_tensor.dtype.kind == 'f' and \
# numpy_tensor.dtype.itemsize != 4:
# numpy_tensor = numpy_tensor.astype(
# '{}f4'.format(numpy_tensor.dtype.byteorder)
# )
if isinstance(tensor, (list, tuple)):
tensor = numpy.array(tensor)
if isinstance(tensor, numpy.ndarray):
tensor = torch.from_numpy(tensor)
tensor = self.DATA_CAST.get(data_type or 'float')(tensor)
if self.gpu:
if location != 'cpu':
tensor = tensor.cuda()
return tensor
###########################################################################
def predict(self, data):
""" Performs the forward pass.
"""
inputs = tuple(
Variable(self.to_torch(
data[k], location=info['location'], data_type=info['type']
))
for k, info in zip(self.inputs, self.info)
)
return self.final_model(*inputs)
###########################################################################
def cpu(self, x):
""" Moves a tensor (or list/tuple of tensors) back to the CPU.
"""
if isinstance(x, (list, tuple)):
return tuple(X.cpu() for X in x)
return x.cpu()
###########################################################################
def test(self, data, loss):
""" Performs a forward pass and calculates loss.
# Arguments
data: dict. A dictionary whose keys are the names of the model
layers, and whose respective values are numpy arrays containing
those values for the batch. For this function, both model
inputs and outputs must be specified.
loss: a list of loss functions, each one corresponding to the
respective model output. These loss functions should take two
arguments; the first is a list of the tensors needed to compute
loss (e.g., the ground truth data), and the second is the model
output to compare against.
"""
inputs = tuple(
Variable(self.to_torch(
data[k], location=info['location'], data_type=info['type']
))
for k, info in zip(self.inputs, self.info)
)
predictions = self.final_model(*inputs)
#######################################################################
def get_loss(loss, prediction):
""" Calculates the loss given a loss specification.
"""
loss_inputs = [x[1](None, *inputs) for x in loss[0]]
return loss[1](loss_inputs, prediction)
losses = [
get_loss(loss[output], P)
for output, P in zip(self.outputs, predictions) if output in loss
]
return predictions, losses
###########################################################################
def move(self, module):
""" Moves a module to the GPU, if the GPU is enabled.
"""
if self.gpu:
return module.cuda()
return module
###########################################################################
def placeholder(self, name, *, create=True, location=None, data_type=None):
""" Creates a new input placeholder, or retrieves an existing one.
"""
# Placeholders are just named ways to access one of the input tensors.
try:
index = self.inputs.index(name)
except ValueError:
if not create:
return None
index = len(self.inputs)
self.inputs.append(name)
self.info.append({
'location' : location,
'type' : data_type
})
#######################################################################
def calculate(_, *inputs):
""" Applies the layer.
"""
if index >= len(inputs):
raise IndexError('Out-of-range index: {}. Must be < {}. Note: '
'we are trying to find "{}".'
.format(index, len(inputs), calculate.target))
return inputs[index]
calculate.target = name
calculate.index = index
calculate.name = name
return calculate
###########################################################################
@staticmethod
def normalize_name(name):
""" Creates a PyTorch-compatible layer name.
In PyTorch, "layers" can be automatically tracked by a Module, but
only if they are class attributes. So we will take a name, and get
a Pythonically-allowed variable to use for the attribute.
"""
new_name = re.sub(r'^[^a-zA-Z_]|[^a-zA-Z0-9_]', r'_', name)
new_name = 'layer_{}'.format(new_name)
return new_name
###########################################################################
def backprop(self, losses):
""" Runs the backward pass on the network.
"""
grads = [self.to_torch([1.0]) for _ in range(len(losses))]
torch.autograd.backward(
losses,
grads
)
###########################################################################
def clip_gradients(self, clip_type, clip_value):
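""" Clips gradients of the trainable parameters, either by their global
L2 norm ('norm') or by their infinity norm, i.e. largest absolute
value ('abs').
"""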
if clip_type == 'norm':
norm_type = 2
elif clip_type == 'abs':
norm_type = 'inf'
else:
raise ValueError('Clip type must be "norm" or "abs".')
nn.utils.clip_grad_norm(
self.get_trainable_parameters(),
clip_value,
norm_type
)
###########################################################################
def add_operation(self, operation, name=None):# pylint: disable=no-self-use
""" Adds a new operation to the graph.
# Notes
- Operations have no learnable parameters; if no name is given, the
function's __name__ is used.
"""
if name is None:
name = operation.__name__
#######################################################################
def stack(*lower_layers):
""" Returns a function suitable for using in a functional paradigm
of model assemble.
"""
logger.trace('Connecting layers: %s feed into %s',
[
x.name if hasattr(x, 'name') else 'unknown'
for x in lower_layers
], name
)
###################################################################
def calculate(module, *inputs):
""" Applies the layer.
"""
result = Layer.resolve(operation)(
module,
*[x(module, *inputs) for x in lower_layers]
)
return result
calculate.name = name
calculate.op = operation
return calculate
stack.name = name
stack.op = operation
return stack
###########################################################################
def add_variable(self, name, value, func=None):
""" Adds a new variable.
"""
new_name = self.normalize_name(name)
if name in self.layer_map:
if not self.allow_reuse:
raise ValueError('Duplicate name found: {}'.format(name))
else:
self.layer_map[name] = new_name
logger.trace('Adding new layer: %s (internal: "%s")',
name, new_name)
setattr(self.model, new_name, value)
return Layer(new_name, func)
###########################################################################
def add_layer(self, name, layer, *, func=None, frozen=False):
""" Creates a new layer.
# Notes
- The layer must not already exist.
- The layer will be registered as potentially containing learnable
parameters.
- All learnable layers must be added using this function.
"""
if frozen is not None:
for param in layer.parameters():
param.requires_grad = not frozen
layer = self.add_variable(name, layer, func)
return self.add_operation(layer, name=name)
###########################################################################
def get_trainable_parameters(self):
""" Returns a generator over all trainable model parameters.
"""
for param in self.model.parameters():
if param.requires_grad:
yield param
###############################################################################
def flatten(x):
""" Flattens an input tensor.
"""
return x.contiguous().view(x.size()[0], -1)
###############################################################################
def bundle(*x):
""" Merge tensors.
"""
return x
###############################################################################
def move_channel_forward(x):
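""" Moves the channel axis from last to second place, e.g. a
(batch, height, width, channels) tensor becomes
(batch, channels, height, width).
"""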
ndim = x.dim()
permutation = (0, ndim-1) + tuple(range(1, ndim-1))
return x.permute(*permutation)
###############################################################################
def move_channel_backward(x):
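""" Moves the channel axis from second to last place, e.g. a
(batch, channels, height, width) tensor becomes
(batch, height, width, channels).
"""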
ndim = x.dim()
permutation = (0, ) + tuple(range(2, ndim)) + (1, )
return x.permute(*permutation)
###############################################################################
class swap_channels:
""" Swaps the dimensions between Theano/PyTorch and TensorFlow dimension
orderings.
"""
begin = move_channel_forward
end = move_channel_backward
###############################################################################
def swap_batch_dimension(x):
permutation = (1, 0) + tuple(range(2, x.dim()))
return x.permute(*permutation)
###############################################################################
def parallel(layer):
""" Creates a parallel operation (i.e., map/distributed operation).
"""
def func(module, x):
""" The actual wrapped operation.
"""
return torch.stack(
tuple(Layer.resolve(layer)(module, X) for X in torch.unbind(x, 0)),
0
)
func.pure = True
return func
###############################################################################
def multiply(x, y):
""" Multiplies two layers.
"""
return x * y
###############################################################################
def add(x, y):
""" Adds two layers.
"""
return x + y
###############################################################################
def constant_minus(c):
""" Returns a functor that computes a constant minus a layer.
"""
def func(x):
""" Inner function.
"""
return c - x
return func
### EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF
|
14f67fa4ecdcd0b5d221e5e08e1390e83b2d9a31
|
3a6a211ea0d32405497fbd6486c490bb147e25f9
|
/telemetry/telemetry/internal/forwarders/forwarder_utils_unittest.py
|
c33e9b27a48e4d691d66099f1c0c014218355b81
|
[
"BSD-3-Clause"
] |
permissive
|
catapult-project/catapult
|
e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0
|
53102de187a48ac2cfc241fef54dcbc29c453a8e
|
refs/heads/main
| 2021-05-25T07:37:22.832505
| 2021-05-24T08:01:49
| 2021-05-25T06:07:38
| 33,947,548
| 2,032
| 742
|
BSD-3-Clause
| 2022-08-26T16:01:18
| 2015-04-14T17:49:05
|
HTML
|
UTF-8
|
Python
| false
| false
| 829
|
py
|
forwarder_utils_unittest.py
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import tempfile
import unittest
from telemetry import decorators
from telemetry.internal.forwarders import forwarder_utils
class ReadRemotePortTests(unittest.TestCase):
@decorators.Disabled('win') # https://crbug.com/793256
def testReadRemotePort(self):
sample_output = [
'', '', 'Allocated port 42360 for remote forward to localhost:12345']
    with tempfile.NamedTemporaryFile(mode='w') as cros_stderr:
for line in sample_output:
cros_stderr.write(line + '\n')
cros_stderr.flush()
remote_port = forwarder_utils.ReadRemotePort(cros_stderr.name)
self.assertEqual(remote_port, 42360)
|
5d442c8f5c3b7a927e8b88c42f9efe7aad0ca94a
|
7f67919b5f5e087e8a26eacd8d5d1c1f94224cc6
|
/python/pyarmnn/examples/speech_recognition/run_audio_file.py
|
ddf6cb704c7457de1e45002fdd536d8c700e6e17
|
[
"BSD-3-Clause",
"CC0-1.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ARM-software/armnn
|
11f0b169291ade6a08cbef1e87b32000aeed4767
|
49f609d9e633f52fcdc98e6e06178e618597e87d
|
refs/heads/branches/armnn_23_05
| 2023-09-04T07:02:43.218253
| 2023-05-15T10:24:43
| 2023-05-15T16:08:53
| 124,536,178
| 1,053
| 329
|
MIT
| 2023-05-22T23:28:55
| 2018-03-09T12:11:48
|
C++
|
UTF-8
|
Python
| false
| false
| 3,375
|
py
|
run_audio_file.py
|
# Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
"""Automatic speech recognition with PyArmNN demo for processing audio clips to text."""
import sys
import os
import numpy as np
script_dir = os.path.dirname(__file__)
sys.path.insert(1, os.path.join(script_dir, '..', 'common'))
from argparse import ArgumentParser
from network_executor import ArmnnNetworkExecutor
from utils import prepare_input_data
from audio_capture import AudioCaptureParams, capture_audio
from audio_utils import decode_text, display_text
from wav2letter_mfcc import Wav2LetterMFCC, W2LAudioPreprocessor
from mfcc import MFCCParams
# Model Specific Labels
labels = {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f', 6: 'g', 7: 'h', 8: 'i', 9: 'j', 10: 'k', 11: 'l', 12: 'm',
13: 'n',
14: 'o', 15: 'p', 16: 'q', 17: 'r', 18: 's', 19: 't', 20: 'u', 21: 'v', 22: 'w', 23: 'x', 24: 'y',
25: 'z',
26: "'", 27: ' ', 28: '$'}
def parse_args():
parser = ArgumentParser(description="ASR with PyArmNN")
parser.add_argument(
"--audio_file_path",
required=True,
type=str,
help="Path to the audio file to perform ASR",
)
parser.add_argument(
"--model_file_path",
required=True,
type=str,
help="Path to ASR model to use",
)
parser.add_argument(
"--preferred_backends",
type=str,
nargs="+",
default=["CpuAcc", "CpuRef"],
help="""List of backends in order of preference for optimizing
subgraphs, falling back to the next backend in the list on unsupported
layers. Defaults to [CpuAcc, CpuRef]""",
)
return parser.parse_args()
def main(args):
# Read command line args
audio_file = args.audio_file_path
# Create the ArmNN inference runner
network = ArmnnNetworkExecutor(args.model_file_path, args.preferred_backends)
# Specify model specific audio data requirements
audio_capture_params = AudioCaptureParams(dtype=np.float32, overlap=31712, min_samples=47712, sampling_freq=16000,
mono=True)
buffer = capture_audio(audio_file, audio_capture_params)
# Extract features and create the preprocessor
mfcc_params = MFCCParams(sampling_freq=16000, num_fbank_bins=128, mel_lo_freq=0, mel_hi_freq=8000,
num_mfcc_feats=13, frame_len=512, use_htk_method=False, n_fft=512)
wmfcc = Wav2LetterMFCC(mfcc_params)
preprocessor = W2LAudioPreprocessor(wmfcc, model_input_size=296, stride=160)
current_r_context = ""
is_first_window = True
print("Processing Audio Frames...")
for audio_data in buffer:
# Prepare the input Tensors
input_data = prepare_input_data(audio_data, network.get_data_type(), network.get_input_quantization_scale(0),
network.get_input_quantization_offset(0), preprocessor)
# Run inference
output_result = network.run([input_data])
# Slice and Decode the text, and store the right context
current_r_context, text = decode_text(is_first_window, labels, output_result)
is_first_window = False
display_text(text)
print(current_r_context, flush=True)
if __name__ == "__main__":
args = parse_args()
main(args)
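# Example invocation (illustrative only; the file names below are placeholders,
# not part of the original demo):
# python run_audio_file.py --audio_file_path samples/speech.wav \
#     --model_file_path wav2letter.tflite --preferred_backends CpuAcc CpuRef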
|
67f263eab91b426e225dbb5eb865e1181ee472ad
|
503bfe863ae9e92bf940a5e8baa57c0de44f4da6
|
/src/silx/math/medianfilter/test/test_medianfilter.py
|
023e03c89babd326120f301aa445938ad65bc472
|
[
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"LicenseRef-scancode-public-domain"
] |
permissive
|
silx-kit/silx
|
58105c0ed9cd02c75543c0c67a027471ca87922b
|
5e33cb69afd2a8b1cfe3183282acdd8b34c1a74f
|
refs/heads/main
| 2023-08-24T14:33:49.732794
| 2023-07-25T07:44:02
| 2023-07-25T07:44:02
| 43,291,718
| 120
| 78
|
MIT
| 2023-09-14T13:07:11
| 2015-09-28T09:23:13
|
Python
|
UTF-8
|
Python
| false
| false
| 28,518
|
py
|
test_medianfilter.py
|
# ##########################################################################
# Copyright (C) 2017-2022 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ############################################################################
"""Tests of the median filter"""
__authors__ = ["H. Payno"]
__license__ = "MIT"
__date__ = "17/01/2018"
import unittest
import numpy
from silx.math.medianfilter import medfilt2d, medfilt1d
from silx.math.medianfilter.medianfilter import reflect, mirror
from silx.math.medianfilter.medianfilter import MODES as silx_mf_modes
from silx.utils.testutils import ParametricTestCase
try:
import scipy
except ImportError:
scipy = None
else:
try:
from scipy.misc import ascent
    except ImportError:
from scipy.datasets import ascent
import scipy.ndimage
import logging
_logger = logging.getLogger(__name__)
RANDOM_FLOAT_MAT = numpy.array([
[0.05564293, 0.62717157, 0.75002406, 0.40555336, 0.70278975],
[0.76532598, 0.02839148, 0.05272484, 0.65166994, 0.42161216],
[0.23067427, 0.74219128, 0.56049024, 0.44406320, 0.28773158],
[0.81025249, 0.20303021, 0.68382382, 0.46372299, 0.81281709],
[0.94691602, 0.07813661, 0.81651256, 0.84220106, 0.33623165]])
RANDOM_INT_MAT = numpy.array([
[0, 5, 2, 6, 1],
[2, 3, 1, 7, 1],
[9, 8, 6, 7, 8],
[5, 6, 8, 2, 4]])
class TestMedianFilterNearest(ParametricTestCase):
"""Unit tests for the median filter in nearest mode"""
def testFilter3_100(self):
"""Test median filter on a 10x10 matrix with a 3x3 kernel."""
dataIn = numpy.arange(100, dtype=numpy.int32)
dataIn = dataIn.reshape((10, 10))
dataOut = medfilt2d(image=dataIn,
kernel_size=(3, 3),
conditional=False,
mode='nearest')
self.assertTrue(dataOut[0, 0] == 1)
self.assertTrue(dataOut[9, 0] == 90)
self.assertTrue(dataOut[9, 9] == 98)
self.assertTrue(dataOut[0, 9] == 9)
self.assertTrue(dataOut[0, 4] == 5)
self.assertTrue(dataOut[9, 4] == 93)
self.assertTrue(dataOut[4, 4] == 44)
def testFilter3_9(self):
"Test median filter on a 3x3 matrix with a 3x3 kernel."
dataIn = numpy.array([0, -1, 1,
12, 6, -2,
100, 4, 12],
dtype=numpy.int16)
dataIn = dataIn.reshape((3, 3))
dataOut = medfilt2d(image=dataIn,
kernel_size=(3, 3),
conditional=False,
mode='nearest')
self.assertTrue(dataOut.shape == dataIn.shape)
self.assertTrue(dataOut[1, 1] == 4)
self.assertTrue(dataOut[0, 0] == 0)
self.assertTrue(dataOut[0, 1] == 0)
self.assertTrue(dataOut[1, 0] == 6)
def testFilterWidthOne(self):
"""Make sure a filter of one by one give the same result as the input
"""
dataIn = numpy.arange(100, dtype=numpy.int32)
dataIn = dataIn.reshape((10, 10))
dataOut = medfilt2d(image=dataIn,
kernel_size=(1, 1),
conditional=False,
mode='nearest')
self.assertTrue(numpy.array_equal(dataIn, dataOut))
def testFilter3_1d(self):
"""Test binding and result of the 1d filter"""
self.assertTrue(numpy.array_equal(
medfilt1d(RANDOM_INT_MAT[0], kernel_size=3, conditional=False,
mode='nearest'),
[0, 2, 5, 2, 1])
)
def testFilter3Conditionnal(self):
"""Test that the conditional filter apply correctly in a 10x10 matrix
with a 3x3 kernel
"""
dataIn = numpy.arange(100, dtype=numpy.int32)
dataIn = dataIn.reshape((10, 10))
dataOut = medfilt2d(image=dataIn,
kernel_size=(3, 3),
conditional=True,
mode='nearest')
self.assertTrue(dataOut[0, 0] == 1)
self.assertTrue(dataOut[0, 1] == 1)
self.assertTrue(numpy.array_equal(dataOut[1:8, 1:8], dataIn[1:8, 1:8]))
self.assertTrue(dataOut[9, 9] == 98)
def testFilter3_1D(self):
"""Simple test of a 3x3 median filter on a 1D array"""
dataIn = numpy.arange(100, dtype=numpy.int32)
dataOut = medfilt2d(image=dataIn,
kernel_size=(5),
conditional=False,
mode='nearest')
self.assertTrue(dataOut[0] == 0)
self.assertTrue(dataOut[9] == 9)
self.assertTrue(dataOut[99] == 99)
def testNaNs(self):
"""Test median filter on image with NaNs in nearest mode"""
# Data with a NaN in first corner
nan_corner = numpy.arange(100.).reshape(10, 10)
nan_corner[0, 0] = numpy.nan
output = medfilt2d(
nan_corner, kernel_size=3, conditional=False, mode='nearest')
self.assertEqual(output[0, 0], 10)
self.assertEqual(output[0, 1], 2)
self.assertEqual(output[1, 0], 11)
self.assertEqual(output[1, 1], 12)
# Data with some NaNs
some_nans = numpy.arange(100.).reshape(10, 10)
some_nans[0, 1] = numpy.nan
some_nans[1, 1] = numpy.nan
some_nans[1, 0] = numpy.nan
output = medfilt2d(
some_nans, kernel_size=3, conditional=False, mode='nearest')
self.assertEqual(output[0, 0], 0)
self.assertEqual(output[0, 1], 2)
self.assertEqual(output[1, 0], 20)
self.assertEqual(output[1, 1], 20)
class TestMedianFilterReflect(ParametricTestCase):
"""Unit test for the median filter in reflect mode"""
def testArange9(self):
"""Test from a 3x3 window to RANDOM_FLOAT_MAT"""
img = numpy.arange(9, dtype=numpy.int32)
img = img.reshape(3, 3)
kernel = (3, 3)
res = medfilt2d(image=img,
kernel_size=kernel,
conditional=False,
mode='reflect')
self.assertTrue(
numpy.array_equal(res.ravel(), [1, 2, 2, 3, 4, 5, 6, 6, 7]))
def testRandom10(self):
"""Test a (5, 3) window to a RANDOM_FLOAT_MAT"""
kernel = (5, 3)
thRes = numpy.array([
[0.23067427, 0.56049024, 0.56049024, 0.4440632, 0.42161216],
[0.23067427, 0.62717157, 0.56049024, 0.56049024, 0.46372299],
[0.62717157, 0.62717157, 0.56049024, 0.56049024, 0.4440632],
[0.76532598, 0.68382382, 0.56049024, 0.56049024, 0.42161216],
[0.81025249, 0.68382382, 0.56049024, 0.68382382, 0.46372299]])
res = medfilt2d(image=RANDOM_FLOAT_MAT,
kernel_size=kernel,
conditional=False,
mode='reflect')
self.assertTrue(numpy.array_equal(thRes, res))
def testApplyReflect1D(self):
"""Test the reflect function used for the median filter in reflect mode
"""
# test for inside values
self.assertTrue(reflect(2, 3) == 2)
# test for boundaries values
self.assertTrue(reflect(3, 3) == 2)
self.assertTrue(reflect(4, 3) == 1)
self.assertTrue(reflect(5, 3) == 0)
self.assertTrue(reflect(6, 3) == 0)
self.assertTrue(reflect(7, 3) == 1)
self.assertTrue(reflect(-1, 3) == 0)
self.assertTrue(reflect(-2, 3) == 1)
self.assertTrue(reflect(-3, 3) == 2)
self.assertTrue(reflect(-4, 3) == 2)
self.assertTrue(reflect(-5, 3) == 1)
self.assertTrue(reflect(-6, 3) == 0)
self.assertTrue(reflect(-7, 3) == 0)
def testRandom10Conditionnal(self):
"""Test the median filter in reflect mode and with the conditionnal
option"""
kernel = (3, 1)
thRes = numpy.array([
[0.05564293, 0.62717157, 0.75002406, 0.40555336, 0.70278975],
[0.23067427, 0.62717157, 0.56049024, 0.44406320, 0.42161216],
[0.76532598, 0.20303021, 0.56049024, 0.46372299, 0.42161216],
[0.81025249, 0.20303021, 0.68382382, 0.46372299, 0.33623165],
[0.94691602, 0.07813661, 0.81651256, 0.84220106, 0.33623165]])
res = medfilt2d(image=RANDOM_FLOAT_MAT,
kernel_size=kernel,
conditional=True,
mode='reflect')
self.assertTrue(numpy.array_equal(thRes, res))
def testNaNs(self):
"""Test median filter on image with NaNs in reflect mode"""
# Data with a NaN in first corner
nan_corner = numpy.arange(100.).reshape(10, 10)
nan_corner[0, 0] = numpy.nan
output = medfilt2d(
nan_corner, kernel_size=3, conditional=False, mode='reflect')
self.assertEqual(output[0, 0], 10)
self.assertEqual(output[0, 1], 2)
self.assertEqual(output[1, 0], 11)
self.assertEqual(output[1, 1], 12)
# Data with some NaNs
some_nans = numpy.arange(100.).reshape(10, 10)
some_nans[0, 1] = numpy.nan
some_nans[1, 1] = numpy.nan
some_nans[1, 0] = numpy.nan
output = medfilt2d(
some_nans, kernel_size=3, conditional=False, mode='reflect')
self.assertEqual(output[0, 0], 0)
self.assertEqual(output[0, 1], 2)
self.assertEqual(output[1, 0], 20)
self.assertEqual(output[1, 1], 20)
def testFilter3_1d(self):
"""Test binding and result of the 1d filter"""
self.assertTrue(numpy.array_equal(
medfilt1d(RANDOM_INT_MAT[0], kernel_size=5, conditional=False,
mode='reflect'),
[2, 2, 2, 2, 2])
)
class TestMedianFilterMirror(ParametricTestCase):
"""Unit test for the median filter in mirror mode
"""
def testApplyMirror1D(self):
"""Test the reflect function used for the median filter in mirror mode
"""
# test for inside values
self.assertTrue(mirror(2, 3) == 2)
# test for boundaries values
self.assertTrue(mirror(4, 4) == 2)
self.assertTrue(mirror(5, 4) == 1)
self.assertTrue(mirror(6, 4) == 0)
self.assertTrue(mirror(7, 4) == 1)
self.assertTrue(mirror(8, 4) == 2)
self.assertTrue(mirror(-1, 4) == 1)
self.assertTrue(mirror(-2, 4) == 2)
self.assertTrue(mirror(-3, 4) == 3)
self.assertTrue(mirror(-4, 4) == 2)
self.assertTrue(mirror(-5, 4) == 1)
self.assertTrue(mirror(-6, 4) == 0)
def testRandom10(self):
"""Test a (5, 3) window to a random array"""
kernel = (3, 5)
thRes = numpy.array([
[0.05272484, 0.40555336, 0.42161216, 0.42161216, 0.42161216],
[0.56049024, 0.56049024, 0.4440632, 0.4440632, 0.4440632],
[0.56049024, 0.46372299, 0.46372299, 0.46372299, 0.46372299],
[0.68382382, 0.56049024, 0.56049024, 0.46372299, 0.56049024],
[0.68382382, 0.46372299, 0.68382382, 0.46372299, 0.68382382]])
res = medfilt2d(image=RANDOM_FLOAT_MAT,
kernel_size=kernel,
conditional=False,
mode='mirror')
self.assertTrue(numpy.array_equal(thRes, res))
def testRandom10Conditionnal(self):
"""Test the median filter in reflect mode and with the conditionnal
option"""
kernel = (1, 3)
thRes = numpy.array([
[0.62717157, 0.62717157, 0.62717157, 0.70278975, 0.40555336],
[0.02839148, 0.05272484, 0.05272484, 0.42161216, 0.65166994],
[0.74219128, 0.56049024, 0.56049024, 0.44406320, 0.44406320],
[0.20303021, 0.68382382, 0.46372299, 0.68382382, 0.46372299],
[0.07813661, 0.81651256, 0.81651256, 0.81651256, 0.84220106]])
res = medfilt2d(image=RANDOM_FLOAT_MAT,
kernel_size=kernel,
conditional=True,
mode='mirror')
self.assertTrue(numpy.array_equal(thRes, res))
def testNaNs(self):
"""Test median filter on image with NaNs in mirror mode"""
# Data with a NaN in first corner
nan_corner = numpy.arange(100.).reshape(10, 10)
nan_corner[0, 0] = numpy.nan
output = medfilt2d(
nan_corner, kernel_size=3, conditional=False, mode='mirror')
self.assertEqual(output[0, 0], 11)
self.assertEqual(output[0, 1], 11)
self.assertEqual(output[1, 0], 11)
self.assertEqual(output[1, 1], 12)
# Data with some NaNs
some_nans = numpy.arange(100.).reshape(10, 10)
some_nans[0, 1] = numpy.nan
some_nans[1, 1] = numpy.nan
some_nans[1, 0] = numpy.nan
output = medfilt2d(
some_nans, kernel_size=3, conditional=False, mode='mirror')
self.assertEqual(output[0, 0], 0)
self.assertEqual(output[0, 1], 12)
self.assertEqual(output[1, 0], 21)
self.assertEqual(output[1, 1], 20)
def testFilter3_1d(self):
"""Test binding and result of the 1d filter"""
self.assertTrue(numpy.array_equal(
medfilt1d(RANDOM_INT_MAT[0], kernel_size=5, conditional=False,
mode='mirror'),
[2, 5, 2, 5, 2])
)
class TestMedianFilterShrink(ParametricTestCase):
"""Unit test for the median filter in mirror mode
"""
def testRandom_3x3(self):
"""Test the median filter in shrink mode and with the conditionnal
option"""
kernel = (3, 3)
thRes = numpy.array([
[0.62717157, 0.62717157, 0.62717157, 0.65166994, 0.65166994],
[0.62717157, 0.56049024, 0.56049024, 0.44406320, 0.44406320],
[0.74219128, 0.56049024, 0.46372299, 0.46372299, 0.46372299],
[0.74219128, 0.68382382, 0.56049024, 0.56049024, 0.46372299],
[0.81025249, 0.81025249, 0.68382382, 0.81281709, 0.81281709]])
res = medfilt2d(image=RANDOM_FLOAT_MAT,
kernel_size=kernel,
conditional=False,
mode='shrink')
self.assertTrue(numpy.array_equal(thRes, res))
def testBounds(self):
"""Test the median filter in shrink mode with 3 different kernels
which should return the same result due to the large values of kernels
used.
"""
kernel1 = (1, 9)
kernel2 = (1, 11)
kernel3 = (1, 21)
thRes = numpy.array([[2, 2, 2, 2, 2],
[2, 2, 2, 2, 2],
[8, 8, 8, 8, 8],
[5, 5, 5, 5, 5]])
resK1 = medfilt2d(image=RANDOM_INT_MAT,
kernel_size=kernel1,
conditional=False,
mode='shrink')
resK2 = medfilt2d(image=RANDOM_INT_MAT,
kernel_size=kernel2,
conditional=False,
mode='shrink')
resK3 = medfilt2d(image=RANDOM_INT_MAT,
kernel_size=kernel3,
conditional=False,
mode='shrink')
self.assertTrue(numpy.array_equal(resK1, thRes))
self.assertTrue(numpy.array_equal(resK2, resK1))
self.assertTrue(numpy.array_equal(resK3, resK1))
def testRandom_3x3Conditionnal(self):
"""Test the median filter in reflect mode and with the conditionnal
option"""
kernel = (3, 3)
thRes = numpy.array([
[0.05564293, 0.62717157, 0.62717157, 0.40555336, 0.65166994],
[0.62717157, 0.56049024, 0.05272484, 0.65166994, 0.42161216],
[0.23067427, 0.74219128, 0.56049024, 0.44406320, 0.46372299],
[0.81025249, 0.20303021, 0.68382382, 0.46372299, 0.81281709],
[0.81025249, 0.81025249, 0.81651256, 0.81281709, 0.81281709]])
res = medfilt2d(image=RANDOM_FLOAT_MAT,
kernel_size=kernel,
conditional=True,
mode='shrink')
self.assertTrue(numpy.array_equal(res, thRes))
def testRandomInt(self):
"""Test 3x3 kernel on RANDOM_INT_MAT
"""
kernel = (3, 3)
thRes = numpy.array([[3, 2, 5, 2, 6],
[5, 3, 6, 6, 7],
[6, 6, 6, 6, 7],
[8, 8, 7, 7, 7]])
resK1 = medfilt2d(image=RANDOM_INT_MAT,
kernel_size=kernel,
conditional=False,
mode='shrink')
self.assertTrue(numpy.array_equal(resK1, thRes))
def testNaNs(self):
"""Test median filter on image with NaNs in shrink mode"""
# Data with a NaN in first corner
nan_corner = numpy.arange(100.).reshape(10, 10)
nan_corner[0, 0] = numpy.nan
output = medfilt2d(
nan_corner, kernel_size=3, conditional=False, mode='shrink')
self.assertEqual(output[0, 0], 10)
self.assertEqual(output[0, 1], 10)
self.assertEqual(output[1, 0], 11)
self.assertEqual(output[1, 1], 12)
# Data with some NaNs
some_nans = numpy.arange(100.).reshape(10, 10)
some_nans[0, 1] = numpy.nan
some_nans[1, 1] = numpy.nan
some_nans[1, 0] = numpy.nan
output = medfilt2d(
some_nans, kernel_size=3, conditional=False, mode='shrink')
self.assertEqual(output[0, 0], 0)
self.assertEqual(output[0, 1], 2)
self.assertEqual(output[1, 0], 20)
self.assertEqual(output[1, 1], 20)
def testFilter3_1d(self):
"""Test binding and result of the 1d filter"""
self.assertTrue(numpy.array_equal(
medfilt1d(RANDOM_INT_MAT[0], kernel_size=3, conditional=False,
mode='shrink'),
[5, 2, 5, 2, 6])
)
class TestMedianFilterConstant(ParametricTestCase):
"""Unit test for the median filter in constant mode
"""
def testRandom10(self):
"""Test a (5, 3) window to a random array"""
kernel = (3, 5)
thRes = numpy.array([
[0., 0.02839148, 0.05564293, 0.02839148, 0.],
[0.05272484, 0.40555336, 0.4440632, 0.42161216, 0.28773158],
[0.05272484, 0.44406320, 0.46372299, 0.42161216, 0.28773158],
[0.20303021, 0.46372299, 0.56049024, 0.44406320, 0.33623165],
[0., 0.07813661, 0.33623165, 0.07813661, 0.]])
res = medfilt2d(image=RANDOM_FLOAT_MAT,
kernel_size=kernel,
conditional=False,
mode='constant')
self.assertTrue(numpy.array_equal(thRes, res))
RANDOM_FLOAT_MAT = numpy.array([
[0.05564293, 0.62717157, 0.75002406, 0.40555336, 0.70278975],
[0.76532598, 0.02839148, 0.05272484, 0.65166994, 0.42161216],
[0.23067427, 0.74219128, 0.56049024, 0.44406320, 0.28773158],
[0.81025249, 0.20303021, 0.68382382, 0.46372299, 0.81281709],
[0.94691602, 0.07813661, 0.81651256, 0.84220106, 0.33623165]])
def testRandom10Conditionnal(self):
"""Test the median filter in reflect mode and with the conditionnal
option"""
kernel = (1, 3)
thRes = numpy.array([
[0.05564293, 0.62717157, 0.62717157, 0.70278975, 0.40555336],
[0.02839148, 0.05272484, 0.05272484, 0.42161216, 0.42161216],
[0.23067427, 0.56049024, 0.56049024, 0.44406320, 0.28773158],
[0.20303021, 0.68382382, 0.46372299, 0.68382382, 0.46372299],
[0.07813661, 0.81651256, 0.81651256, 0.81651256, 0.33623165]])
res = medfilt2d(image=RANDOM_FLOAT_MAT,
kernel_size=kernel,
conditional=True,
mode='constant')
self.assertTrue(numpy.array_equal(thRes, res))
def testNaNs(self):
"""Test median filter on image with NaNs in constant mode"""
# Data with a NaN in first corner
nan_corner = numpy.arange(100.).reshape(10, 10)
nan_corner[0, 0] = numpy.nan
output = medfilt2d(nan_corner,
kernel_size=3,
conditional=False,
mode='constant',
cval=0)
self.assertEqual(output[0, 0], 0)
self.assertEqual(output[0, 1], 2)
self.assertEqual(output[1, 0], 10)
self.assertEqual(output[1, 1], 12)
# Data with some NaNs
some_nans = numpy.arange(100.).reshape(10, 10)
some_nans[0, 1] = numpy.nan
some_nans[1, 1] = numpy.nan
some_nans[1, 0] = numpy.nan
output = medfilt2d(some_nans,
kernel_size=3,
conditional=False,
mode='constant',
cval=0)
self.assertEqual(output[0, 0], 0)
self.assertEqual(output[0, 1], 0)
self.assertEqual(output[1, 0], 0)
self.assertEqual(output[1, 1], 20)
def testFilter3_1d(self):
"""Test binding and result of the 1d filter"""
self.assertTrue(numpy.array_equal(
medfilt1d(RANDOM_INT_MAT[0], kernel_size=5, conditional=False,
mode='constant'),
[0, 2, 2, 2, 1])
)
class TestGeneralExecution(ParametricTestCase):
"""Some general test on median filter application"""
def testTypes(self):
"""Test that all needed types have their implementation of the median
filter
"""
for mode in silx_mf_modes:
for testType in [numpy.float32, numpy.float64, numpy.int16,
numpy.uint16, numpy.int32, numpy.int64,
numpy.uint64]:
with self.subTest(mode=mode, type=testType):
data = (numpy.random.rand(10, 10) * 65000).astype(testType)
out = medfilt2d(image=data,
kernel_size=(3, 3),
conditional=False,
mode=mode)
self.assertTrue(out.dtype.type is testType)
def testInputDataIsNotModify(self):
"""Make sure input data is not modify by the median filter"""
dataIn = numpy.arange(100, dtype=numpy.int32)
dataIn = dataIn.reshape((10, 10))
dataInCopy = dataIn.copy()
for mode in silx_mf_modes:
with self.subTest(mode=mode):
medfilt2d(image=dataIn,
kernel_size=(3, 3),
conditional=False,
mode=mode)
self.assertTrue(numpy.array_equal(dataIn, dataInCopy))
def testAllNaNs(self):
"""Test median filter on image all NaNs"""
all_nans = numpy.empty((10, 10), dtype=numpy.float32)
all_nans[:] = numpy.nan
for mode in silx_mf_modes:
for conditional in (True, False):
with self.subTest(mode=mode, conditional=conditional):
output = medfilt2d(
all_nans,
kernel_size=3,
conditional=conditional,
mode=mode,
cval=numpy.nan)
self.assertTrue(numpy.all(numpy.isnan(output)))
def testConditionalWithNaNs(self):
"""Test that NaNs are propagated through conditional median filter"""
for mode in silx_mf_modes:
with self.subTest(mode=mode):
image = numpy.ones((10, 10), dtype=numpy.float32)
nan_mask = numpy.zeros_like(image, dtype=bool)
nan_mask[0, 0] = True
nan_mask[4, :] = True
nan_mask[6, 4] = True
image[nan_mask] = numpy.nan
output = medfilt2d(
image,
kernel_size=3,
conditional=True,
mode=mode)
out_isnan = numpy.isnan(output)
self.assertTrue(numpy.all(out_isnan[nan_mask]))
self.assertFalse(
numpy.any(out_isnan[numpy.logical_not(nan_mask)]))
def _getScipyAndSilxCommonModes():
"""return the mode which are comparable between silx and scipy"""
modes = silx_mf_modes.copy()
del modes['shrink']
return modes
@unittest.skipUnless(scipy is not None, "scipy not available")
class TestVsScipy(ParametricTestCase):
"""Compare scipy.ndimage.median_filter vs silx.math.medianfilter
on comparable
"""
def testWithArange(self):
"""Test vs scipy with different kernels on arange matrix"""
data = numpy.arange(10000, dtype=numpy.int32)
data = data.reshape(100, 100)
kernels = [(3, 7), (7, 5), (1, 1), (3, 3)]
modesToTest = _getScipyAndSilxCommonModes()
for kernel in kernels:
for mode in modesToTest:
with self.subTest(kernel=kernel, mode=mode):
resScipy = scipy.ndimage.median_filter(input=data,
size=kernel,
mode=mode)
resSilx = medfilt2d(image=data,
kernel_size=kernel,
conditional=False,
mode=mode)
self.assertTrue(numpy.array_equal(resScipy, resSilx))
def testRandomMatrice(self):
"""Test vs scipy with different kernels on RANDOM_FLOAT_MAT"""
kernels = [(3, 7), (7, 5), (1, 1), (3, 3)]
modesToTest = _getScipyAndSilxCommonModes()
for kernel in kernels:
for mode in modesToTest:
with self.subTest(kernel=kernel, mode=mode):
resScipy = scipy.ndimage.median_filter(input=RANDOM_FLOAT_MAT,
size=kernel,
mode=mode)
resSilx = medfilt2d(image=RANDOM_FLOAT_MAT,
kernel_size=kernel,
conditional=False,
mode=mode)
self.assertTrue(numpy.array_equal(resScipy, resSilx))
def testAscent(self):
"""Test vs scipy with """
img = ascent()
kernels = [(3, 1), (3, 5), (5, 9), (9, 3)]
modesToTest = _getScipyAndSilxCommonModes()
for kernel in kernels:
for mode in modesToTest:
with self.subTest(kernel=kernel, mode=mode):
resScipy = scipy.ndimage.median_filter(input=img,
size=kernel,
mode=mode)
resSilx = medfilt2d(image=img,
kernel_size=kernel,
conditional=False,
mode=mode)
self.assertTrue(numpy.array_equal(resScipy, resSilx))
|
13bcba5b111d3b341b007e925a4db50f076a0d5d
|
9e9e0985789b51210c7fe315ae98949de8b23469
|
/tests/parser/functions/test_minmax_value.py
|
033381f59f8e6b0456bf54f2bff1d09ab3f99f5c
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
vyperlang/vyper
|
ea614230edccf3424aad746f66874e1f32c55c57
|
158099b9c1a49b5472293c1fb7a4baf3cd015eb5
|
refs/heads/master
| 2023-08-30T02:18:48.923346
| 2023-08-27T02:59:27
| 2023-08-27T02:59:27
| 73,461,676
| 2,359
| 469
|
NOASSERTION
| 2023-09-14T13:05:24
| 2016-11-11T08:56:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,730
|
py
|
test_minmax_value.py
|
import pytest
from vyper.exceptions import InvalidType, OverflowException
from vyper.semantics.types import DecimalT, IntegerT
from vyper.semantics.types.shortcuts import INT256_T, UINT256_T
@pytest.mark.parametrize("typ", sorted(IntegerT.all() + (DecimalT(),)))
@pytest.mark.parametrize("op", ("min_value", "max_value"))
def test_minmax_value(get_contract, op, typ):
code = f"""
@external
def foo() -> {typ}:
return {op}({typ})
"""
c = get_contract(code)
lo, hi = typ.ast_bounds
if op == "min_value":
assert c.foo() == lo
elif op == "max_value":
assert c.foo() == hi
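# For typ = uint256 and op = max_value, the rendered contract source is:
#
# @external
# def foo() -> uint256:
#     return max_value(uint256)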
@pytest.mark.parametrize("typ", sorted(IntegerT.all()))
def test_minmax_value_int_oob(get_contract, assert_compile_failed, typ):
upper = f"""
@external
def foo():
a: {typ} = max_value({typ}) + 1
"""
lower = f"""
@external
def foo():
a: {typ} = min_value({typ}) - 1
"""
if typ == UINT256_T:
assert_compile_failed(lambda: get_contract(upper), OverflowException)
else:
assert_compile_failed(lambda: get_contract(upper), InvalidType)
if typ == INT256_T:
assert_compile_failed(lambda: get_contract(lower), OverflowException)
else:
assert_compile_failed(lambda: get_contract(lower), InvalidType)
@pytest.mark.parametrize("typ", [DecimalT()])
def test_minmax_value_decimal_oob(get_contract, assert_compile_failed, typ):
upper = f"""
@external
def foo():
a: {typ} = max_value({typ}) + 1e-10
"""
lower = f"""
@external
def foo():
a: {typ} = min_value({typ}) - 1e-10
"""
assert_compile_failed(lambda: get_contract(upper), OverflowException)
assert_compile_failed(lambda: get_contract(lower), OverflowException)
|
78799b5b60eebf72125c698cf23cffbcb8a75355
|
659e79c16e072e77481de388481e06be6b087247
|
/openfda/spl/process_barcodes.py
|
3a92938cfdb22990318d2243b70511929793c04f
|
[
"CC0-1.0"
] |
permissive
|
FDA/openfda
|
731e7903b3dadcf01bb604008bf55b3c120931a7
|
01a7357aeb69fc8245a591b24ac79bd39de49027
|
refs/heads/master
| 2023-08-05T15:23:59.843665
| 2022-05-17T15:53:08
| 2022-05-17T15:53:08
| 20,379,117
| 438
| 117
|
CC0-1.0
| 2022-12-26T21:35:23
| 2014-06-01T14:43:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,790
|
py
|
process_barcodes.py
|
#!/usr/local/bin/python
from io import StringIO
import logging
import io
import xml.parsers.expat
from os.path import basename
import simplejson as json
import xmltodict
from openfda.common import strip_unicode
XML_ESCAPE_CHARS = {
'"': '"',
'\'': ''',
'<': '<',
'>': '>',
'&': '&'
}
BARCODE_TYPES = ['EAN-13']
def XML2JSON(input_file):
rows = []
def handle_barcode(_, barcode):
if isinstance(barcode, dict):
row_dict = {}
try:
all_symbols = []
symbols = barcode['source']['index']['symbol']
if isinstance(symbols, list):
all_symbols.extend(symbols)
else:
all_symbols.append(symbols)
symbol = next((x for x in all_symbols if x['@type'] in BARCODE_TYPES), None)
if symbol:
href = barcode['source']['@href']
barcode_type = symbol['@type']
quality = symbol['@quality']
data = symbol['data']
row_dict['id'] = basename(href)
row_dict['barcode_type'] = barcode_type
row_dict['quality'] = quality
row_dict['upc'] = data
rows.append(row_dict)
except KeyError:
pass
return True
def escape_xml(raw_xml):
# zbar doesn't do xml escaping, so we have to do it here before we hand the
# xml off to a parser
lines = []
# Need to handle non-escaped barcodes that have no source:
# if the current line opens a <barcodes> element and the previous one did too,
# prefix the line with a closing </barcodes>.
previous_line_is_barcodes = False
for line in raw_xml.split('\n'):
if '<barcodes' in line and previous_line_is_barcodes:
line = '</barcodes>' + line
if '<barcodes' in line:
previous_line_is_barcodes = True
if 'source href' in line and previous_line_is_barcodes:
previous_line_is_barcodes = False
if 'source href' in line:
href = line.replace("<source href='", '').replace("'>", '')
href = href.split('/')[:-1]
href = '/'.join(href)
line = "<source href='" + href + "'>"
line = line.strip()
if line:
lines.append(line)
if len(lines) > 0 and '/barcodes' not in lines[-1]:
lines.append('</barcodes>')
return '<allbarcodes>\n' + '\n'.join(lines) + '\n</allbarcodes>'
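# Illustrative note: zbar may emit consecutive "<barcodes ...>" openings with
# no matching close tag; the fix-up above inserts the missing "</barcodes>",
# truncates each source href to its parent directory, and wraps the whole
# output in a single <allbarcodes> element so it can be parsed as XML.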
out_file = input_file.replace('.xml', '.json')
out = open(out_file, 'w')
escaped_xml = StringIO(escape_xml(io.open(input_file, encoding="utf-8").read()))
try:
xmltodict.parse(strip_unicode(escaped_xml.getvalue(), True), item_depth=2, item_callback=handle_barcode)
except xml.parsers.expat.ExpatError as ee:
logging.info('Error parsing barcode XML file %s', input_file)
logging.debug(ee)
for row in rows:
out.write(json.dumps(row) + '\n')
out.close()
|
23604576ca4ee715caf815360fd108c184bbebd1
|
ca35ba3894b46c7c851ac12ba4723d5b9714102b
|
/tests/test_templates.py
|
3d05d64777f613046b5f24aeebda8e9513020080
|
[
"MIT",
"0BSD"
] |
permissive
|
pyscaffold/pyscaffold
|
4ec2fd57938e3e9d159bd9289c6b41c2feecde91
|
14ff8554f25c83845687315c0a251048e76784ba
|
refs/heads/master
| 2023-08-30T21:55:56.370277
| 2023-06-20T15:54:36
| 2023-06-20T15:54:36
| 18,357,535
| 1,479
| 159
|
NOASSERTION
| 2023-06-28T04:37:01
| 2014-04-02T07:01:57
|
Python
|
UTF-8
|
Python
| false
| false
| 3,285
|
py
|
test_templates.py
|
import sys
from configparser import ConfigParser
from pathlib import Path
import pytest
from pyscaffold import actions, api
from pyscaffold import dependencies as deps
from pyscaffold import info, templates
def test_get_template():
template = templates.get_template("setup_py")
content = template.safe_substitute()
assert content.split("\n", 1)[0] == '"""'
@pytest.fixture
def tmp_python_path(tmp_path):
sys.path.append(str(tmp_path))
yield tmp_path
sys.path.remove(str(tmp_path))
def test_get_template_relative_to(tmp_python_path):
# Given a template exists inside a package
parent = tmp_python_path / "pkg4test"
pkg = tmp_python_path / "pkg4test" / "asdf42_123456"
pkg.mkdir(parents=True, exist_ok=True)
(parent / "__init__.py").touch(exist_ok=True)
(parent / "ex1.template").write_text("${var1}")
(pkg / "__init__.py").touch(exist_ok=True)
(pkg / "ex2.template").write_text("${var2}")
# When using "relative_to" with __name__
import pkg4test
tpl1 = templates.get_template("ex1", relative_to=pkg4test.__name__)
content = tpl1.safe_substitute({"var1": "Hello World!"})
# Then get_template should work
assert content == "Hello World!"
# When using "relative_to" with a module
import pkg4test
tpl1 = templates.get_template("ex1", relative_to=pkg4test)
content = tpl1.safe_substitute({"var1": "Some World!"})
# Then get_template should work
assert content == "Some World!"
# When using "relative_to" with a package name string
tpl2 = templates.get_template("ex2", relative_to="pkg4test.asdf42_123456")
content = tpl2.safe_substitute({"var2": "Bye bye World!"})
# Then get_template should work
assert content == "Bye bye World!"
def test_all_licenses():
opts = {
"email": "test@user",
"name": "my_project",
"author": "myself",
"year": 1832,
}
for license in templates.licenses.keys():
opts["license"] = license
assert templates.license(opts)
def test_setup_cfg():
reqs = ("mydep1>=789.8.1", "mydep3<=90009;python_version>'3.5'", "other")
opts = api.bootstrap_options({"project_path": "myproj", "requirements": reqs})
_, opts = actions.get_default_options({}, opts)
text = templates.setup_cfg(opts)
setup_cfg = ConfigParser()
setup_cfg.read_string(text)
# Assert install_requires is correctly assigned
install_requires = deps.split(setup_cfg["options"]["install_requires"])
for dep in reqs:
assert dep in install_requires
# Assert PyScaffold section
assert setup_cfg["pyscaffold"].get("version")
def test_setup_cfg_2line_description(tmpfolder):
# When a 2 line description is found (e.g. by reading an existing setup.cfg file)
_, opts = actions.get_default_options({}, {"project_path": tmpfolder})
opts["description"] = "2 line\ndescription"
# Then the rendered template should still be valid
text = templates.setup_cfg(opts)
setup_cfg = ConfigParser()
setup_cfg.read_string(text)
assert setup_cfg["metadata"]["description"].strip() == "2 line\ndescription"
Path(tmpfolder, "setup.cfg").write_text(text)
opts = info.project({})
assert opts["description"].strip() == "2 line\ndescription"
|
aadb604d8fa9a0b01777c367eb31c7bbdbbb8ce7
|
dec5a11d95f7b87da9985362dca4dc573a8bd805
|
/tdda/constraints/examples/accounts_verify_1k.py
|
d6a1fd5e017d396f979d343cdc8d4ffb8ec37101
|
[
"MIT"
] |
permissive
|
tdda/tdda
|
37d37e51c13362e65af07fe81708bb126fa568eb
|
08e1ec6d7397f2b0f527ac59698180ba54e53814
|
refs/heads/master
| 2023-02-19T00:08:40.983473
| 2023-02-10T19:49:27
| 2023-02-10T19:49:27
| 58,143,323
| 275
| 34
|
MIT
| 2019-07-02T14:44:33
| 2016-05-05T16:00:57
|
Python
|
UTF-8
|
Python
| false
| false
| 223
|
py
|
accounts_verify_1k.py
|
# accounts_verify_1k.py
from __future__ import print_function
import pandas as pd
from tdda.constraints.pd.constraints import verify_df
df = pd.read_csv('testdata/accounts1k.csv')
print(verify_df(df, 'accounts1k.tdda'))
|
dd4dbc9be224245428f060a752f65168da6a26fb
|
54292bb222c6525217458e92ddacfc4e2635b83e
|
/python/phonenumbers/data/region_AU.py
|
f608a4b46ab5a8e0b14ae049ed83a3e157b9511c
|
[
"Apache-2.0"
] |
permissive
|
daviddrysdale/python-phonenumbers
|
0d69b48033d1464c0a6c358274062f1db2ee8c4a
|
2f06ef6db2ca83f3856fbb8019a0c665f5971b13
|
refs/heads/dev
| 2023-08-31T09:37:20.570690
| 2023-08-22T05:18:22
| 2023-08-22T05:18:22
| 1,643,611
| 2,944
| 406
|
Apache-2.0
| 2023-08-08T06:49:07
| 2011-04-21T03:06:38
|
Python
|
UTF-8
|
Python
| false
| false
| 4,187
|
py
|
region_AU.py
|
"""Auto-generated file, do not edit by hand. AU metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_AU = PhoneMetadata(id='AU', country_code=61, international_prefix='001[14-689]|14(?:1[14]|34|4[17]|[56]6|7[47]|88)0011',
general_desc=PhoneNumberDesc(national_number_pattern='1(?:[0-79]\\d{7}(?:\\d(?:\\d{2})?)?|8[0-24-9]\\d{7})|[2-478]\\d{8}|1\\d{4,7}', possible_length=(5, 6, 7, 8, 9, 10, 12)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:(?:2(?:[0-26-9]\\d|3[0-8]|4[02-9]|5[0135-9])|3(?:[0-3589]\\d|4[0-578]|6[1-9]|7[0-35-9])|7(?:[013-57-9]\\d|2[0-8]))\\d{3}|8(?:51(?:0(?:0[03-9]|[12479]\\d|3[2-9]|5[0-8]|6[1-9]|8[0-7])|1(?:[0235689]\\d|1[0-69]|4[0-589]|7[0-47-9])|2(?:0[0-79]|[18][13579]|2[14-9]|3[0-46-9]|[4-6]\\d|7[89]|9[0-4]))|(?:6[0-8]|[78]\\d)\\d{3}|9(?:[02-9]\\d{3}|1(?:(?:[0-58]\\d|6[0135-9])\\d|7(?:0[0-24-9]|[1-9]\\d)|9(?:[0-46-9]\\d|5[0-79])))))\\d{3}', example_number='212345678', possible_length=(9,), possible_length_local_only=(8,)),
mobile=PhoneNumberDesc(national_number_pattern='4(?:(?:79|94)[01]|83[0-389])\\d{5}|4(?:[0-3]\\d|4[047-9]|5[0-25-9]|6[016-9]|7[02-8]|8[0-24-9]|9[0-37-9])\\d{6}', example_number='412345678', possible_length=(9,)),
toll_free=PhoneNumberDesc(national_number_pattern='180(?:0\\d{3}|2)\\d{3}', example_number='1800123456', possible_length=(7, 10)),
premium_rate=PhoneNumberDesc(national_number_pattern='190[0-26]\\d{6}', example_number='1900123456', possible_length=(10,)),
shared_cost=PhoneNumberDesc(national_number_pattern='13(?:00\\d{6}(?:\\d{2})?|45[0-4]\\d{3})|13\\d{4}', example_number='1300123456', possible_length=(6, 8, 10, 12)),
voip=PhoneNumberDesc(national_number_pattern='14(?:5(?:1[0458]|[23][458])|71\\d)\\d{4}', example_number='147101234', possible_length=(9,)),
pager=PhoneNumberDesc(national_number_pattern='163\\d{2,6}', example_number='1631234', possible_length=(5, 6, 7, 8, 9)),
no_international_dialling=PhoneNumberDesc(national_number_pattern='1(?:3(?:00\\d{5}|45[0-4])|802)\\d{3}|1[38]00\\d{6}|13\\d{4}', possible_length=(6, 7, 8, 10, 12)),
preferred_international_prefix='0011',
national_prefix='0',
national_prefix_for_parsing='(183[12])|0',
number_format=[NumberFormat(pattern='(\\d{2})(\\d{3,4})', format='\\1 \\2', leading_digits_pattern=['16'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3', leading_digits_pattern=['13']),
NumberFormat(pattern='(\\d{3})(\\d{3})', format='\\1 \\2', leading_digits_pattern=['19']),
NumberFormat(pattern='(\\d{3})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['180', '1802']),
NumberFormat(pattern='(\\d{4})(\\d{3,4})', format='\\1 \\2', leading_digits_pattern=['19']),
NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{2,4})', format='\\1 \\2 \\3', leading_digits_pattern=['16'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['14|4'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d)(\\d{4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['[2378]'], national_prefix_formatting_rule='(0\\1)', domestic_carrier_code_formatting_rule='$CC (\\1)'),
NumberFormat(pattern='(\\d{4})(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['1(?:30|[89])']),
NumberFormat(pattern='(\\d{4})(\\d{4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['130'])],
intl_number_format=[NumberFormat(pattern='(\\d{2})(\\d{3,4})', format='\\1 \\2', leading_digits_pattern=['16']),
NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{2,4})', format='\\1 \\2 \\3', leading_digits_pattern=['16']),
NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['14|4']),
NumberFormat(pattern='(\\d)(\\d{4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['[2378]']),
NumberFormat(pattern='(\\d{4})(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['1(?:30|[89])'])],
main_country_for_code=True,
mobile_number_portable_region=True)
|
8c846db5a27f25fd23349da82276a84c8b16d7f8
|
d2621d10d6d0aa4fcecbb11c281e3dd680b985fc
|
/examples/image_classifier/resnet_18/model.py
|
e3d61bc5d5fa196b1ea4a1f1dc5664e93944801e
|
[
"Apache-2.0"
] |
permissive
|
pytorch/serve
|
7b562a4d6372e77ce28fc71a5b8d5455c6f02290
|
242895c6b4596c4119ec09d6139e627c5dd696b6
|
refs/heads/master
| 2023-08-31T05:24:10.950144
| 2023-08-31T02:49:22
| 2023-08-31T02:49:22
| 212,488,700
| 3,689
| 895
|
Apache-2.0
| 2023-09-13T22:34:31
| 2019-10-03T03:17:43
|
Java
|
UTF-8
|
Python
| false
| false
| 187
|
py
|
model.py
|
from torchvision.models.resnet import ResNet, BasicBlock
class ImageClassifier(ResNet):
def __init__(self):
super(ImageClassifier, self).__init__(BasicBlock, [2, 2, 2, 2])
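# Note: BasicBlock with layer counts [2, 2, 2, 2] is the ResNet-18 topology.
# Illustrative use (not part of the original file; requires `import torch`):
# model = ImageClassifier()
# scores = model(torch.randn(1, 3, 224, 224))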
|
bcfc936974484b1e4539a1af6a4aa93d6777ce30
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/CommonScripts/Scripts/DeduplicateValuesbyKey/DeduplicateValuesbyKey_test.py
|
91a1e1f446fa8d08eb794e55279d32f93858a84a
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,776
|
py
|
DeduplicateValuesbyKey_test.py
|
import pytest
def test_generate_unique_values_from_objects():
from DeduplicateValuesbyKey import generate_unique_values_from_objects
objects = [
{
"key": "value1",
"value": "value1"
},
{
"key": "value1",
"value": "value2"
},
{
"key": "value2",
"value": "value3"
},
{
"key": "value2",
"value": "value4"
},
{
"key": "value3",
"value": "value5"
},
{
"key": "value3",
"value": "value6"
}]
values = generate_unique_values_from_objects(objects, "key", False)
assert set(values) == set(["value1", "value2", "value3"])
def test_generate_unique_values_from_objects_with_none():
from DeduplicateValuesbyKey import generate_unique_values_from_objects
objects = [
{
"key": "value1",
"value": "value1"
},
{
"key": "value1",
"value": "value2"
},
{
"key": "value2",
"value": "value3"
},
{
"key": "value2",
"value": "value4"
},
{
"key": "value3",
"value": "value5"
},
{
"key": "None_value",
"value": None
}]
values = generate_unique_values_from_objects(objects, "key", True)
assert set(values) == set(["None_value", "value1", "value2", "value3"])
def test_generate_unique_values_from_objects_fail():
from DeduplicateValuesbyKey import generate_unique_values_from_objects
with pytest.raises(SystemExit):
generate_unique_values_from_objects([], "key", True)
|
c0a2de15ac5ad6d000cfeca19d1470f369271bb1
|
8c0b804f1cc8cbf2f8788727df22a2cc149f7b5c
|
/gala/dynamics/mockstream/mockstream_generator.py
|
6b1bef07d06c12634176560434745c6ef1e16ad1
|
[
"MIT"
] |
permissive
|
adrn/gala
|
579cc5a4ecb22df118e1c8a2322a46e935825054
|
f62e1a6ae7a8466a4db5c8407471b524cf085637
|
refs/heads/main
| 2023-09-04T11:42:07.278388
| 2023-08-18T18:04:35
| 2023-08-18T18:04:35
| 17,577,779
| 115
| 89
|
MIT
| 2023-09-05T11:40:10
| 2014-03-10T00:56:18
|
Python
|
UTF-8
|
Python
| false
| false
| 12,192
|
py
|
mockstream_generator.py
|
# Third-party
import numpy as np
# This package
from .. import combine, PhaseSpacePosition
from ..nbody import DirectNBody
from ...potential import Hamiltonian, PotentialBase
from ...integrate.timespec import parse_time_specification
from ._mockstream import mockstream_dop853, mockstream_dop853_animate
from .core import MockStream
__all__ = ["MockStreamGenerator"]
class MockStreamGenerator:
def __init__(self, df, hamiltonian, progenitor_potential=None):
"""Generate a mock stellar stream in the specified external potential.
By default, you must pass in a specification of the stream distribution
function (``df``), and the external gravitational potential and
reference frame (via a `~gala.potential.Hamiltonian` object passed in
through the ``hamiltonian`` argument).
Also by default, the stream generation does not include the self-gravity
of the progenitor system: star particles are generated using the ``df``
object, and released into the external potential specified by the
``hamiltonian``. If you would like the star particles to feel the
gravitational field of the progenitor system, you may pass in a
potential object to represent the progenitor via the
``progenitor_potential`` argument. This can be any valid gala potential
instance.
Parameters
----------
df : `~gala.dynamics.BaseStreamDF` subclass instance
The stream distribution function (DF) object that specifies how to
generate stream star particle initial conditions.
hamiltonian : `~gala.potential.Hamiltonian`
The external potential and reference frame to numerically integrate
orbits in.
progenitor_potential : `~gala.potential.PotentialBase` (optional)
If specified, the self-gravity of the progenitor system is included
in the force calculation and orbit integration. If not specified,
self-gravity is not accounted for. Default: ``None``
"""
from .df import BaseStreamDF
if not isinstance(df, BaseStreamDF):
raise TypeError(
"The input distribution function (DF) instance "
"must be an instance of a subclass of "
"BaseStreamDF, not {}.".format(type(df))
)
self.df = df
# Validate the input hamiltonian
self.hamiltonian = Hamiltonian(hamiltonian)
if progenitor_potential is not None:
# validate the potential class
if not isinstance(progenitor_potential, PotentialBase):
raise TypeError(
"If specified, the progenitor_potential must be a gala.potential "
"class instance."
)
self.self_gravity = True
else:
self.self_gravity = False
self.progenitor_potential = progenitor_potential
def _get_nbody(self, prog_w0, nbody):
"""
Internal function that adds the progenitor to the list of nbody objects to
integrate along with the test particles in the stream.
"""
kwargs = dict()
if nbody is not None:
if nbody.external_potential != self.hamiltonian.potential:
raise ValueError(
"The external potential of the input nbody instance must match the "
"potential of the mock stream input hamiltonian! "
f"{nbody.external_potential} vs. {self.hamiltonian.potential}"
)
if nbody.frame != self.hamiltonian.frame:
raise ValueError(
"The reference frame of the input nbody instance must match the "
"frame of the mock stream input hamiltonian! "
f"{nbody.frame} vs. {self.hamiltonian.frame}"
)
kwargs["w0"] = combine((prog_w0, nbody.w0))
kwargs["particle_potentials"] = [
self.progenitor_potential
] + nbody.particle_potentials
kwargs["external_potential"] = self.hamiltonian.potential
kwargs["frame"] = self.hamiltonian.frame
kwargs["units"] = self.hamiltonian.units
kwargs["save_all"] = nbody.save_all
else:
kwargs["w0"] = prog_w0
kwargs["particle_potentials"] = [self.progenitor_potential]
kwargs["external_potential"] = self.hamiltonian.potential
kwargs["frame"] = self.hamiltonian.frame
kwargs["units"] = self.hamiltonian.units
return DirectNBody(**kwargs)
def run(
self,
prog_w0,
prog_mass,
nbody=None,
release_every=1,
n_particles=1,
output_every=None,
output_filename=None,
check_filesize=True,
overwrite=False,
progress=False,
**time_spec
):
"""
Run the mock stream generator with the specified progenitor initial conditions.
This method generates the mock stellar stream for the specified progenitor
system properties. The progenitor orbit is specified by passing in the initial
or final conditions ``prog_w0`` and by specifying time-stepping information via
the ``**time_spec`` keyword arguments. If the time-stepping specification
proceeds forward in time, ``prog_w0`` is interpreted as initial conditions and
the mock stream is generated forwards from this position. If the time-stepping
proceeds backwards in time, the progenitor orbit is first numerically integrated
backwards given the time-stepping information, then the stream is generated
forward from the past such that ``prog_w0`` becomes the final position of the
progenitor.
Note that the stream generation also supports including other massive perturbers
that can gravitationally influence the stream stars. These other massive bodies
must be passed in as a `~gala.dynamics.DirectNBody` instance through the
``nbody`` argument. The phase-space coordinates of the bodies, ``nbody.w0``, are
interpreted as initial or final conditions with the same logic as above.
Parameters
----------
prog_w0 : `~gala.dynamics.PhaseSpacePosition`
The initial or final phase-space position of the progenitor system (see note
above).
prog_mass : `~astropy.units.Quantity` [mass]
The mass of the progenitor system, passed in to the stream distribution
function (df) ``.sample()`` method. This quantity sets the scale mass of the
particle release df, but not the mass of the progenitor potential used to
compute the self-gravity on the stream particles.
nbody : `~gala.dynamics.DirectNBody` (optional)
This allows specifying other massive perturbers (N-bodies) that can
gravitationally influence the stream star orbits.
release_every : int (optional)
Controls how often to release stream particles from each tail. Default: 1,
meaning release particles at each timestep.
n_particles : int, array_like (optional)
If an integer, this controls the number of particles to release in each tail
at each release timestep. Alternatively, you can pass in an array with the
same shape as the number of timesteps to release bursts of particles at
certain times (e.g., pericenter).
output_every : int (optional)
Controls whether to output snapshots of the stream particle orbits. This is
relative to the global time array.
output_filename : str (optional)
The path to the HDF5 file to be generated by the snapshotting.
check_filesize : bool (optional)
If True (the default value), this controls whether to check the estimated
size of the output file, and emits a warning if the file is >8GB in size.
overwrite : bool (optional)
Overwrite the output file if it exists.
progress : bool (optional)
Print a very basic progress bar while computing the stream.
**time_spec
Specification of how long to integrate. Most commonly, this is a timestep
``dt`` and number of steps ``n_steps``, or a timestep ``dt``, initial time
``t1``, and final time ``t2``. You may also pass in a time array with ``t``.
See documentation for `~gala.integrate.parse_time_specification` for more
information.
Returns
-------
stream_w : `~gala.dynamics.PhaseSpacePosition`
nbody_w : `~gala.dynamics.PhaseSpacePosition`
"""
units = self.hamiltonian.units
t = parse_time_specification(units, **time_spec)
prog_nbody = self._get_nbody(prog_w0, nbody)
nbody_orbits = prog_nbody.integrate_orbit(t=t)
# If the time stepping passed in is negative, assume this means that all
# of the initial conditions are at *end time*, and we first need to
# integrate them backwards before treating them as initial conditions
if t[1] < t[0]:
nbody_orbits = nbody_orbits[::-1]
# TODO: this could be cleaned up...
nbody0 = DirectNBody(
nbody_orbits[0],
prog_nbody.particle_potentials,
external_potential=self.hamiltonian.potential,
frame=self.hamiltonian.frame,
units=units,
)
else:
nbody0 = prog_nbody
prog_orbit = nbody_orbits[:, 0] # Note: Progenitor must be idx 0!
orbit_t = prog_orbit.t.decompose(units).value
# Generate initial conditions from the DF
stream_w0 = self.df.sample(
prog_orbit,
prog_mass,
hamiltonian=self.hamiltonian,
release_every=release_every,
n_particles=n_particles,
)
w0 = np.vstack(
(
stream_w0.xyz.decompose(units).value,
stream_w0.v_xyz.decompose(units).value,
)
).T
w0 = np.ascontiguousarray(w0)
unq_t1s, nstream = np.unique(
stream_w0.release_time.decompose(units).value, return_counts=True
)
all_nstream = np.zeros(prog_orbit.ntimes, dtype=int)
for t1, n in zip(unq_t1s, nstream):
all_nstream[np.isclose(orbit_t, t1)] = n
if output_every is None:
raw_nbody, raw_stream = mockstream_dop853(
nbody0,
orbit_t[all_nstream != 0],
w0,
unq_t1s,
orbit_t[-1],
all_nstream[all_nstream != 0].astype("i4"),
progress=int(progress),
)
else: # store snapshots
if output_filename is None:
raise ValueError(
"If output_every is specified, you must also pass in a filename to "
"store the snapshots in"
)
raw_nbody, raw_stream = mockstream_dop853_animate(
nbody0,
orbit_t,
w0,
all_nstream.astype("i4"),
output_every=output_every,
output_filename=output_filename,
check_filesize=check_filesize,
overwrite=overwrite,
progress=int(progress),
)
x_unit = units["length"]
v_unit = units["length"] / units["time"]
stream_w = MockStream(
pos=raw_stream[:, :3].T * x_unit,
vel=raw_stream[:, 3:].T * v_unit,
release_time=stream_w0.release_time,
lead_trail=stream_w0.lead_trail,
frame=self.hamiltonian.frame,
)
nbody_w = PhaseSpacePosition(
pos=raw_nbody[:, :3].T * x_unit,
vel=raw_nbody[:, 3:].T * v_unit,
frame=self.hamiltonian.frame,
)
return stream_w, nbody_w
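# A minimal usage sketch of calling this method through the public generator
# class (assuming this is the ``run`` method of
# `~gala.dynamics.mockstream.MockStreamGenerator`). The potential, progenitor
# mass, and time-stepping values below are illustrative choices only.
def _example_mockstream_run():
    import astropy.units as u
    import gala.dynamics as gd
    import gala.potential as gp
    from gala.dynamics import mockstream as ms
    pot = gp.MilkyWayPotential()
    ham = gp.Hamiltonian(pot)
    gen = ms.MockStreamGenerator(ms.FardalStreamDF(), ham)
    # With dt < 0, prog_w0 is treated as the *final* progenitor position
    prog_w0 = gd.PhaseSpacePosition(
        pos=[10.0, 0.0, 0.0] * u.kpc,
        vel=[0.0, 170.0, 0.0] * u.km / u.s,
    )
    stream_w, nbody_w = gen.run(prog_w0, 1e4 * u.Msun, dt=-1 * u.Myr, n_steps=1000)
    return stream_w, nbody_w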
|
378e888269b93596cc9f2411f6fdb37ac1aaf658
|
6c29f457a5e787309b344fec53c133845d8985e8
|
/tests/optim/test_adam.py
|
77523d633cf019af917d3757e2dd011a2e5ccde3
|
[
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
facebookresearch/fairscale
|
eb378e44cca951e242fb58f82522d9ba8e87d732
|
164cc0f3170b4a3951dd84dda29c3e1504ac4d6e
|
refs/heads/main
| 2023-09-04T12:48:14.924836
| 2023-04-20T03:41:53
| 2023-04-20T03:41:53
| 277,899,703
| 2,553
| 257
|
NOASSERTION
| 2023-08-28T19:02:48
| 2020-07-07T19:02:01
|
Python
|
UTF-8
|
Python
| false
| false
| 13,875
|
py
|
test_adam.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from copy import deepcopy
import functools
import pytest
import torch
try:
from fairscale.optim import Adam, GradScaler, Precision
imported_adam = True
except ImportError:
imported_adam = False
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
skip_if_no_adam = pytest.mark.skipif(not imported_adam, reason="Fairscale Adam not available")
@pytest.fixture(autouse=True)
def set_torch_seed():
torch.manual_seed(1)
yield
def make_full_precision_params():
weight = torch.randn(2, 1).cuda().requires_grad_()
bias = torch.randn(2).cuda().requires_grad_()
input = torch.randn(1).cuda()
return weight, bias, input
def make_half_precision_params():
weight = torch.randn(2, 1).cuda().half().requires_grad_()
bias = torch.randn(2).cuda().half().requires_grad_()
input = torch.randn(1).half().cuda()
return weight, bias, input
def step_test(optimizer, weight, bias, input):
# to check if the optimizer can be printed as a string
optimizer.__repr__()
def fn():
optimizer.zero_grad()
y = weight.mv(input)
if y.is_cuda and bias.is_cuda and y.get_device() != bias.get_device():
y = y.cuda(bias.get_device())
loss = (y + bias).pow(2).sum()
loss.backward()
return loss
initial_value = fn().item()
for _i in range(5):
optimizer.step(fn)
assert fn().item() < initial_value
def state_dict_test(optimizer, weight, bias, input):
def fn_base(optimizer, weight, bias, input):
optimizer.zero_grad()
loss = (weight.mv(input) + bias).pow(2).sum()
loss.backward()
return loss
fn = functools.partial(fn_base, optimizer, weight, bias, input)
# Prime the optimizer
for _i in range(5):
optimizer.step(fn)
# Clone the weights and construct new optimizer for them
weight_c = weight.data.clone().requires_grad_()
bias_c = bias.data.clone().requires_grad_()
optimizer_c = Adam([weight_c, bias_c], lr=1e-3, precision=optimizer.precision)
fn_c = functools.partial(fn_base, optimizer_c, weight_c, bias_c, input)
# Load state dict
state_dict = deepcopy(optimizer.state_dict())
optimizer_c.load_state_dict(state_dict)
for group, group_c in zip(optimizer.param_groups, optimizer_c.param_groups):
for p, p_c in zip(group["params"], group_c["params"]):
assert torch.equal(optimizer.state[p]["exp_avg"], optimizer_c.state[p_c]["exp_avg"])
assert torch.equal(optimizer.state[p]["exp_avg_sq"], optimizer_c.state[p_c]["exp_avg_sq"])
if optimizer.fp32_param_groups:
# When using mixed precision, fp32_param_groups are made from FP16 params rather than
# copied via state_dict, introducing differences between the original optimizer and
# the copy. Because this test requires that they be the exact same, we copy the
# fp32 params from the original optimizer to the copy
optimizer_c.fp32_param_groups = deepcopy(optimizer.fp32_param_groups)
# Run both optimizations in parallel
for _i in range(5):
optimizer.step(fn)
optimizer_c.step(fn_c)
assert torch.equal(weight, weight_c)
assert torch.equal(bias, bias_c)
def assert_almost_zero(x):
assert abs(x) < 1e-3
return 1.0
@skip_if_no_cuda
@skip_if_no_adam
def test_step_full_precision_inferred():
weight, bias, input = make_full_precision_params()
optimizer = Adam([weight, bias], lr=1e-3)
step_test(optimizer, weight, bias, input)
for group in optimizer.param_groups:
for p in group["params"]:
if p.requires_grad:
assert p.dtype == torch.float32
assert not optimizer.fp32_param_groups
assert optimizer.state[weight]["exp_avg"].dtype == torch.float32
assert optimizer.state[weight]["exp_avg_sq"].dtype == torch.float32
assert optimizer.state[bias]["exp_avg"].dtype == torch.float32
assert optimizer.state[bias]["exp_avg_sq"].dtype == torch.float32
@skip_if_no_cuda
@skip_if_no_adam
def test_step_mixed_precision_inferred():
weight, bias, input = make_half_precision_params()
optimizer = Adam([weight, bias], lr=1e-3)
step_test(optimizer, weight, bias, input)
assert len(optimizer.fp32_param_groups) == len(optimizer.param_groups)
for fp32_group, fp16_group in zip(optimizer.fp32_param_groups, optimizer.param_groups):
for fp32_p, fp16_p in zip(fp32_group["params"], fp16_group["params"]):
def assert_almost_zero(x):
assert abs(x) < 1e-3
return 1.0
assert fp32_p.dtype == torch.float32
if fp16_p.requires_grad:
assert fp16_p.dtype == torch.float16
(fp32_p - fp16_p).to("cpu").detach().apply_(assert_almost_zero)
assert optimizer.state[weight]["exp_avg"].dtype == torch.float32
assert optimizer.state[weight]["exp_avg_sq"].dtype == torch.float32
assert optimizer.state[bias]["exp_avg"].dtype == torch.float32
assert optimizer.state[bias]["exp_avg_sq"].dtype == torch.float32
@skip_if_no_cuda
@skip_if_no_adam
def test_step_memory_efficient():
weight, bias, input = make_half_precision_params()
optimizer = Adam([weight, bias], lr=1e-3, precision=Precision.MEMORY_EFFICIENT_MIXED_PRECISION)
step_test(optimizer, weight, bias, input)
for group in optimizer.param_groups:
for p in group["params"]:
if p.requires_grad:
assert p.dtype == torch.float16
assert not optimizer.fp32_param_groups
assert optimizer.state[weight]["exp_avg"].dtype == torch.float32
assert optimizer.state[weight]["exp_avg_sq"].dtype == torch.float32
assert optimizer.state[bias]["exp_avg"].dtype == torch.float32
assert optimizer.state[bias]["exp_avg_sq"].dtype == torch.float32
@skip_if_no_cuda
@skip_if_no_adam
def test_step_pure_fp16():
weight, bias, input = make_half_precision_params()
optimizer = Adam([weight, bias], lr=1e-3, precision=Precision.PURE_FP16)
step_test(optimizer, weight, bias, input)
for group in optimizer.param_groups:
for p in group["params"]:
if p.requires_grad:
assert p.dtype == torch.float16
assert optimizer.state[weight]["exp_avg"].dtype == torch.float16
assert optimizer.state[weight]["exp_avg_sq"].dtype == torch.float16
assert optimizer.state[bias]["exp_avg"].dtype == torch.float16
assert optimizer.state[bias]["exp_avg_sq"].dtype == torch.float16
assert not optimizer.fp32_param_groups
@skip_if_no_cuda
@skip_if_no_adam
def test_step_multigpu():
if not torch.cuda.device_count() > 1:
return
weight = torch.randn(10, 5).cuda(0).requires_grad_()
bias = torch.randn(10).cuda(1).requires_grad_()
input = torch.randn(5).cuda(0)
optimizer = Adam([weight, bias], lr=1e-3)
step_test(optimizer, weight, bias, input)
@skip_if_no_cuda
@skip_if_no_adam
def test_step_multigpu_mixed_precision():
if not torch.cuda.device_count() > 1:
return
weight = torch.randn(10, 5).cuda(0).half().requires_grad_()
bias = torch.randn(10).cuda(1).half().requires_grad_()
input = torch.randn(5).cuda(0).half()
optimizer = Adam([weight, bias], lr=1e-3)
step_test(optimizer, weight, bias, input)
@skip_if_no_cuda
@skip_if_no_adam
def test_step_pure_fp16_multigpu():
if not torch.cuda.device_count() > 1:
return
weight = torch.randn(10, 5).half().cuda(0).requires_grad_()
bias = torch.randn(10).half().cuda(1).requires_grad_()
input = torch.randn(5).half().cuda(0)
optimizer = Adam([weight, bias], lr=1e-3, precision=Precision.PURE_FP16)
step_test(optimizer, weight, bias, input)
assert optimizer.state[weight]["exp_avg"].dtype == torch.float16
assert optimizer.state[weight]["exp_avg_sq"].dtype == torch.float16
assert optimizer.state[bias]["exp_avg"].dtype == torch.float16
assert optimizer.state[bias]["exp_avg_sq"].dtype == torch.float16
@skip_if_no_cuda
@skip_if_no_adam
def test_step_with_grad_scaler():
weight, bias, input = make_half_precision_params()
optimizer = Adam([weight, bias], lr=1e-3, precision=Precision.PURE_FP16)
scaler = GradScaler()
initial_value = None
for _i in range(5):
optimizer.zero_grad()
loss = (weight.mv(input) + bias).pow(2).sum()
if _i == 0:
initial_value = loss.item()
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
assert loss.item() < initial_value
@skip_if_no_cuda
@skip_if_no_adam
def test_state_dict_full_precision():
weight, bias, input = make_full_precision_params()
optimizer = Adam([weight, bias], lr=1e-3)
state_dict_test(optimizer, weight, bias, input)
@skip_if_no_cuda
@skip_if_no_adam
@pytest.mark.xfail
def test_state_dict_mixed_precision():
# TODO: Optimizer state gets cast to FP16 and back to FP32 for
# mixed-precision and memory-efficient mixed-precision, resulting
# in a potential loss of precision. Thus, as training proceeds, we don't
# necessarily expect the parameters to remain the exact same.
weight, bias, input = make_half_precision_params()
optimizer = Adam([weight, bias], lr=1e-3, precision=Precision.MIXED_PRECISION)
state_dict_test(optimizer, weight, bias, input)
@skip_if_no_cuda
@skip_if_no_adam
@pytest.mark.xfail
def test_state_dict_memory_efficient():
# TODO: Optimizer state gets cast to FP16 and back to FP32 for
# mixed-precision and memory-efficient mixed-precision, resulting
# in a potential loss of precision. Thus, as training proceeds, we don't
# necessarily expect the parameters to remain the exact same.
weight, bias, input = make_half_precision_params()
optimizer = Adam([weight, bias], lr=1e-3, precision=Precision.MEMORY_EFFICIENT_MIXED_PRECISION)
state_dict_test(optimizer, weight, bias, input)
@skip_if_no_cuda
@skip_if_no_adam
def test_state_dict_pure_fp16():
weight, bias, input = make_half_precision_params()
optimizer = Adam([weight, bias], lr=1e-3, precision=Precision.PURE_FP16)
state_dict_test(optimizer, weight, bias, input)
@skip_if_no_cuda
@skip_if_no_adam
def test_update_optim_scale():
weight, bias, input = make_half_precision_params()
optimizer = Adam([weight, bias], lr=1e-3, precision=Precision.PURE_FP16)
optimizer._optim_scale_update_freq = 1
optimizer._optim_scale = 2**15
optimizer.zero_grad()
loss = (weight.mv(input) + bias).pow(2).sum()
loss.backward()
optimizer.step()
assert optimizer._optim_scale == 2**16
@skip_if_no_cuda
@skip_if_no_adam
def test_exploding_optimizer_state():
weight = torch.tensor([[float("inf")]]).half().cuda().requires_grad_()
input = torch.tensor([1.0]).half().cuda().requires_grad_()
optimizer = Adam([weight], lr=1e-3, precision=Precision.PURE_FP16)
optimizer._optim_scale = 1.0
optimizer.zero_grad()
loss = (weight.mv(input)).pow(2).sum()
loss.backward()
with pytest.raises(RuntimeError):
optimizer.step()
@skip_if_no_cuda
@skip_if_no_adam
def test_build_fp32_params():
weight = torch.randn(10, 5).cuda().half().requires_grad_()
bias = torch.randn(10).cuda().half().requires_grad_()
optimizer = Adam([weight, bias], lr=1e-3)
optimizer._build_fp32_params([weight, bias])
for fp32_group, fp16_group in zip(optimizer.fp32_param_groups, optimizer.param_groups):
for fp32_p, fp16_p in zip(fp32_group["params"], fp16_group["params"]):
assert fp32_p.dtype == torch.float32
if fp16_p.requires_grad:
assert fp16_p.dtype == torch.float16
(fp32_p - fp16_p).to("cpu").detach().apply_(assert_almost_zero)
@skip_if_no_cuda
@skip_if_no_adam
def test_invalid_beta():
weight = torch.randn(10, 5, requires_grad=True).float().cuda()
bias = torch.randn(10, requires_grad=True).float().cuda()
with pytest.raises(ValueError):
Adam([weight, bias], lr=1e-2, betas=(1.0, 0.0))
@skip_if_no_cuda
@skip_if_no_adam
def test_invalid_weight_decay():
weight = torch.randn(10, 5, requires_grad=True).float().cuda()
bias = torch.randn(10, requires_grad=True).float().cuda()
with pytest.raises(ValueError):
Adam([weight, bias], lr=1e-2, weight_decay=-1)
@skip_if_no_cuda
@skip_if_no_adam
def test_amsgrad():
weight = torch.randn(10, 5, requires_grad=True).float().cuda()
bias = torch.randn(10, requires_grad=True).float().cuda()
with pytest.raises(RuntimeError):
Adam([weight, bias], lr=1e-2, amsgrad=True)
@skip_if_no_cuda
@skip_if_no_adam
def test_mixed_precision_with_full_precision_parameters():
weight = torch.randn(10, 5, requires_grad=True).float().cuda()
bias = torch.randn(10, requires_grad=True).float().cuda()
with pytest.raises(AssertionError):
Adam([weight, bias], lr=1e-2, precision=Precision.MIXED_PRECISION)
@skip_if_no_cuda
@skip_if_no_adam
def test_memory_efficient_with_full_precision_parameters():
weight = torch.randn(10, 5, requires_grad=True).float().cuda()
bias = torch.randn(10, requires_grad=True).float().cuda()
with pytest.raises(AssertionError):
Adam([weight, bias], lr=1e-2, precision=Precision.MEMORY_EFFICIENT_MIXED_PRECISION)
@skip_if_no_cuda
@skip_if_no_adam
def test_pure_fp16_with_full_precision_parameters():
weight = torch.randn(10, 5, requires_grad=True).float().cuda()
bias = torch.randn(10, requires_grad=True).float().cuda()
with pytest.raises(AssertionError):
Adam([weight, bias], lr=1e-2, precision=Precision.PURE_FP16)
|
7340066260053edf022dafbbe2e0377e4f67bdfc
|
119646d6e1f13582c577fd7b87c9654839a0b806
|
/hubspot/marketing/events/api/attendance_subscriber_state_changes_api.py
|
966c2a6902b719d6953d78a7e101cea1c691e8d3
|
[] |
permissive
|
HubSpot/hubspot-api-python
|
446daaceeb3a6ce27edcd0414603c6d4bc07e327
|
d51a64c413461c0b82d8a41743e752d878747ca1
|
refs/heads/master
| 2023-08-31T09:52:56.583803
| 2023-08-07T11:00:27
| 2023-08-07T11:00:27
| 248,865,684
| 227
| 98
|
Apache-2.0
| 2023-09-14T15:25:19
| 2020-03-20T22:41:24
|
Python
|
UTF-8
|
Python
| false
| false
| 19,731
|
py
|
attendance_subscriber_state_changes_api.py
|
# coding: utf-8
"""
Marketing Events Extension
These APIs allow you to interact with HubSpot's Marketing Events Extension. It allows you to: * Create, Read or update Marketing Event information in HubSpot * Specify whether a HubSpot contact has registered, attended or cancelled a registration to a Marketing Event. * Specify a URL that can be called to get the details of a Marketing Event. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from hubspot.marketing.events.api_client import ApiClient
from hubspot.marketing.events.exceptions import ApiTypeError, ApiValueError # noqa: F401
class AttendanceSubscriberStateChangesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create(self, external_event_id, subscriber_state, batch_input_marketing_event_subscriber, **kwargs): # noqa: E501
"""Record # noqa: E501
Record a subscription state between multiple HubSpot contacts and a marketing event, using HubSpot contact ids. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create(external_event_id, subscriber_state, batch_input_marketing_event_subscriber, async_req=True)
>>> result = thread.get()
:param external_event_id: The id of the marketing event (required)
:type external_event_id: str
:param subscriber_state: The new subscriber state for the HubSpot contacts and the specified marketing event. For example: 'register', 'attend' or 'cancel'. (required)
:type subscriber_state: str
:param batch_input_marketing_event_subscriber: The details of the contacts to subscribe to the event. Parameters of join and left time if state is Attended. (required)
:type batch_input_marketing_event_subscriber: BatchInputMarketingEventSubscriber
:param external_account_id: The account id associated with the marketing event
:type external_account_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: BatchResponseSubscriberVidResponse
"""
kwargs["_return_http_data_only"] = True
return self.create_with_http_info(external_event_id, subscriber_state, batch_input_marketing_event_subscriber, **kwargs) # noqa: E501
def create_with_http_info(self, external_event_id, subscriber_state, batch_input_marketing_event_subscriber, **kwargs): # noqa: E501
"""Record # noqa: E501
Record a subscription state between multiple HubSpot contacts and a marketing event, using HubSpot contact ids. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_with_http_info(external_event_id, subscriber_state, batch_input_marketing_event_subscriber, async_req=True)
>>> result = thread.get()
:param external_event_id: The id of the marketing event (required)
:type external_event_id: str
:param subscriber_state: The new subscriber state for the HubSpot contacts and the specified marketing event. For example: 'register', 'attend' or 'cancel'. (required)
:type subscriber_state: str
:param batch_input_marketing_event_subscriber: The details of the contacts to subscribe to the event. Parameters of join and left time if state is Attended. (required)
:type batch_input_marketing_event_subscriber: BatchInputMarketingEventSubscriber
:param external_account_id: The account id associated with the marketing event
:type external_account_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:type _content_type: string, optional: force content-type for the request
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(BatchResponseSubscriberVidResponse, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = ["external_event_id", "subscriber_state", "batch_input_marketing_event_subscriber", "external_account_id"]
all_params.extend(["async_req", "_return_http_data_only", "_preload_content", "_request_timeout", "_request_auth", "_content_type", "_headers"])
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method create" % key)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'external_event_id' is set
if self.api_client.client_side_validation and local_var_params.get("external_event_id") is None: # noqa: E501
raise ApiValueError("Missing the required parameter `external_event_id` when calling `create`") # noqa: E501
# verify the required parameter 'subscriber_state' is set
if self.api_client.client_side_validation and local_var_params.get("subscriber_state") is None: # noqa: E501
raise ApiValueError("Missing the required parameter `subscriber_state` when calling `create`") # noqa: E501
# verify the required parameter 'batch_input_marketing_event_subscriber' is set
if self.api_client.client_side_validation and local_var_params.get("batch_input_marketing_event_subscriber") is None: # noqa: E501
raise ApiValueError("Missing the required parameter `batch_input_marketing_event_subscriber` when calling `create`") # noqa: E501
collection_formats = {}
path_params = {}
if "external_event_id" in local_var_params:
path_params["externalEventId"] = local_var_params["external_event_id"] # noqa: E501
if "subscriber_state" in local_var_params:
path_params["subscriberState"] = local_var_params["subscriber_state"] # noqa: E501
query_params = []
if local_var_params.get("external_account_id") is not None: # noqa: E501
query_params.append(("externalAccountId", local_var_params["external_account_id"])) # noqa: E501
header_params = dict(local_var_params.get("_headers", {}))
form_params = []
local_var_files = {}
body_params = None
if "batch_input_marketing_event_subscriber" in local_var_params:
body_params = local_var_params["batch_input_marketing_event_subscriber"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(["application/json", "*/*"]) # noqa: E501
# HTTP header `Content-Type`
content_types_list = local_var_params.get("_content_type", self.api_client.select_header_content_type(["application/json"], "POST", body_params)) # noqa: E501
if content_types_list:
header_params["Content-Type"] = content_types_list
# Authentication setting
auth_settings = ["oauth2"] # noqa: E501
response_types_map = {
200: "BatchResponseSubscriberVidResponse",
}
return self.api_client.call_api(
"/marketing/v3/marketing-events/attendance/{externalEventId}/{subscriberState}/create",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get("_return_http_data_only"), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
_request_auth=local_var_params.get("_request_auth"),
)
def create_by_email(self, external_event_id, subscriber_state, batch_input_marketing_event_email_subscriber, **kwargs): # noqa: E501
"""Record # noqa: E501
Record a subscription state between multiple HubSpot contacts and a marketing event, using contact email addresses. If contact is not present it will be automatically created. If you set params # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_by_email(external_event_id, subscriber_state, batch_input_marketing_event_email_subscriber, async_req=True)
>>> result = thread.get()
:param external_event_id: The id of the marketing event (required)
:type external_event_id: str
:param subscriber_state: The new subscriber state for the HubSpot contacts and the specified marketing event. For example: 'register', 'attend' or 'cancel'. (required)
:type subscriber_state: str
:param batch_input_marketing_event_email_subscriber: The details of the contacts to subscribe to the event. Parameters of join and left time if state is Attended. (required)
:type batch_input_marketing_event_email_subscriber: BatchInputMarketingEventEmailSubscriber
:param external_account_id: The account id associated with the marketing event
:type external_account_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: BatchResponseSubscriberEmailResponse
"""
kwargs["_return_http_data_only"] = True
return self.create_by_email_with_http_info(external_event_id, subscriber_state, batch_input_marketing_event_email_subscriber, **kwargs) # noqa: E501
def create_by_email_with_http_info(self, external_event_id, subscriber_state, batch_input_marketing_event_email_subscriber, **kwargs): # noqa: E501
"""Record # noqa: E501
Record a subscription state between multiple HubSpot contacts and a marketing event, using contact email addresses. If contact is not present it will be automatically created. If you set params # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_by_email_with_http_info(external_event_id, subscriber_state, batch_input_marketing_event_email_subscriber, async_req=True)
>>> result = thread.get()
:param external_event_id: The id of the marketing event (required)
:type external_event_id: str
:param subscriber_state: The new subscriber state for the HubSpot contacts and the specified marketing event. For example: 'register', 'attend' or 'cancel'. (required)
:type subscriber_state: str
:param batch_input_marketing_event_email_subscriber: The details of the contacts to subscribe to the event. Parameters of join and left time if state is Attended. (required)
:type batch_input_marketing_event_email_subscriber: BatchInputMarketingEventEmailSubscriber
:param external_account_id: The account id associated with the marketing event
:type external_account_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:type _content_type: string, optional: force content-type for the request
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(BatchResponseSubscriberEmailResponse, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = ["external_event_id", "subscriber_state", "batch_input_marketing_event_email_subscriber", "external_account_id"]
all_params.extend(["async_req", "_return_http_data_only", "_preload_content", "_request_timeout", "_request_auth", "_content_type", "_headers"])
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method create_by_email" % key)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'external_event_id' is set
if self.api_client.client_side_validation and local_var_params.get("external_event_id") is None: # noqa: E501
raise ApiValueError("Missing the required parameter `external_event_id` when calling `create_by_email`") # noqa: E501
# verify the required parameter 'subscriber_state' is set
if self.api_client.client_side_validation and local_var_params.get("subscriber_state") is None: # noqa: E501
raise ApiValueError("Missing the required parameter `subscriber_state` when calling `create_by_email`") # noqa: E501
# verify the required parameter 'batch_input_marketing_event_email_subscriber' is set
if self.api_client.client_side_validation and local_var_params.get("batch_input_marketing_event_email_subscriber") is None: # noqa: E501
raise ApiValueError("Missing the required parameter `batch_input_marketing_event_email_subscriber` when calling `create_by_email`") # noqa: E501
collection_formats = {}
path_params = {}
if "external_event_id" in local_var_params:
path_params["externalEventId"] = local_var_params["external_event_id"] # noqa: E501
if "subscriber_state" in local_var_params:
path_params["subscriberState"] = local_var_params["subscriber_state"] # noqa: E501
query_params = []
if local_var_params.get("external_account_id") is not None: # noqa: E501
query_params.append(("externalAccountId", local_var_params["external_account_id"])) # noqa: E501
header_params = dict(local_var_params.get("_headers", {}))
form_params = []
local_var_files = {}
body_params = None
if "batch_input_marketing_event_email_subscriber" in local_var_params:
body_params = local_var_params["batch_input_marketing_event_email_subscriber"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(["application/json", "*/*"]) # noqa: E501
# HTTP header `Content-Type`
content_types_list = local_var_params.get("_content_type", self.api_client.select_header_content_type(["application/json"], "POST", body_params)) # noqa: E501
if content_types_list:
header_params["Content-Type"] = content_types_list
# Authentication setting
auth_settings = ["oauth2"] # noqa: E501
response_types_map = {
200: "BatchResponseSubscriberEmailResponse",
}
return self.api_client.call_api(
"/marketing/v3/marketing-events/attendance/{externalEventId}/{subscriberState}/email-create",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get("_return_http_data_only"), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
_request_auth=local_var_params.get("_request_auth"),
)
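# A minimal usage sketch: record that a batch of contacts attended an event.
# The api_client authentication and the batch_input payload (a
# BatchInputMarketingEventSubscriber) are assumed to be set up elsewhere;
# the event and account ids below are placeholder values.
def _example_record_attendance(api_client, batch_input):
    api = AttendanceSubscriberStateChangesApi(api_client)
    return api.create(
        external_event_id="my-event-id",
        subscriber_state="attend",
        batch_input_marketing_event_subscriber=batch_input,
        external_account_id="my-account-id",
    )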
|
e06c311045cdb27639b3dc39d7065d3432ba299a
|
73a0f661f1423d63e86489d4b2673f0103698aab
|
/python/oneflow/framework/docstr/inv.py
|
65f27dd5af62c6e2c2ca4726407614dc278465a0
|
[
"Apache-2.0"
] |
permissive
|
Oneflow-Inc/oneflow
|
4fc3e081e45db0242a465c4330d8bcc8b21ee924
|
0aab78ea24d4b1c784c30c57d33ec69fe5605e4a
|
refs/heads/master
| 2023-08-25T16:58:30.576596
| 2023-08-22T14:15:46
| 2023-08-22T14:15:46
| 81,634,683
| 5,495
| 786
|
Apache-2.0
| 2023-09-14T09:44:31
| 2017-02-11T06:09:53
|
C++
|
UTF-8
|
Python
| false
| false
| 5,316
|
py
|
inv.py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow.linalg.inv,
"""linalg.inv(A) -> Tensor
Computes the inverse of a square matrix if it exists.
Throws a `RuntimeError` if the matrix is not invertible.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
for a matrix :math:`A \in \mathbb{K}^{n \times n}`,
its **inverse matrix** :math:`A^{-1} \in \mathbb{K}^{n \times n}` (if it exists) is defined as
.. math::
A^{-1}A = AA^{-1} = \mathrm{I}_n
where :math:`\mathrm{I}_n` is the `n`-dimensional identity matrix.
The inverse matrix exists if and only if :math:`A` is `invertible`_. In this case,
the inverse is unique.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices
then the output has the same batch dimensions.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
consisting of invertible matrices.
Raises:
RuntimeError: if the matrix :attr:`A` or any matrix in the batch of matrices :attr:`A` is not invertible.
Examples:
.. code-block:: python
>>> import oneflow as flow
>>> A = flow.tensor([[ 1.3408, -0.7788, 1.0551, -0.5866],
... [ 0.8480, 0.8350, 0.9781, -0.1297],
... [-0.0881, -0.6142, -0.3833, 0.3232],
... [ 1.2841, 0.7517, -0.3849, 0.2515]])
>>> flow.linalg.inv(A)
tensor([[ 0.3105, -0.0811, 0.1288, 0.5169],
... [-0.3457, 0.1716, -0.7133, 0.1987],
... [-0.0593, 1.1706, 0.8694, -0.6516],
... [-0.6427, 1.6923, 2.8049, -0.2541]], dtype=oneflow.float32)
>>> A = flow.tensor([[[ 0.6144, 0.1027, -0.1353],
... [-1.4415, -0.6731, 0.3723],
... [ 0.4069, -0.8940, 1.4056]],
... [[-1.1891, -0.3897, -1.5015],
... [ 0.3028, 1.1040, 0.2600],
... [-1.6970, 0.4238, 0.9146]]])
>>> flow.linalg.inv(A)
tensor([[[ 1.6830, 0.0644, 0.1449],
... [-5.9755, -2.5206, 0.0925],
... [-4.2879, -1.6219, 0.7283]],
...
... [[-0.2370, 0.0737, -0.4100],
... [ 0.1892, 0.9579, 0.0384],
... [-0.5274, -0.3070, 0.3148]]], dtype=oneflow.float32)
.. _invertible:
https://en.wikipedia.org/wiki/Invertible_matrix#The_invertible_matrix_theorem
..
Feature Stage of Operator [linalg.inv].
- Maintainer List [@simonJJJ]
- Current Stage [pre Alpha]
- Alpha Stage Check List [ ]
- API(Compatible with PyTorch 1.11, anything incompatible must be noted in API Doc.)[Yes]
- Doc(API Doc must be provided and showed normally on the web page.)[Yes]
- Functionality and its Test [ ]
- Functionality is highly compatible with PyTorch 1.11. [Yes]
- eager local [Yes] [@simonJJJ]
- forward [Yes]
- backward [Yes]
- gpu [Yes]
- cpu [Yes]
- graph local [ ] [@simonJJJ]
- forward [Yes]
- backward [ ]
- gpu [Yes]
- cpu [Yes]
- Exception Handling
- Exception Message and Hint must be provided [Yes]
- Beta Stage Check List [ ]
- API(High compatibility with PyTorch 1.11, shouldn't have anything incompatible for a naive reason.)[ ]
- Doc(Same standard as Alpha Stage)[Yes]
- Functionality and its Test [ ]
- eager global [Yes] [@simonJJJ]
- forward [Yes]
- backward [Yes]
- gpu [Yes]
- cpu [Yes]
- graph global [Yes]
- forward [Yes]
- backward [ ]
- gpu [Yes]
- cpu [Yes]
- Performance and Scalability(Must be evaluated.)[ ]
- CUDA kernel [ ]
- CPU kernel [ ]
- N nodes M devices [ ]
- Exception Handling [Yes]
- Exception Message and Hint must be provided [Yes]
- Try you best to do Exception Recovery [Yes]
- Stable Stage Check List [ ]
- API(Same standard as Beta Stage)[ ]
- Doc(Same standard as Beta Stage)[ ]
- Functionality and its Test [ ]
- fp16 and AMP [ ]
- NHWC [ ]
- Performance and Scalability(Must be evaluated.)[ ]
- Exception Handling [ ]
""",
)
|
525bd768aad10ba2065a553c0ef2568a987076d6
|
f27e3fdc97290b1db6d3fa7039ad59e4f8b5a760
|
/tensorflow/vision/mnist-dnn-rich.py
|
b2e117b38fe165286ad5311dd4290f9b22168823
|
[] |
no_license
|
comet-ml/comet-examples
|
9c7bcea8b97986fb7987cbe0f4533f619e2a0939
|
9da5d4f296e633bb7e63b47dc2d3f7a0780c0a4e
|
refs/heads/master
| 2023-08-19T03:32:51.864273
| 2023-08-09T09:30:34
| 2023-08-09T09:30:34
| 158,587,515
| 134
| 55
| null | 2023-09-13T16:58:41
| 2018-11-21T18:00:34
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,015
|
py
|
mnist-dnn-rich.py
|
from __future__ import print_function
import logging
import utils
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from comet_ml import Experiment
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense, Dropout
params = {
'dropout': 0.2,
'batch-size': 64,
'epochs': 2,
'layer-1-size': 128,
'layer-2-size': 128,
'initial-lr': 1e-2,
'decay-steps': 2000,
'decay-rate': 0.9,
'optimizer': 'adam'
}
def main():
mnist = tf.keras.datasets.mnist
num_classes = 10
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train /= 255
x_test /= 255
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# convert class vectors to binary class matrices
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
train(x_train, y_train, x_test, y_test)
def build_model_graph(exp, input_shape=(784,)):
model = Sequential([
Flatten(input_shape=(784, )),
Dense(exp.get_parameter('layer-1-size'), activation='relu'),
Dense(exp.get_parameter('layer-2-size'), activation='relu'),
Dropout(exp.get_parameter('dropout')),
Dense(10)
])
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=exp.get_parameter('initial-lr'),
decay_steps=exp.get_parameter('decay-steps'),
decay_rate=exp.get_parameter('decay-rate'))
loss_fn = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
# use the decay schedule defined above so the 'initial-lr' / 'decay-*' params take effect
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule),
loss=loss_fn,
metrics=['accuracy'])
return model
def train(x_train, y_train, x_test, y_test):
exp=Experiment(project_name="perception",
auto_histogram_gradient_logging=True)
# log custom hyperparameters
exp.log_parameters(params)
# log any custom metric
exp.log_metric('custom_metric', 0.95)
# log a dataset hash
exp.log_dataset_hash(x_train)
# Define model
model = build_model_graph(exp)
model.fit(
x_train,
y_train,
batch_size=exp.get_parameter('batch-size'),
epochs=exp.get_parameter('epochs'),
validation_data=(x_test, y_test),
)
score = model.evaluate(x_test, y_test, verbose=0)
logging.info("Score %s", score)
# Finalize model includes the following calls
# exp.log_confusion_matrix()
# exp.log_image()
# exp.log_histogram_3d()
# exp.add_tag()
# exp.log_model()
utils.finalize_model(model, x_train, y_train, x_test, y_test, exp)
if __name__ == "__main__":
main()
|
c7b292f57472f0a076411f97cb73c3a342d38916
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Tensorflow_Pandas_Numpy/source3.6/dateutil/tz/__init__.py
|
5a2d9cd6e1f74a5553706ba5e916dc5c6a86267d
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 551
|
py
|
__init__.py
|
# -*- coding: utf-8 -*-
from .tz import *
from .tz import __doc__
#: Convenience constant providing a :class:`tzutc()` instance
#:
#: .. versionadded:: 2.7.0
UTC = tzutc()
__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
"tzstr", "tzical", "tzwin", "tzwinlocal", "gettz",
"enfold", "datetime_ambiguous", "datetime_exists",
"resolve_imaginary", "UTC", "DeprecatedTzFormatWarning"]
class DeprecatedTzFormatWarning(Warning):
"""Warning raised when time zones are parsed from deprecated formats."""
|
2d40b0fc291576b5c6cdd05d95fc4b495296bc5d
|
dd221d1ab80a49190a0c93277e2471debaa2db95
|
/hanlp/components/parsers/biaffine/biaffine.py
|
0d2212c923641c930df7c59f4c3f1f817c05deff
|
[
"Apache-2.0",
"CC-BY-NC-SA-4.0"
] |
permissive
|
hankcs/HanLP
|
29a22d4e240617e4dc67929c2f9760a822402cf7
|
be2f04905a12990a527417bd47b79b851874a201
|
refs/heads/doc-zh
| 2023-08-18T12:48:43.533453
| 2020-02-15T17:19:28
| 2023-03-14T02:46:03
| 24,976,755
| 32,454
| 9,770
|
Apache-2.0
| 2023-08-13T03:11:39
| 2014-10-09T06:36:16
|
Python
|
UTF-8
|
Python
| false
| false
| 3,526
|
py
|
biaffine.py
|
# MIT License
#
# Copyright (c) 2020 Yu Zhang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
import torch.nn as nn
class Biaffine(nn.Module):
r"""
Biaffine layer for first-order scoring.
This function has a tensor of weights :math:`W` and bias terms if needed.
The score :math:`s(x, y)` of the vector pair :math:`(x, y)` is computed as :math:`x^T W y`,
in which :math:`x` and :math:`y` can be concatenated with bias terms.
References:
- Timothy Dozat and Christopher D. Manning. 2017.
`Deep Biaffine Attention for Neural Dependency Parsing`_.
Args:
n_in (int):
The size of the input feature.
n_out (int):
The number of output channels.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``True``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``True``.
.. _Deep Biaffine Attention for Neural Dependency Parsing:
https://openreview.net/forum?id=Hk95PK9le
"""
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x, n_in + bias_y))
self.reset_parameters()
def __repr__(self):
s = f"n_in={self.n_in}, n_out={self.n_out}"
if self.bias_x:
s += f", bias_x={self.bias_x}"
if self.bias_y:
s += f", bias_y={self.bias_y}"
return f"{self.__class__.__name__}({s})"
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, x, y):
r"""
Args:
x (torch.Tensor): ``[batch_size, seq_len, n_in]``.
y (torch.Tensor): ``[batch_size, seq_len, n_in]``.
Returns:
~torch.Tensor:
A scoring tensor of shape ``[batch_size, n_out, seq_len, seq_len]``.
If ``n_out=1``, the dimension for ``n_out`` will be squeezed automatically.
"""
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
# [batch_size, n_out, seq_len, seq_len]
s = torch.einsum('bxi,oij,byj->boxy', x, self.weight, y)
# remove dim 1 if n_out == 1
s = s.squeeze(1)
return s
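# A minimal usage sketch: score every token pair in a batch, as in arc scoring
# for a biaffine dependency parser. The hidden size, batch size, and sequence
# length are arbitrary illustrative values; note the weight is zero-initialized,
# so all scores are zero until the layer is trained.
def _example_biaffine_scores():
    biaffine = Biaffine(n_in=256, n_out=1, bias_x=True, bias_y=True)
    x = torch.randn(8, 40, 256)  # [batch_size, seq_len, n_in]
    y = torch.randn(8, 40, 256)
    return biaffine(x, y)  # [8, 40, 40]; the n_out dim is squeezed since n_out == 1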
|
a1f333e36a4caf2cf7d13df0448fc7a6702bf9b7
|
4a88c0b94fbff9f13f346b1856d7be467b5ed91a
|
/src/exo/pattern_match.py
|
47a6b5e6336defbe6bf594d9c56f9344c4e7e91b
|
[
"MIT"
] |
permissive
|
exo-lang/exo
|
d440562fa4d81d72cd82c255da27d0daf7493974
|
a7cd77ee0f8d2bab5f3b003491fda33ac5482cb5
|
refs/heads/master
| 2023-08-31T20:20:27.328963
| 2023-08-07T16:01:18
| 2023-08-07T16:01:18
| 285,721,510
| 220
| 19
|
MIT
| 2023-09-14T00:20:05
| 2020-08-07T02:49:05
|
Python
|
UTF-8
|
Python
| false
| false
| 13,419
|
py
|
pattern_match.py
|
from __future__ import annotations
import inspect
import re
from typing import Optional, Iterable
import exo.pyparser as pyparser
from exo.LoopIR import LoopIR, PAST
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
# Pattern Matching Errors
from exo.internal_cursors import Cursor, Node, Block
class PatternMatchError(Exception):
pass
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
# General Pattern-Matching / Pointing Mechanism
"""
We will use <pattern-string>s as a way to point at AST nodes.
A <pattern-string> has the following form:
<pattern-string> ::= <pattern> #<num>
| <pattern>
<pattern> ::= ... -- a UAST statement or expression
-- potentially involving one or more holes
-- where a hole is written `_`
-- specified by LoopIR.PAST
"""
def get_match_no(pattern_str: str) -> Optional[int]:
"""
Search for a trailing # sign in a pattern string and return the following
number, or None if no # sign exists. Uses `int` to parse the number,
so it is not sensitive to spaces
>>> get_match_no('foo #34')
34
>>> get_match_no('baz # 42 ')
42
>>> get_match_no('foo') is None
True
>>> get_match_no('foo #bar')
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'bar'
"""
if (pos := pattern_str.rfind("#")) == -1:
return None
return int(pattern_str[pos + 1 :])
def match_pattern(context, pattern_str, call_depth=0, default_match_no=None):
assert isinstance(context, Cursor), f"Expected Cursor, got {type(context)}"
# break-down pattern_str for possible #<num> post-fix
if match := re.search(r"^([^#]+)#(\d+)\s*$", pattern_str):
pattern_str = match[1]
match_no = int(match[2]) if match[2] is not None else None
else:
match_no = default_match_no # None means match-all
# get source location where this is getting called from
caller = inspect.getframeinfo(inspect.stack()[call_depth + 1][0])
# parse the pattern we're going to use to match
p_ast = pyparser.pattern(
pattern_str, filename=caller.filename, lineno=caller.lineno
)
# do the pattern match, to find the nodes in ast
return PatternMatch().find(context, p_ast, match_no=match_no)
_PAST_to_LoopIR = {
# list of stmts
list: list,
#
PAST.Assign: [LoopIR.Assign],
PAST.Reduce: [LoopIR.Reduce],
PAST.Pass: [LoopIR.Pass],
PAST.If: [LoopIR.If],
PAST.Seq: [LoopIR.Seq],
PAST.Alloc: [LoopIR.Alloc],
PAST.Call: [LoopIR.Call],
PAST.WriteConfig: [LoopIR.WriteConfig],
PAST.S_Hole: None,
#
PAST.Read: [LoopIR.Read],
PAST.StrideExpr: [LoopIR.StrideExpr],
PAST.Const: [LoopIR.Const],
PAST.USub: [LoopIR.USub],
PAST.BinOp: [LoopIR.BinOp],
PAST.BuiltIn: [LoopIR.BuiltIn],
PAST.ReadConfig: [LoopIR.ReadConfig],
PAST.E_Hole: None,
}
class _MatchComplete(Exception):
pass
class PatternMatch:
def __init__(self):
self._match_no = None
self._results = []
def find(self, cur, pat, match_no=None):
self._match_no = match_no
self._results = []
# prevent the top level of a pattern being just a hole
if isinstance(pat, PAST.E_Hole):
raise PatternMatchError("pattern match on 'anything' unsupported")
elif isinstance(pat, list) and all(isinstance(p, PAST.S_Hole) for p in pat):
raise PatternMatchError("pattern match on 'anything' unsupported")
try:
if isinstance(pat, list):
assert len(pat) > 0
self.find_stmts(pat, cur)
else:
assert isinstance(pat, PAST.expr)
self.find_expr(pat, cur)
except _MatchComplete:
pass
return self._results
def _add_result(self, result):
assert isinstance(result, (Node, Block))
if self._match_no is None:
self._results.append(result)
return
i = self._match_no
self._match_no -= 1
if i == 0:
self._results.append(result)
raise _MatchComplete()
## -------------------
## finding methods
def find_expr(self, pat, cur):
# try to match
if self.match_e(pat, cur._node):
self._add_result(cur)
for child in _children(cur):
self.find_expr(pat, child)
def find_stmts(self, pats, cur: Node):
if isinstance(cur._node, LoopIR.proc):
return self.find_stmts_in_block(pats, cur.body())
return self.find_stmts_in_block(pats, cur.as_block())
def find_stmts_in_block(self, pats, curs: Block):
# may encounter empty statement blocks, which we should ignore
if len(curs) == 0:
return
# try to match a prefix of this sequence of statements
if m := self.match_stmts(pats, curs):
self._add_result(m)
# if we need to look for more matches, recurse structurally ...
# first, look for any subsequences of statements in the first
# statement of the sequence `stmts`
if isinstance(curs[0]._node, LoopIR.If):
self.find_stmts_in_block(pats, curs[0].body())
self.find_stmts_in_block(pats, curs[0].orelse())
elif isinstance(curs[0]._node, LoopIR.Seq):
self.find_stmts_in_block(pats, curs[0].body())
else:
pass # other forms of statement do not contain stmt blocks
# second, recurse on the tail of this sequence...
self.find_stmts_in_block(pats, curs[1:])
## -------------------
## matching methods
def match_stmts(self, pats, cur):
i, j = 0, 0
while i < len(pats) and j < len(cur):
if isinstance(pats[i], PAST.S_Hole):
if i + 1 == len(pats):
return cur # No lookahead, guaranteed match
if self.match_stmt(pats[i + 1], cur[j]):
i += 2 # Lookahead matches, skip hole and lookahead
elif self.match_stmt(pats[i], cur[j]):
i += 1
else:
return None
j += 1
# Return the matched portion on success
return cur[:j] if i == len(pats) else None
def match_stmt(self, pat, cur):
assert not isinstance(pat, PAST.S_Hole), "holes must be handled in match_stmts"
# first ensure that the pattern and statement
# are the same constructor
stmt = cur._node
if not isinstance(
stmt, (LoopIR.WindowStmt,) + tuple(_PAST_to_LoopIR[type(pat)])
):
return False
# then handle each constructor as a structural case
if isinstance(stmt, (LoopIR.Assign, LoopIR.Reduce)):
return (
self.match_name(pat.name, stmt.name)
and all(self.match_e(pi, si) for pi, si in zip(pat.idx, stmt.idx))
and self.match_e(pat.rhs, stmt.rhs)
)
elif isinstance(stmt, LoopIR.WindowStmt):
if isinstance(pat, PAST.Assign):
return (
self.match_name(pat.name, stmt.lhs)
and pat.idx == []
and self.match_e(pat.rhs, stmt.rhs)
)
else:
return False
elif isinstance(stmt, LoopIR.Pass):
return True
elif isinstance(stmt, LoopIR.If):
return (
self.match_e(pat.cond, stmt.cond)
and self.match_stmts(pat.body, cur.body()) is not None
and self.match_stmts(pat.orelse, cur.orelse()) is not None
)
elif isinstance(stmt, LoopIR.Seq):
return (
self.match_name(pat.iter, stmt.iter)
and self.match_e(pat.lo, stmt.lo)
and self.match_e(pat.hi, stmt.hi)
and self.match_stmts(pat.body, cur.body()) is not None
)
elif isinstance(stmt, LoopIR.Alloc):
if isinstance(stmt.type, LoopIR.Tensor):
return all(
self.match_e(pi, si) for pi, si in zip(pat.sizes, stmt.type.hi)
) and self.match_name(pat.name, stmt.name)
else: # scalar
return self.match_name(pat.name, stmt.name)
elif isinstance(stmt, LoopIR.Call):
return self.match_name(pat.f, stmt.f.name)
elif isinstance(stmt, LoopIR.WriteConfig):
return self.match_name(stmt.config.name(), pat.config) and self.match_name(
stmt.field, pat.field
)
else:
assert False, f"bad case: {type(stmt)}"
def match_e(self, pat, e):
# expression holes can match anything
# and we don't have to worry about Kleene-Star behavior
if isinstance(pat, PAST.E_Hole):
return True
# Special case: -3 can be parsed as USub(Const(3))... it should match Const(-3)
if (
isinstance(pat, PAST.USub)
and isinstance(pat.arg, PAST.Const)
and isinstance(e, LoopIR.Const)
):
pat = pat.arg.update(val=-pat.arg.val)
# first ensure that the pattern and statement
# are the same constructor
if not isinstance(e, (LoopIR.WindowExpr,) + tuple(_PAST_to_LoopIR[type(pat)])):
return False
if isinstance(e, LoopIR.Read):
return self.match_name(pat.name, e.name) and all(
self.match_e(pi, si) for pi, si in zip(pat.idx, e.idx)
)
elif isinstance(e, LoopIR.WindowExpr):
if isinstance(pat, PAST.Read):
# TODO: Should we be able to handle window slicing matching? Nah..
if len(pat.idx) != 1 or not isinstance(pat.idx[0], PAST.E_Hole):
return False
return self.match_name(pat.name, e.name)
else:
return False
elif isinstance(e, LoopIR.Const):
return pat.val == e.val
elif isinstance(e, LoopIR.BinOp):
# TODO: do we need to handle associativity? (a + b) + c vs a + (b + c)?
return (
pat.op == e.op
and self.match_e(pat.lhs, e.lhs)
and self.match_e(pat.rhs, e.rhs)
)
elif isinstance(e, LoopIR.USub):
return self.match_e(pat.arg, e.arg)
elif isinstance(e, LoopIR.BuiltIn):
return pat.f is e.f and all(
self.match_e(pa, sa) for pa, sa in zip(pat.args, e.args)
)
elif isinstance(e, LoopIR.ReadConfig):
return pat.config == e.config.name() and pat.field == e.field
elif isinstance(e, LoopIR.StrideExpr):
return self.match_name(pat.name, e.name) and (
pat.dim == e.dim or not bool(pat.dim)
)
else:
assert False, "bad case"
@staticmethod
def match_name(pat_nm, ir_sym):
return pat_nm == "_" or pat_nm == str(ir_sym)
def _children(cur) -> Iterable[Node]:
n = cur._node
# Top-level proc
if isinstance(n, LoopIR.proc):
yield from _children_from_attrs(cur, n, "body")
# Statements
elif isinstance(n, (LoopIR.Assign, LoopIR.Reduce)):
yield from _children_from_attrs(cur, n, "idx", "rhs")
elif isinstance(n, (LoopIR.WriteConfig, LoopIR.WindowStmt)):
yield from _children_from_attrs(cur, n, "rhs")
elif isinstance(n, (LoopIR.Pass, LoopIR.Alloc, LoopIR.Free)):
yield from []
elif isinstance(n, LoopIR.If):
yield from _children_from_attrs(cur, n, "cond", "body", "orelse")
elif isinstance(n, LoopIR.Seq):
yield from _children_from_attrs(cur, n, "lo", "hi", "body")
elif isinstance(n, LoopIR.Call):
yield from _children_from_attrs(cur, n, "args")
# Expressions
elif isinstance(n, LoopIR.Read):
yield from _children_from_attrs(cur, n, "idx")
elif isinstance(n, LoopIR.WindowExpr):
yield from _children_from_attrs(cur, n, "idx")
elif isinstance(n, LoopIR.Interval):
yield from _children_from_attrs(cur, n, "lo", "hi")
elif isinstance(n, LoopIR.Point):
yield from _children_from_attrs(cur, n, "pt")
elif isinstance(
n,
(
LoopIR.Const,
LoopIR.StrideExpr,
LoopIR.ReadConfig,
),
):
yield from []
elif isinstance(n, LoopIR.USub):
yield from _children_from_attrs(cur, n, "arg")
elif isinstance(n, LoopIR.BinOp):
yield from _children_from_attrs(cur, n, "lhs", "rhs")
elif isinstance(n, LoopIR.BuiltIn):
yield from _children_from_attrs(cur, n, "args")
else:
assert False, f"case {type(n)} unsupported"
def _children_from_attrs(cur, n, *args) -> Iterable[Node]:
for attr in args:
children = getattr(n, attr)
if isinstance(children, list):
for i in range(len(children)):
yield cur._child_node(attr, i)
else:
yield cur._child_node(attr, None)
|
0006fa9cfd613f4baa9757c5a8fd7e45a829fa8d
|
767b09cdf51803d533ebb5906042ed1f92f91a7c
|
/allennlp_models/rc/dataset_readers/__init__.py
|
6e227fc66c0dd353c6e35a224ae2f04a56def698
|
[
"Apache-2.0"
] |
permissive
|
allenai/allennlp-models
|
e93bb3b084e99e211d5ebb515b765de117e41970
|
b1f372248c17ad12684d344955fbcd98e957e77e
|
refs/heads/main
| 2023-09-05T01:57:37.434101
| 2022-11-24T00:06:05
| 2022-11-24T00:06:05
| 246,170,605
| 520
| 172
|
Apache-2.0
| 2022-11-24T00:06:06
| 2020-03-10T00:22:21
|
Python
|
UTF-8
|
Python
| false
| false
| 421
|
py
|
__init__.py
|
from allennlp_models.rc.dataset_readers.drop import DropReader
from allennlp_models.rc.dataset_readers.qangaroo import QangarooReader
from allennlp_models.rc.dataset_readers.quac import QuACReader
from allennlp_models.rc.dataset_readers.squad import SquadReader
from allennlp_models.rc.dataset_readers.transformer_squad import TransformerSquadReader
from allennlp_models.rc.dataset_readers.triviaqa import TriviaQaReader
|
15d340b4fa68161687b70883517ebab3f4af84ef
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/services/nbi/service.py
|
fb2571ac11b0f031d81388e2745af29a22f65d63
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 803
|
py
|
service.py
|
#!./bin/python
# ----------------------------------------------------------------------
# nbi service
# ----------------------------------------------------------------------
# Copyright (C) 2007-2023 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from noc.core.service.fastapi import FastAPIService
from noc.config import config
PREFIX_NBI = "/api/nbi/"
class NBIService(FastAPIService):
name = "nbi"
use_mongo = True
use_router = True
if config.features.traefik:
traefik_backend = "nbi"
traefik_frontend_rule = "PathPrefix:/api/nbi"
def __init__(self):
super().__init__()
self.collect_req_api_metric = True
if __name__ == "__main__":
NBIService().start()
|
5d404811bb5799c21265621e53cc2963d0b318b9
|
66040b97481106cd596db1557b4de8250835f657
|
/buffalo/evaluate/base.py
|
2a5146984cc5d300cf87ab880c7fc6c3b25eaed0
|
[
"Apache-2.0",
"MPL-2.0",
"MIT",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
kakao/buffalo
|
1ac94cfc74be5e1b4889257c75f921d29caa4d92
|
ee01d5bee1dffecf207c28df511c9724acce41d7
|
refs/heads/dev
| 2023-08-22T22:11:05.643603
| 2023-06-27T16:50:48
| 2023-06-27T16:50:48
| 204,726,945
| 615
| 140
|
Apache-2.0
| 2023-09-14T04:44:48
| 2019-08-27T14:56:48
|
Python
|
UTF-8
|
Python
| false
| false
| 5,060
|
py
|
base.py
|
import numpy as np
from buffalo.parallel._core import quickselect
class Evaluable(object):
def __init__(self, *args, **kargs):
pass
def prepare_evaluation(self):
if not self.opt.validation or not self.data.has_group("vali"):
return
if hasattr(self.data, "vali_data") is False:
self.data._prepare_validation_data()
def show_validation_results(self):
results = self.get_validation_results()
if not results:
return "No validation results"
return "Validation results: " + ", ".join(f"{k}: {v:0.5f}" for k, v in results.items())
def get_validation_results(self):
if not self.opt.validation or not self.data.has_group("vali"):
return
results = {}
results.update(self._evaluate_ranking_metrics())
results.update(self._evaluate_score_metrics())
return results
def get_topk(self, scores, k, sorted=True, num_threads=4):
# NOTE: Is it necessary condition?
# assert k < scores.shape[1], f"k ({k}) should be smaller than cols ({scores.shape[1]})"
is_many = True
if len(scores.shape) == 1:
scores = scores.reshape(1, scores.shape[0])
is_many = False
k = min(k, scores.shape[1])
assert k > 0, f"k({k}) or cols({scores.shape[1]}) should be greater than 0"
result = np.empty(shape=(scores.shape[0], k), dtype=np.int32)
quickselect(scores, result, sorted, num_threads)
return result if is_many else result[0]
def _evaluate_ranking_metrics(self):
if hasattr(self.data, "vali_data") is False:
self.prepare_evaluation()
batch_size = self.opt.validation.get("batch", 128)
topk = self.opt.validation.topk
gt = self.data.vali_data["vali_gt"]
rows = self.data.vali_data["vali_rows"]
validation_seen = self.data.vali_data["validation_seen"]
validation_max_seen_size = self.data.vali_data["validation_max_seen_size"]
num_items = self.data.get_header()["num_items"]
# can significantly save evaluation time
if self.opt.validation.eval_samples:
size = min(self.opt.validation.eval_samples, len(rows))
rows = np.random.choice(rows, size=size, replace=False)
NDCG = 0.0
AP = 0.0
HIT = 0.0
AUC = 0.0
N = 0.0
idcgs = np.cumsum(1.0 / np.log2(np.arange(2, topk + 2)))
dcgs = 1.0 / np.log2(np.arange(2, topk + 2))
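        # dcgs[i] = 1 / log2(i + 2) is the discounted gain for a hit at 0-based rank i;
        # idcgs[n - 1] is the ideal DCG when the first n recommendations are all hits.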
def filter_seen_items(_topk, seen, topk):
ret = []
for t in _topk:
if t not in seen:
ret.append(t)
if len(ret) >= topk:
break
return ret
for index in range(0, len(rows), batch_size):
recs = self._get_topk_recommendation(rows[index:index + batch_size],
topk=topk + validation_max_seen_size)
for row, _topk in recs:
seen = validation_seen.get(row, set())
if len(seen) == 0:
continue
_topk = filter_seen_items(_topk, seen, topk)
_gt = gt[row]
# accuracy
hit = len(set(_topk) & _gt) / len(_gt)
HIT += hit
# ndcg, map
idcg = idcgs[min(len(_gt), topk) - 1]
dcg = 0.0
hit, miss, ap = 0.0, 0.0, 0.0
# AUC
num_pos_items = len(_gt)
num_neg_items = num_items - num_pos_items
auc = 0.0
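                # AUC is accumulated as the count of correctly ordered (positive, negative)
                # pairs: each miss inside the top-k ranks below the `hit` positives seen so
                # far, negatives beyond the top-k are credited with the average of observed
                # and total positives, and the total is normalised by num_pos * num_neg.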
for i, r in enumerate(_topk):
if r in _gt:
hit += 1
ap += (hit / (i + 1.0))
dcg += dcgs[i]
else:
miss += 1
auc += hit
auc += ((hit + num_pos_items) / 2.0) * (num_neg_items - miss)
auc /= (num_pos_items * num_neg_items)
ndcg = dcg / idcg
NDCG += ndcg
ap /= min(len(_gt), topk)
AP += ap
N += 1.0
AUC += auc
NDCG /= N
AP /= N
ACC = HIT / N
AUC = AUC / N
ret = {"ndcg": NDCG, "map": AP, "accuracy": ACC, "auc": AUC}
return ret
def _evaluate_score_metrics(self):
if hasattr(self.data, "vali_data") is False:
self.prepare_evaluation()
vali_data = self.data.vali_data
row = vali_data["row"]
col = vali_data["col"]
val = vali_data["val"]
scores = self._get_scores(row, col)
ERROR = 0.0
RMSE = 0.0
for r, c, p, v in zip(row, col, scores, val):
err = p - v
ERROR += abs(err)
RMSE += err * err
RMSE /= len(scores)
RMSE = RMSE ** 0.5
ERROR /= len(scores)
return {"rmse": RMSE, "error": ERROR}
|
5d23dfa253fa19fc9e5cb683c512da125dba65dc
|
b18cf6b2f861bfda619312fc2fcb6ea990d7c57e
|
/rivalcfg/mouse.py
|
f3a396b267f4dc6095292061159fc273110a6bd1
|
[
"WTFPL"
] |
permissive
|
flozz/rivalcfg
|
8b45df8e04eefe32bfff460f4493d11ce5b77975
|
32800c58d8c067d7187611f6207a6bfe6b17ade0
|
refs/heads/master
| 2023-08-03T01:44:56.734014
| 2023-07-11T10:50:23
| 2023-07-11T10:50:23
| 54,918,040
| 741
| 103
|
WTFPL
| 2023-09-11T12:33:56
| 2016-03-28T19:30:28
|
Python
|
UTF-8
|
Python
| false
| false
| 11,654
|
py
|
mouse.py
|
import time
from . import usbhid
from . import devices
from . import handlers
from . import helpers
from . import mouse_settings
def get_mouse(vendor_id=0x1038, product_id=None):
"""Get a :class:`Mouse` instance to manipulate requested device.
:param int vendor_id: The vendor id of the device (optional, by default
this is set to the SteelSeries vendor id
(``0x1038``)).
:param int product_id: The product id of one of the supported device (e.g.
``0x1702``).
:raise rivalcfg.devices.UnsupportedDevice: The requested device is not
supported by rivalcfg.
:rtype: Mouse
>>> from rivalcfg.mouse import get_mouse
>>> get_mouse(vendor_id=0x1038, product_id=0x1702)
<Mouse SteelSeries Rival 100 (1038:1702:00)>
"""
if not product_id:
raise ValueError("You must define the 'product_id' parameter")
profile = devices.get_profile(vendor_id, product_id)
settings = mouse_settings.get_mouse_settings(
vendor_id,
product_id,
profile,
)
hid_device = usbhid.open_device(vendor_id, product_id, profile["endpoint"])
return Mouse(hid_device, profile, settings)
class Mouse:
"""Generic class to handle any supported mouse.
.. NOTE::
Additional methods are available in this class depending on the loaded
profile. Read device specific documentation for more information.
.. WARNING::
        You should not instantiate this class yourself. Use the
:func:`get_mouse` factory function instead.
:param hid_device: The HID device to write in (provided by the
:func:`rivalcfg.usbhid.open_device`).
:param mouse_profile: One of the rivalcfg mouse profile (provided by
:func:`rivalcfg.devices.get_profile`).
>>> from rivalcfg import usbhid
>>> from rivalcfg import devices
>>> from rivalcfg.mouse import Mouse
>>> from rivalcfg.mouse_settings import get_mouse_settings
>>> profile = devices.get_profile(vendor_id=0x1038, product_id=0x1702)
>>> settings = get_mouse_settings(0x1038, 0x1702, profile)
>>> Mouse(
... usbhid.open_device(vendor_id=0x1038, product_id=0x1702, endpoint=0),
... profile,
... settings,
... )
<Mouse SteelSeries Rival 100 (1038:1702:00)>
"""
#: The mouse settings (``rivalcfg.devices.*``)
mouse_profile = None
#: The mouse settings (:class:`rivalcfg.mouse_settings.MouseSettings`)
mouse_settings = None
def __init__(self, hid_device, mouse_profile, mouse_settings):
"""Constructor."""
self._hid_device = hid_device
self.mouse_profile = mouse_profile
self.mouse_settings = mouse_settings
@property
def name(self):
"""The mouse name."""
return self.mouse_profile["name"]
@property
def vendor_id(self):
"""The mouse vendor id."""
return self.mouse_profile["vendor_id"]
@property
def product_id(self):
"""The mouse product id."""
return self.mouse_profile["product_id"]
@property
def firmware_version_tuple(self):
"""The firmware version of the device as a tuple (e.g.``(1, 33)``,
``(0,)`` if not available).
"""
if "firmware_version" not in self.mouse_profile:
return (0,)
self._hid_write(
self.mouse_profile["firmware_version"]["report_type"],
data=self.mouse_profile["firmware_version"]["command"],
)
version = self._hid_device.read(
self.mouse_profile["firmware_version"]["response_length"],
timeout_ms=200,
)
if not version:
return (0,)
return tuple(version)
@property
def firmware_version(self):
"""The firmware version as an human readable string (e.g. ``"1.33"``,
``"0"`` if not available).
"""
return ".".join([str(i) for i in self.firmware_version_tuple])
@property
def battery(self):
"""Information about the device battery.
:rtype: dict
:return: ``{"is_charging": True|False|None, "level": int(0-100)|None}``.
.. NOTE::
A value of ``None`` means the feature is not supported or that the mouse is turned off.
"""
result = {
"is_charging": None,
"level": None,
}
if "battery_level" not in self.mouse_profile:
return result
self._hid_write(
self.mouse_profile["battery_level"]["report_type"],
data=self.mouse_profile["battery_level"]["command"],
)
data = self._hid_device.read(
self.mouse_profile["battery_level"]["response_length"],
timeout_ms=200,
)
try:
if "is_charging" in self.mouse_profile["battery_level"]:
result["is_charging"] = self.mouse_profile["battery_level"][
"is_charging"
](data)
except Exception:
pass
try:
if "level" in self.mouse_profile["battery_level"]:
result["level"] = self.mouse_profile["battery_level"]["level"](data)
except Exception:
pass
        if result["level"] is not None and (result["level"] > 100 or result["level"] < 0):
return {"is_charging": None, "level": None}
return result
def reset_settings(self):
"""Sets all settings to their factory default values."""
for name, setting_info in self.mouse_profile["settings"].items():
method_name = "set_%s" % name
method = getattr(self, method_name)
if (
"value_type" in setting_info
and setting_info["value_type"]
and setting_info["value_type"] != "none"
):
method(setting_info["default"])
else:
method()
def save(self):
"""Save current config to the mouse internal memory."""
# This should never happen... But who knows...
if (
"save_command" not in self.mouse_profile
or not self.mouse_profile["save_command"]
):
raise Exception("This mouse does not provide any save command.")
packet_length = 0
if "packet_length" in self.mouse_profile["save_command"]:
packet_length = self.mouse_profile["save_command"]["packet_length"]
self._hid_write(
report_type=self.mouse_profile["save_command"]["report_type"],
data=self.mouse_profile["save_command"]["command"],
packet_length=packet_length,
)
response = None
if (
"readback_length" in self.mouse_profile["save_command"]
and self.mouse_profile["save_command"]["readback_length"]
):
response = self._hid_device.read(
self.mouse_profile["save_command"]["readback_length"],
timeout_ms=200,
)
self.mouse_settings.save()
return response
def close(self):
"""Close the device.
.. WARNING::
Once called, any access of the Mouse class properties or function
may raise an error.
"""
self._hid_device.close()
def _hid_write(
self,
report_type=usbhid.HID_REPORT_TYPE_OUTPUT,
report_id=0x00,
data=[],
packet_length=0,
):
"""
Write data to the device.
:param int report_type: The HID report type
(:data:`rivalcfg.usbhid.HID_REPORT_TYPE_OUTPUT`
or
:data:`rivalcfg.usbhid.HID_REPORT_TYPE_FEATURE`).
:param int report_id: The id of the report (always ``0x00``).
:param list(int) data: The data to send to the mouse.
:param int packet_length: The fixed length of the packet that will be
sent to the device (default: ``0`` (no fixed
length)).
        :raises ValueError: Invalid report type, or HID device not opened.
"""
if packet_length:
bytes_ = bytearray(
helpers.merge_bytes(
report_id, data, [0x00] * (packet_length - len(data))
)
)
else:
bytes_ = bytearray(helpers.merge_bytes(report_id, data))
if report_type == usbhid.HID_REPORT_TYPE_OUTPUT:
self._hid_device.write(bytes_)
elif report_type == usbhid.HID_REPORT_TYPE_FEATURE:
self._hid_device.send_feature_report(bytes_)
else:
raise ValueError("Invalid HID report type: %2x" % report_type)
        # Avoids sending multiple commands too quickly
time.sleep(0.05)
def __getattr__(self, name):
        # Handle every set_xxx method generated from the device's profile
if not name.startswith("set_"):
raise AttributeError("Mouse instance has no attribute '%s'" % name)
setting_name = name[4:]
if setting_name not in self.mouse_profile["settings"]:
raise AttributeError("Mouse instance has no attribute '%s'" % name)
setting_info = self.mouse_profile["settings"][setting_name]
handler_name = None
if "value_type" in setting_info and setting_info["value_type"]:
handler_name = setting_info["value_type"]
if handler_name not in helpers.module_ls(handlers):
raise ValueError(
"Unknown handler '%s' for '%s' setting of the %s"
% (
handler_name,
setting_name,
self.mouse_profile["name"],
)
)
packet_length = 0
if "packet_length" in setting_info:
packet_length = setting_info["packet_length"]
suffix = []
if "command_suffix" in setting_info:
suffix = setting_info["command_suffix"]
def _exec_command(*args):
data = []
if handler_name:
data = getattr(handlers, handler_name).process_value(
setting_info, *args
)
# Write data to the device
self._hid_write(
report_type=setting_info["report_type"],
data=helpers.merge_bytes(setting_info["command"], data, suffix),
packet_length=packet_length,
)
# Readback when required
response = None
if "readback_length" in setting_info and setting_info["readback_length"]:
response = self._hid_device.read(
setting_info["readback_length"],
timeout_ms=200,
)
# Save settings
if len(args) == 1:
self.mouse_settings.set(setting_name, args[0])
else:
self.mouse_settings.set(setting_name, args)
#
return response
return _exec_command
def __repr__(self):
return "<Mouse %s (%04x:%04x:%02x)>" % (
self.mouse_profile["name"],
self.mouse_profile["vendor_id"],
self.mouse_profile["product_id"],
self.mouse_profile["endpoint"],
)
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.close()
|
d5091d43e50b138555ad4b2daed4f323badcc7b1
|
6c9e1a5139ca56b7a5df7d1e7cc7ce4f60e1c8af
|
/histomicstk/saliency/__init__.py
|
6e6bda656139f806a57042efcae6f61d243ea0bf
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
DigitalSlideArchive/HistomicsTK
|
251b016066144fbad3efb2065993d7981265ab04
|
c03c852e72f1497d22535c6b7d5aba25c74e620d
|
refs/heads/master
| 2023-08-31T02:32:13.773082
| 2023-08-30T20:40:45
| 2023-08-30T20:40:45
| 44,324,447
| 351
| 125
|
Apache-2.0
| 2023-09-13T12:24:13
| 2015-10-15T14:49:21
|
Python
|
UTF-8
|
Python
| false
| false
| 323
|
py
|
__init__.py
|
"""This module contains various tools to detect tissue.
Functionality includes methods to detect tissue in a whole-slide image
(i.e. segment foreground from white space), as well as specific workflows
that detect artifacts, specific components (e.g. blood), and
highly cellular regions within detected tissue.
"""
|
ef0a9ad2609fcb0e7866c5b860382f87e42ca655
|
195131d9f486825f2077f4094c108154543a5acb
|
/tests/test_link.py
|
232a53608465960eef813b9586c6e0dc1f74d69a
|
[
"MIT"
] |
permissive
|
moshi4/pyGenomeViz
|
7e18427b05d5aa64d67681c2a90d13c2da75bf34
|
2f9f96f8468fe1529ddffa73e0aede2302835595
|
refs/heads/main
| 2023-08-29T14:35:36.999230
| 2023-08-26T08:26:32
| 2023-08-26T08:26:32
| 494,371,930
| 158
| 11
|
MIT
| 2023-08-26T08:26:33
| 2022-05-20T07:52:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,545
|
py
|
test_link.py
|
import pytest
from pygenomeviz.link import Link
def test_is_inverted():
"""Test inverted check"""
# Case1. forward-forward
ff_link = Link("1", 1, 100, "2", 101, 200)
assert ff_link.is_inverted is False
# Case2. forward-reverse
fr_link = Link("1", 1, 100, "2", 200, 101)
assert fr_link.is_inverted is True
# Case3. reverse-reverse
rr_link = Link("1", 100, 1, "2", 200, 101)
assert rr_link.is_inverted is False
def test_add_offset():
"""Test add offset to link"""
name1, start1, end1, offset1 = "link1", 1, 100, 200
name2, start2, end2, offset2 = "link2", 101, 200, 250
link = Link(name1, start1, end1, name2, start2, end2)
offset_link = link.add_offset({name1: offset1, name2: offset2})
# Check original instance is not changed
assert link.track_start1 == start1
assert link.track_end1 == end1
assert link.track_start2 == start2
assert link.track_end2 == end2
# Check offset added new instance is changed
assert offset_link.track_start1 == start1 + offset1
assert offset_link.track_end1 == end1 + offset1
assert offset_link.track_start2 == start2 + offset2
assert offset_link.track_end2 == end2 + offset2
def test_color_string_error():
"""Test color string error"""
link1, link2 = ("link1", 1, 100), ("link2", 101, 200)
invalid_color = "nocolor"
# Case1. normal color is not color like string
with pytest.raises(ValueError):
Link(*link1, *link2, normal_color=invalid_color)
# Case2. inverted color is not color like string
with pytest.raises(ValueError):
Link(*link1, *link2, inverted_color=invalid_color)
def test_size_ratio_error():
"""Test size ratio error"""
link1, link2 = ("link1", 1, 100), ("link2", 101, 200)
# Case1. size_ratio < 0
with pytest.raises(ValueError):
Link(*link1, *link2, size_ratio=-1)
# Case2. size_ratio > 1
with pytest.raises(ValueError):
Link(*link1, *link2, size_ratio=2)
def test_interpolation_value_error():
"""Test interpolation value error"""
link1, link2 = ("link1", 1, 100), ("link2", 101, 200)
# Case1. vmin < 0
with pytest.raises(ValueError):
Link(*link1, *link2, v=50, vmin=-100)
# Case2. vmax > 100
with pytest.raises(ValueError):
Link(*link1, *link2, v=50, vmax=200)
# Case3. v < vmin
with pytest.raises(ValueError):
Link(*link1, *link2, v=50, vmin=60)
# Case4. v > vmax
with pytest.raises(ValueError):
Link(*link1, *link2, v=90, vmax=80)
|
97c9f144c0d7ac13905b7c5d6f32514b34384651
|
8880226d2ca1c9448c44b3e9f21226a58e61ac93
|
/awacs/memorydb.py
|
510ee84842949f581f71f138cab52f8bffb7e901
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
cloudtools/awacs
|
2f82958ccc7ba2177492c29c706a5737f19dd2d1
|
c449a9637f01c26e73b827a9f8d5cc7715bbbea2
|
refs/heads/main
| 2023-08-31T00:58:28.636568
| 2023-08-28T05:13:01
| 2023-08-28T05:13:01
| 9,062,692
| 385
| 107
|
BSD-2-Clause
| 2023-08-13T23:21:39
| 2013-03-27T20:16:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,508
|
py
|
memorydb.py
|
# Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from typing import Optional
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "Amazon MemoryDB"
prefix = "memorydb"
class Action(BaseAction):
def __init__(self, action: Optional[str] = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
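# Illustrative usage sketch (not part of this module): these Action objects are
# typically referenced when composing IAM policy statements, e.g.
#   from awacs.aws import Allow, PolicyDocument, Statement
#   PolicyDocument(Statement=[Statement(Effect=Allow, Action=[CreateCluster],
#                                       Resource=[ARN(resource="cluster/*")])])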
BatchUpdateCluster = Action("BatchUpdateCluster")
BatchUpdateClusters = Action("BatchUpdateClusters")
Connect = Action("Connect")
CopySnapshot = Action("CopySnapshot")
CreateAcl = Action("CreateAcl")
CreateCluster = Action("CreateCluster")
CreateParameterGroup = Action("CreateParameterGroup")
CreateSnapshot = Action("CreateSnapshot")
CreateSubnetGroup = Action("CreateSubnetGroup")
CreateUser = Action("CreateUser")
DeleteAcl = Action("DeleteAcl")
DeleteCluster = Action("DeleteCluster")
DeleteParameterGroup = Action("DeleteParameterGroup")
DeleteSnapshot = Action("DeleteSnapshot")
DeleteSubnetGroup = Action("DeleteSubnetGroup")
DeleteUser = Action("DeleteUser")
DescribeAcls = Action("DescribeAcls")
DescribeClusters = Action("DescribeClusters")
DescribeEngineVersions = Action("DescribeEngineVersions")
DescribeEvents = Action("DescribeEvents")
DescribeParameterGroups = Action("DescribeParameterGroups")
DescribeParameters = Action("DescribeParameters")
DescribeReservedNodes = Action("DescribeReservedNodes")
DescribeReservedNodesOfferings = Action("DescribeReservedNodesOfferings")
DescribeServiceUpdates = Action("DescribeServiceUpdates")
DescribeSnapshots = Action("DescribeSnapshots")
DescribeSubnetGroups = Action("DescribeSubnetGroups")
DescribeUsers = Action("DescribeUsers")
FailoverShard = Action("FailoverShard")
ListAllowedNodeTypeUpdates = Action("ListAllowedNodeTypeUpdates")
ListNodeTypeUpdates = Action("ListNodeTypeUpdates")
ListTags = Action("ListTags")
PurchaseReservedNodesOffering = Action("PurchaseReservedNodesOffering")
ResetParameterGroup = Action("ResetParameterGroup")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UpdateAcl = Action("UpdateAcl")
UpdateCluster = Action("UpdateCluster")
UpdateParameterGroup = Action("UpdateParameterGroup")
UpdateSubnetGroup = Action("UpdateSubnetGroup")
UpdateUser = Action("UpdateUser")
|
fc186b3e9c2aca8d42f4397c30a36f57b6696fc6
|
2c1dc7049d820d2b75811a6c0479bd34eb84ad87
|
/test/integration/smoke/test_iso.py
|
c592b6bc6060280612da5fc9ec05e147e40809b8
|
[
"GPL-2.0-only",
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"GPL-1.0-or-later",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LicenseRef-scancode-unknown"
] |
permissive
|
apache/cloudstack
|
3775c9171022dfaf91d655bd166149e36f4caa41
|
819dd7b75c1b61ae444c45476f5834dbfb9094d0
|
refs/heads/main
| 2023-08-30T15:05:36.976909
| 2023-08-30T09:29:16
| 2023-08-30T09:29:16
| 9,759,448
| 1,468
| 1,232
|
Apache-2.0
| 2023-09-14T16:57:46
| 2013-04-29T22:27:12
|
Java
|
UTF-8
|
Python
| false
| false
| 25,960
|
py
|
test_iso.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" BVT tests for Templates ISO
"""
# Import Local Modules
from marvin.cloudstackException import GetDetailExceptionInfo
from marvin.cloudstackTestCase import cloudstackTestCase
import unittest
from marvin.cloudstackAPI import listZones, updateIso, extractIso, updateIsoPermissions, copyIso, deleteIso,\
registerIso,listOsTypes
from marvin.lib.utils import cleanup_resources, random_gen, get_hypervisor_type,validateList
from marvin.lib.base import Account, Iso
from marvin.lib.common import (get_domain,
get_zone,
list_isos,
list_os_types)
from nose.plugins.attrib import attr
from marvin.codes import PASS
import urllib.request, urllib.parse, urllib.error
# Import System modules
import time
_multiprocess_shared_ = True
class TestCreateIso(cloudstackTestCase):
    # TODO: SIMENH: checking the existence of the registered ISO in secondary storage, and
    # deploying a VM with the registered ISO, can be added as another test
def setUp(self):
self.services = self.testClient.getParsedTestDataConfig()
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.hypervisor = self.testClient.getHypervisorInfo()
if self.hypervisor.lower() in ['lxc']:
self.skipTest("ISOs are not supported on %s" % self.hypervisor)
# Get Zone, Domain and templates
self.domain = get_domain(self.apiclient)
self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())
self.services['mode'] = self.zone.networktype
self.services["domainid"] = self.domain.id
self.services["iso2"]["zoneid"] = self.zone.id
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
# Finding the OsTypeId from Ostype
ostypes = list_os_types(
self.apiclient,
description=self.services["ostype"]
)
if not isinstance(ostypes, list):
raise unittest.SkipTest("OSTypeId for given description not found")
self.services["iso1"]["ostypeid"] = ostypes[0].id
self.services["iso2"]["ostypeid"] = ostypes[0].id
self.services["ostypeid"] = ostypes[0].id
self.cleanup = [self.account]
return
def tearDown(self):
try:
# Clean up, terminate the created ISOs
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(
tags=[
"advanced",
"basic",
"eip",
"sg",
"advancedns"],
required_hardware="true")
def test_01_create_iso(self):
"""Test create public & private ISO
"""
# Validate the following:
# 1. database (vm_template table) should be
# updated with newly created ISO
# 2. UI should show the newly added ISO
# 3. listIsos API should show the newly added ISO
iso = Iso.create(
self.apiclient,
self.services["iso2"],
account=self.account.name,
domainid=self.account.domainid
)
self.debug("ISO created with ID: %s" % iso.id)
try:
iso.download(self.apiclient)
except Exception as e:
self.fail("Exception while downloading ISO %s: %s"
% (iso.id, e))
list_iso_response = list_isos(
self.apiclient,
id=iso.id
)
self.assertEqual(
isinstance(list_iso_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_iso_response),
0,
"Check template available in List ISOs"
)
iso_response = list_iso_response[0]
self.assertEqual(
iso_response.displaytext,
self.services["iso2"]["displaytext"],
"Check display text of newly created ISO"
)
self.assertEqual(
iso_response.zoneid,
self.services["iso2"]["zoneid"],
"Check zone ID of newly created ISO"
)
return
class TestISO(cloudstackTestCase):
@classmethod
def setUpClass(cls):
testClient = super(TestISO, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.services = testClient.getParsedTestDataConfig()
cls._cleanup = []
cls.unsupportedHypervisor = False
cls.hypervisor = get_hypervisor_type(cls.apiclient)
if cls.hypervisor.lower() in ["simulator", "lxc"]:
cls.unsupportedHypervisor = True
return
# Get Zone, Domain and templates
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
cls.services["domainid"] = cls.domain.id
cls.services["iso1"]["zoneid"] = cls.zone.id
cls.services["iso2"]["zoneid"] = cls.zone.id
cls.services["sourcezoneid"] = cls.zone.id
# populate second zone id for iso copy
cmd = listZones.listZonesCmd()
cls.zones = cls.apiclient.listZones(cmd)
if not isinstance(cls.zones, list):
raise Exception("Failed to find zones.")
# Create an account, ISOs etc.
cls.account = Account.create(
cls.apiclient,
cls.services["account"],
domainid=cls.domain.id
)
cls._cleanup.append(cls.account)
# Finding the OsTypeId from Ostype
ostypes = list_os_types(
cls.apiclient,
description=cls.services["ostype"]
)
if not isinstance(ostypes, list):
raise unittest.SkipTest("OSTypeId for given description not found")
cls.services["iso1"]["ostypeid"] = ostypes[0].id
cls.services["iso2"]["ostypeid"] = ostypes[0].id
cls.services["ostypeid"] = ostypes[0].id
cls.iso_1 = Iso.create(
cls.apiclient,
cls.services["iso1"],
account=cls.account.name,
domainid=cls.account.domainid
)
try:
cls.iso_1.download(cls.apiclient)
except Exception as e:
raise Exception("Exception while downloading ISO %s: %s"
% (cls.iso_1.id, e))
cls.iso_2 = Iso.create(
cls.apiclient,
cls.services["iso2"],
account=cls.account.name,
domainid=cls.account.domainid
)
try:
cls.iso_2.download(cls.apiclient)
except Exception as e:
raise Exception("Exception while downloading ISO %s: %s"
% (cls.iso_2.id, e))
return
@classmethod
def tearDownClass(cls):
try:
cls.apiclient = super(
TestISO,
cls).getClsTestClient().getApiClient()
# Clean up, terminate the created templates
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
if self.unsupportedHypervisor:
self.skipTest("Skipping test because unsupported hypervisor\
%s" % self.hypervisor)
def tearDown(self):
try:
# Clean up, terminate the created ISOs, VMs
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def get_iso_details(self, isoname):
        # ListIsos to list the default ISOs (VM and Xen tools)
list_default_iso_response = list_isos(
self.apiclient,
name=isoname,
account="system",
isready="true"
)
status = validateList(list_default_iso_response)
self.assertEqual(
PASS,
status[0],
"Check if ISO exists in ListIsos")
@attr(
tags=[
"advanced",
"basic",
"eip",
"sg",
"advancedns",
"smoke"],
required_hardware="true")
def test_02_edit_iso(self):
"""Test Edit ISO
"""
# Validate the following:
# 1. UI should show the edited values for ISO
# 2. database (vm_template table) should have updated values
# Generate random values for updating ISO name and Display text
new_displayText = random_gen()
new_name = random_gen()
self.debug("Updating ISO permissions for ISO: %s" % self.iso_1.id)
cmd = updateIso.updateIsoCmd()
# Assign new values to attributes
cmd.id = self.iso_1.id
cmd.displaytext = new_displayText
cmd.name = new_name
cmd.bootable = self.services["bootable"]
cmd.passwordenabled = self.services["passwordenabled"]
self.apiclient.updateIso(cmd)
# Check whether attributes are updated in ISO using listIsos
list_iso_response = list_isos(
self.apiclient,
id=self.iso_1.id
)
self.assertEqual(
isinstance(list_iso_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_iso_response),
0,
"Check template available in List ISOs"
)
iso_response = list_iso_response[0]
self.assertEqual(
iso_response.displaytext,
new_displayText,
"Check display text of updated ISO"
)
self.assertEqual(
str(iso_response.bootable).lower(),
str(self.services["bootable"]).lower(),
"Check if image is bootable of updated ISO"
)
self.assertEqual(
iso_response.ostypeid,
self.services["ostypeid"],
"Check OSTypeID of updated ISO"
)
self.assertEqual(
iso_response.passwordenabled,
bool(self.services["passwordenabled"]),
"Check passwordenabled of updated ISO"
)
return
@attr(
tags=[
"advanced",
"basic",
"eip",
"sg",
"advancedns"],
required_hardware="true")
def test_03_delete_iso(self):
"""Test delete ISO
"""
# Validate the following:
        # 1. UI should not show the deleted ISO
# 2. database (vm_template table) should not contain deleted ISO
self.debug("Deleting ISO with ID: %s" % self.iso_1.id)
self.iso_1.delete(self.apiclient)
# Sleep to ensure that ISO state is reflected in other calls
time.sleep(self.services["sleep"])
# ListIsos to verify deleted ISO is properly deleted
list_iso_response = list_isos(
self.apiclient,
id=self.iso_1.id
)
self.assertEqual(
list_iso_response,
None,
"Check if ISO exists in ListIsos"
)
return
@attr(
tags=[
"advanced",
"basic",
"eip",
"sg",
"advancedns"],
required_hardware="true")
def test_04_extract_Iso(self):
"Test for extract ISO"
# Validate the following
        # 1. Admin should be able to extract and download the ISO
        # 2. ListIsos should display all the public templates
        # for all kinds of users
        # 3. ListIsos should not display the system templates
self.debug("Extracting ISO with ID: %s" % self.iso_2.id)
cmd = extractIso.extractIsoCmd()
cmd.id = self.iso_2.id
cmd.mode = self.services["iso2"]["mode"]
cmd.zoneid = self.services["iso2"]["zoneid"]
list_extract_response = self.apiclient.extractIso(cmd)
try:
# Format URL to ASCII to retrieve response code
formatted_url = urllib.parse.unquote_plus(list_extract_response.url)
url_response = urllib.request.urlopen(formatted_url)
response_code = url_response.getcode()
except Exception:
self.fail(
"Extract ISO Failed with invalid URL %s (ISO id: %s)"
% (formatted_url, self.iso_2.id)
)
self.assertEqual(
list_extract_response.id,
self.iso_2.id,
"Check ID of the downloaded ISO"
)
self.assertEqual(
list_extract_response.extractMode,
self.services["iso2"]["mode"],
"Check mode of extraction"
)
self.assertEqual(
list_extract_response.zoneid,
self.services["iso2"]["zoneid"],
"Check zone ID of extraction"
)
self.assertEqual(
response_code,
200,
"Check for a valid response of download URL"
)
return
@attr(
tags=[
"advanced",
"basic",
"eip",
"sg",
"advancedns",
"smoke",
"selfservice"])
def test_05_iso_permissions(self):
"""Update & Test for ISO permissions"""
# validate the following
# 1. listIsos returns valid permissions set for ISO
# 2. permission changes should be reflected in vm_template
# table in database
self.debug("Updating permissions for ISO: %s" % self.iso_2.id)
cmd = updateIsoPermissions.updateIsoPermissionsCmd()
cmd.id = self.iso_2.id
# Update ISO permissions
cmd.isfeatured = self.services["isfeatured"]
cmd.ispublic = self.services["ispublic"]
cmd.isextractable = self.services["isextractable"]
self.apiclient.updateIsoPermissions(cmd)
# Verify ListIsos have updated permissions for the ISO for normal user
list_iso_response = list_isos(
self.apiclient,
id=self.iso_2.id,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(list_iso_response, list),
True,
"Check list response returns a valid list"
)
iso_response = list_iso_response[0]
self.assertEqual(
iso_response.id,
self.iso_2.id,
"Check ISO ID"
)
self.assertEqual(
str(iso_response.ispublic).lower(),
str(self.services["ispublic"]).lower(),
"Check ispublic permission of ISO"
)
self.assertEqual(
str(iso_response.isfeatured).lower(),
str(self.services["isfeatured"]).lower(),
"Check isfeatured permission of ISO"
)
return
@attr(
tags=[
"advanced",
"basic",
"eip",
"sg",
"advancedns",
"smoke",
"multizone",
"provisioning"])
def test_06_copy_iso(self):
"""Test for copy ISO from one zone to another"""
# Validate the following
# 1. copy ISO should be successful and secondary storage
# should contain new copied ISO.
if len(self.zones) <= 1:
self.skipTest(
"Not enough zones available to perform copy template")
self.services["destzoneid"] = [z for z in self.zones if z.id != self.zone.id][0].id
self.debug("Copy ISO from %s to %s" % (
self.zone.id,
self.services["destzoneid"]
))
cmd = copyIso.copyIsoCmd()
cmd.id = self.iso_2.id
cmd.destzoneid = self.services["destzoneid"]
cmd.sourcezoneid = self.zone.id
self.apiclient.copyIso(cmd)
# Verify ISO is copied to another zone using ListIsos
list_iso_response = list_isos(
self.apiclient,
id=self.iso_2.id,
zoneid=self.services["destzoneid"]
)
self.assertEqual(
isinstance(list_iso_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_iso_response),
0,
"Check template extracted in List ISO"
)
iso_response = list_iso_response[0]
self.assertEqual(
iso_response.id,
self.iso_2.id,
"Check ID of the downloaded ISO"
)
self.assertEqual(
iso_response.zoneid,
self.services["destzoneid"],
"Check zone ID of the copied ISO"
)
self.debug("Cleanup copied ISO: %s" % iso_response.id)
# Cleanup- Delete the copied ISO
timeout = self.services["timeout"]
while True:
time.sleep(self.services["sleep"])
list_iso_response = list_isos(
self.apiclient,
id=self.iso_2.id,
zoneid=self.services["destzoneid"]
)
self.assertEqual(
isinstance(list_iso_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_iso_response),
0,
"Check template extracted in List ISO"
)
iso_response = list_iso_response[0]
if iso_response.isready:
break
if timeout == 0:
raise Exception(
"Failed to download copied iso(ID: %s)" % iso_response.id)
timeout = timeout - 1
cmd = deleteIso.deleteIsoCmd()
cmd.id = iso_response.id
cmd.zoneid = self.services["destzoneid"]
self.apiclient.deleteIso(cmd)
return
@attr(
tags=[
"advanced",
"basic",
"eip",
"sg",
"advancedns"],
required_hardware="false")
def test_07_list_default_iso(self):
"""Test delete ISO
"""
# Validate the following:
# list ISO should list default ISOS (VM and xen tools)
# ListIsos to list default ISOS (VM and xen tools)
self.get_iso_details("vmware-tools.iso")
self.get_iso_details("xs-tools.iso")
return
class TestCreateISOWithChecksum(cloudstackTestCase):
def setUp(self):
self.testClient = super(TestCreateISOWithChecksum, self).getClsTestClient()
self.apiclient = self.testClient.getApiClient()
self.cleanup = []
self.unsupportedHypervisor = False
self.hypervisor = self.testClient.getHypervisorInfo()
if self.hypervisor.lower() in ['lxc']:
# Template creation from root volume is not supported in LXC
self.unsupportedHypervisor = True
return
# Get Zone, Domain and templates
self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())
# Setup default create iso attributes
self.iso = registerIso.registerIsoCmd()
self.iso.checksum = "{SHA-1}" + "e16f703b5d6cb6dd2c448d956be63fcbee7d79ea"
self.iso.zoneid = self.zone.id
self.iso.name = 'test-tynyCore-iso'
self.iso.displaytext = 'test-tynyCore-iso'
self.iso.url = "http://dl.openvm.eu/cloudstack/iso/TinyCore-8.0.iso"
self.iso.ostypeid = self.getOsType("Other Linux (64-bit)")
self.md5 = "f7fee34a73a7f8e3adb30778c7c32c51"
self.sha256 = "069a22f7cc15b34cd39f6dd61ef0cf99ff47a1a92942772c30f50988746517f7"
if self.unsupportedHypervisor:
self.skipTest("Skipping test because unsupported hypervisor\
%s" % self.hypervisor)
return
def tearDown(self):
try:
# Clean up the created templates
for temp in self.cleanup:
cmd = deleteIso.deleteIsoCmd()
cmd.id = temp.id
cmd.zoneid = self.zone.id
self.apiclient.deleteIso(cmd)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "smoke"], required_hardware="true")
def test_01_create_iso_with_checksum_sha1(self):
iso = self.registerIso(self.iso)
self.download(self.apiclient, iso.id)
@attr(tags=["advanced", "smoke"], required_hardware="true")
def test_02_create_iso_with_checksum_sha256(self):
self.iso.checksum = "{SHA-256}" + self.sha256
iso = self.registerIso(self.iso)
self.download(self.apiclient, iso.id)
@attr(tags=["advanced", "smoke"], required_hardware="true")
def test_03_create_iso_with_checksum_md5(self):
self.iso.checksum = "{md5}" + self.md5
iso = self.registerIso(self.iso)
self.download(self.apiclient, iso.id)
@attr(tags=["advanced", "smoke"], required_hardware="true")
def test_01_1_create_iso_with_checksum_sha1_negative(self):
self.iso.checksum = "{sha-1}" + "someInvalidValue"
iso = self.registerIso(self.iso)
try:
self.download(self.apiclient, iso.id)
except Exception as e:
print("Negative Test Passed - Exception Occurred Under iso download " \
"%s" % GetDetailExceptionInfo(e))
else:
self.fail("Negative Test Failed - Exception DID NOT Occurred Under iso download ")
@attr(tags=["advanced", "smoke"], required_hardware="true")
def test_02_1_create_iso_with_checksum_sha256_negative(self):
self.iso.checksum = "{SHA-256}" + "someInvalidValue"
iso = self.registerIso(self.iso)
try:
self.download(self.apiclient, iso.id)
except Exception as e:
print("Negative Test Passed - Exception Occurred Under iso download " \
"%s" % GetDetailExceptionInfo(e))
else:
self.fail("Negative Test Failed - Exception DID NOT Occurred Under iso download ")
@attr(tags=["advanced", "smoke"], required_hardware="true")
def test_03_1_create_iso_with_checksum_md5_negative(self):
self.iso.checksum = "{md5}" + "someInvalidValue"
iso = self.registerIso(self.iso)
try:
self.download(self.apiclient, iso.id)
except Exception as e:
print("Negative Test Passed - Exception Occurred Under iso download " \
"%s" % GetDetailExceptionInfo(e))
else:
self.fail("Negative Test Failed - Exception DID NOT Occurred Under iso download ")
@attr(tags=["advanced", "smoke"], required_hardware="true")
def test_04_create_iso_with_no_checksum(self):
self.iso.checksum = None
iso = self.registerIso(self.iso)
self.download(self.apiclient, iso.id)
def registerIso(self, cmd):
iso = self.apiclient.registerIso(cmd)[0]
self.cleanup.append(iso)
return iso
def getOsType(self, param):
cmd = listOsTypes.listOsTypesCmd()
cmd.description = param
return self.apiclient.listOsTypes(cmd)[0].id
def download(self, apiclient, iso_id, retries=12, interval=5):
"""Check if template download will finish in 1 minute"""
while retries > -1:
time.sleep(interval)
iso_response = Iso.list(
apiclient,
id=iso_id
)
if isinstance(iso_response, list):
iso = iso_response[0]
if not hasattr(iso, 'status') or not iso or not iso.status:
retries = retries - 1
continue
                # If the ISO is ready, iso.status is 'Successfully Installed'.
                # While downloading, the status looks like 'Downloading - x% Downloaded'.
                # On failure, the status contains 'Failed' or another error string.
if 'Failed' in iso.status:
raise Exception(
"Failed to download iso: status - %s" %
iso.status)
elif iso.status == 'Successfully Installed' and iso.isready:
return
elif 'Downloaded' in iso.status:
retries = retries - 1
continue
elif 'Installing' not in iso.status:
if retries >= 0:
retries = retries - 1
continue
raise Exception(
"Error in downloading iso: status - %s" %
iso.status)
else:
retries = retries - 1
raise Exception("Template download failed exception.")
|
eba3b3e70ddc92d86bec5ca46fcd7a9d606a6026
|
af2082be2d5cbe39f41b82e84e22bbdf79b49639
|
/tests/decorator/test_allow_headers.py
|
6ad3a0074eacd16fc13a4e786a0b8d3b82549123
|
[
"MIT"
] |
permissive
|
corydolphin/flask-cors
|
1e0cf851ae66f71cf60a025889f6096a551b4601
|
dbabb27884f55d0328482ba92c51ca8397c58974
|
refs/heads/main
| 2023-08-27T08:37:14.485853
| 2023-07-29T16:50:30
| 2023-07-29T16:50:30
| 11,726,155
| 821
| 156
|
MIT
| 2023-08-27T22:10:49
| 2013-07-28T21:33:57
|
Python
|
UTF-8
|
Python
| false
| false
| 4,323
|
py
|
test_allow_headers.py
|
# -*- coding: utf-8 -*-
"""
test
~~~~
Flask-Cors tests module
"""
import unittest
from ..base_test import FlaskCorsTestCase
from flask import Flask
from flask_cors import *
from flask_cors.core import *
class AllowHeadersTestCaseIntegration(FlaskCorsTestCase):
def setUp(self):
self.app = Flask(__name__)
@self.app.route('/test_default')
@cross_origin()
def test_default():
return 'Welcome!'
@self.app.route('/test_allow_headers')
@cross_origin(allow_headers=['X-Example-Header-B',
'X-Example-Header-A'])
def test_allow_headers():
return 'Welcome!'
@self.app.route('/test_allow_headers_regex')
@cross_origin(allow_headers=[r'X-COMPANY-.*'])
def test_allow_headers_regex():
return 'Welcome!'
def test_default(self):
for resp in self.iter_responses('/test_default'):
self.assertTrue(resp.headers.get(ACL_ALLOW_HEADERS) is None,
"Default should have no allowed headers")
def test_allow_headers_no_request_headers(self):
'''
No ACL_REQUEST_HEADERS sent, ACL_ALLOW_HEADERS should be empty
'''
resp = self.preflight('/test_allow_headers', origin='www.example.com')
self.assertEqual(resp.headers.get(ACL_ALLOW_HEADERS), None)
def test_allow_headers_with_request_headers(self):
'''
If there is an Access-Control-Request-Method header in the request
and Access-Control-Request-Method is allowed for cross origin
requests and request method is OPTIONS, and every element in the
Access-Control-Request-Headers is an allowed header, the
Access-Control-Allow-Headers header should be echoed back.
'''
resp = self.preflight('/test_allow_headers',
origin='www.example.com',
cors_request_headers=['X-Example-Header-A'])
self.assertEqual(resp.headers.get(ACL_ALLOW_HEADERS),
'X-Example-Header-A')
def test_allow_headers_with_request_headers_case_insensitive(self):
'''
HTTP headers are case insensitive. We should respect that
and match regardless of case, returning the casing sent by
the client
'''
resp = self.preflight('/test_allow_headers',
origin='www.example.com',
cors_request_headers=['X-Example-header-a'])
self.assertEqual(resp.headers.get(ACL_ALLOW_HEADERS),
'X-Example-header-a')
def test_allow_headers_with_unmatched_request_headers(self):
'''
        If not every element in the Access-Control-Request-Headers is an
        allowed header, only the allowed (matching) headers should be
        echoed back; if none match, no header should be returned.
'''
resp = self.preflight('/test_allow_headers',
origin='www.example.com',
cors_request_headers=['X-Not-Found-Header'])
self.assertEqual(resp.headers.get(ACL_ALLOW_HEADERS), None)
resp = self.preflight('/test_allow_headers',
origin='www.example.com',
cors_request_headers=['X-Example-Header-A',
'X-Not-Found-Header'])
self.assertEqual(resp.headers.get(ACL_ALLOW_HEADERS),
'X-Example-Header-A')
def test_allow_headers_regex(self):
'''
        Allowed headers may be specified as regular expressions; request
        headers matching a pattern should be echoed back and non-matching
        headers omitted.
'''
resp = self.preflight('/test_allow_headers_regex',
origin='www.example.com',
cors_request_headers=['X-COMPANY-FOO'])
self.assertEqual(resp.headers.get(ACL_ALLOW_HEADERS), 'X-COMPANY-FOO')
resp = self.preflight('/test_allow_headers_regex',
origin='www.example.com',
cors_request_headers=['X-Not-Found-Header'])
self.assertEqual(resp.headers.get(ACL_ALLOW_HEADERS), None)
if __name__ == "__main__":
unittest.main()
|
a319b01e4011ee748fa94ddd736ca626725ae710
|
a4ea525e226d6c401fdb87a6e9adfdc5d07e6020
|
/src/azure-cli/azure/cli/command_modules/monitor/aaz/latest/monitor/account/_wait.py
|
ef7447fcce097294495be33ddde74b94b04d349f
|
[
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] |
permissive
|
Azure/azure-cli
|
13340eeca2e288e66e84d393fa1c8a93d46c8686
|
a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca
|
refs/heads/dev
| 2023-08-17T06:25:37.431463
| 2023-08-17T06:00:10
| 2023-08-17T06:00:10
| 51,040,886
| 4,018
| 3,310
|
MIT
| 2023-09-14T11:11:05
| 2016-02-04T00:21:51
|
Python
|
UTF-8
|
Python
| false
| false
| 12,289
|
py
|
_wait.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"monitor account wait",
)
class Wait(AAZWaitCommand):
"""Place the CLI in a waiting state until a condition is met.
"""
_aaz_info = {
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.monitor/accounts/{}", "2023-04-03"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.azure_monitor_workspace_name = AAZStrArg(
options=["-n", "--name", "--azure-monitor-workspace-name"],
help="The name of the Azure Monitor workspace. The name is case insensitive",
required=True,
id_part="name",
fmt=AAZStrArgFormat(
pattern="^(?!-)[a-zA-Z0-9-]+[^-]$",
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.AzureMonitorWorkspacesGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=False)
return result
class AzureMonitorWorkspacesGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Monitor/accounts/{azureMonitorWorkspaceName}",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"azureMonitorWorkspaceName", self.ctx.args.azure_monitor_workspace_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2023-04-03",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.etag = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.id = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.location = AAZStrType(
flags={"required": True},
)
_schema_on_200.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.properties = AAZObjectType(
flags={"client_flatten": True},
)
_schema_on_200.system_data = AAZObjectType(
serialized_name="systemData",
flags={"read_only": True},
)
_WaitHelper._build_schema_system_data_read(_schema_on_200.system_data)
_schema_on_200.tags = AAZDictType()
_schema_on_200.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.properties
properties.account_id = AAZStrType(
serialized_name="accountId",
flags={"read_only": True},
)
properties.default_ingestion_settings = AAZObjectType(
serialized_name="defaultIngestionSettings",
flags={"read_only": True},
)
properties.metrics = AAZObjectType(
flags={"read_only": True},
)
properties.private_endpoint_connections = AAZListType(
serialized_name="privateEndpointConnections",
flags={"read_only": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.public_network_access = AAZStrType(
serialized_name="publicNetworkAccess",
flags={"read_only": True},
)
default_ingestion_settings = cls._schema_on_200.properties.default_ingestion_settings
default_ingestion_settings.data_collection_endpoint_resource_id = AAZStrType(
serialized_name="dataCollectionEndpointResourceId",
flags={"read_only": True},
)
default_ingestion_settings.data_collection_rule_resource_id = AAZStrType(
serialized_name="dataCollectionRuleResourceId",
flags={"read_only": True},
)
metrics = cls._schema_on_200.properties.metrics
metrics.internal_id = AAZStrType(
serialized_name="internalId",
flags={"read_only": True},
)
metrics.prometheus_query_endpoint = AAZStrType(
serialized_name="prometheusQueryEndpoint",
flags={"read_only": True},
)
private_endpoint_connections = cls._schema_on_200.properties.private_endpoint_connections
private_endpoint_connections.Element = AAZObjectType()
_element = cls._schema_on_200.properties.private_endpoint_connections.Element
_element.id = AAZStrType(
flags={"read_only": True},
)
_element.name = AAZStrType(
flags={"read_only": True},
)
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.system_data = AAZObjectType(
serialized_name="systemData",
flags={"read_only": True},
)
_WaitHelper._build_schema_system_data_read(_element.system_data)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.properties.private_endpoint_connections.Element.properties
properties.group_ids = AAZListType(
serialized_name="groupIds",
flags={"read_only": True},
)
properties.private_endpoint = AAZObjectType(
serialized_name="privateEndpoint",
)
properties.private_link_service_connection_state = AAZObjectType(
serialized_name="privateLinkServiceConnectionState",
flags={"required": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
group_ids = cls._schema_on_200.properties.private_endpoint_connections.Element.properties.group_ids
group_ids.Element = AAZStrType()
private_endpoint = cls._schema_on_200.properties.private_endpoint_connections.Element.properties.private_endpoint
private_endpoint.id = AAZStrType(
flags={"read_only": True},
)
private_link_service_connection_state = cls._schema_on_200.properties.private_endpoint_connections.Element.properties.private_link_service_connection_state
private_link_service_connection_state.actions_required = AAZStrType(
serialized_name="actionsRequired",
)
private_link_service_connection_state.description = AAZStrType()
private_link_service_connection_state.status = AAZStrType()
tags = cls._schema_on_200.tags
tags.Element = AAZStrType()
return cls._schema_on_200
class _WaitHelper:
"""Helper class for Wait"""
_schema_system_data_read = None
@classmethod
def _build_schema_system_data_read(cls, _schema):
if cls._schema_system_data_read is not None:
_schema.created_at = cls._schema_system_data_read.created_at
_schema.created_by = cls._schema_system_data_read.created_by
_schema.created_by_type = cls._schema_system_data_read.created_by_type
_schema.last_modified_at = cls._schema_system_data_read.last_modified_at
_schema.last_modified_by = cls._schema_system_data_read.last_modified_by
_schema.last_modified_by_type = cls._schema_system_data_read.last_modified_by_type
return
cls._schema_system_data_read = _schema_system_data_read = AAZObjectType(
flags={"read_only": True}
)
system_data_read = _schema_system_data_read
system_data_read.created_at = AAZStrType(
serialized_name="createdAt",
)
system_data_read.created_by = AAZStrType(
serialized_name="createdBy",
)
system_data_read.created_by_type = AAZStrType(
serialized_name="createdByType",
)
system_data_read.last_modified_at = AAZStrType(
serialized_name="lastModifiedAt",
)
system_data_read.last_modified_by = AAZStrType(
serialized_name="lastModifiedBy",
)
system_data_read.last_modified_by_type = AAZStrType(
serialized_name="lastModifiedByType",
)
_schema.created_at = cls._schema_system_data_read.created_at
_schema.created_by = cls._schema_system_data_read.created_by
_schema.created_by_type = cls._schema_system_data_read.created_by_type
_schema.last_modified_at = cls._schema_system_data_read.last_modified_at
_schema.last_modified_by = cls._schema_system_data_read.last_modified_by
_schema.last_modified_by_type = cls._schema_system_data_read.last_modified_by_type
__all__ = ["Wait"]
|
bc0ba911b2c6de2865c081c68a557c5b4b31fb62
|
9ed3b16b3da72e4c47a04f2f2e3ef395e9fd9f20
|
/main/fribidi/template.py
|
7ddacd176020cff1cb553e8dd1ab6ba408f583cc
|
[
"BSD-2-Clause"
] |
permissive
|
chimera-linux/cports
|
fdae59dc25856942be3041e10e3533dbf8f883c3
|
714680161cd719dd047452c95fbb9b447bc23a86
|
refs/heads/master
| 2023-09-03T19:30:40.720670
| 2023-09-03T15:07:40
| 2023-09-03T15:07:40
| 374,000,317
| 118
| 37
|
BSD-2-Clause
| 2023-09-14T20:31:08
| 2021-06-05T02:07:34
|
Python
|
UTF-8
|
Python
| false
| false
| 558
|
py
|
template.py
|
pkgname = "fribidi"
pkgver = "1.0.13"
pkgrel = 0
build_style = "meson"
configure_args = ["-Ddocs=false"]
hostmakedepends = ["meson", "pkgconf"]
pkgdesc = "Free implementation of the Unicode Bidirectional Algorithm"
maintainer = "q66 <q66@chimera-linux.org>"
license = "LGPL-2.1-or-later"
url = "https://github.com/fribidi/fribidi"
source = f"{url}/archive/v{pkgver}.tar.gz"
sha256 = "f24e8e381bcf76533ae56bd776196f3a0369ec28e9c0fdb6edd163277e008314"
hardening = ["vis", "cfi"]
@subpackage("fribidi-devel")
def _devel(self):
return self.default_devel()
|
ef4107a48e1736d5cef5e4b04f5b27b88414f5e2
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/iosxe/tests/ShowTemplateServiceSourceUser/cli/equal/golden_output_1_expected.py
|
ec97976ceac68bf132c8dcaad6dbde621c187cf3
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 395
|
py
|
golden_output_1_expected.py
|
expected_output = {
'template': {
'webauth-global-inactive': {
'description': 'NONE',
'vnid': 'NONE',
'mdns_policy': 'NONE!',
'inactivity_timer': '3600 sec'
},
'sdsdsds': {
'description': 'dsds sdsds',
'vlan': 5,
'vnid': 'sjdsnd',
'mdns_policy': 'NONE!'
}
}
}
|
205166fef569b17475d2777f35320a176377c9d8
|
3db6fdd04e30dd8c7c524a3436d5c1b413c47787
|
/jsonschema2db.py
|
71a792d5a7a914f46a73b6765c3b742f1eb8f416
|
[
"MIT"
] |
permissive
|
better/jsonschema2db
|
a6db89c92166b3604c74024284b876b28ff65f58
|
fd1c83e45c728eed78cf06b7db8cb5f662b72159
|
refs/heads/master
| 2023-04-12T18:16:37.038808
| 2022-02-07T20:18:13
| 2022-02-07T20:18:13
| 100,424,905
| 275
| 61
|
MIT
| 2023-04-07T10:22:08
| 2017-08-15T22:37:42
|
Python
|
UTF-8
|
Python
| false
| false
| 23,995
|
py
|
jsonschema2db.py
|
import change_case
import csv
import datetime
import iso8601
import json
import os
import random
import sys
import tempfile
import warnings
class JSONSchemaToDatabase:
'''JSONSchemaToDatabase is the mother class for everything
:param schema: The JSON schema, as a native Python dict
:param database_flavor: Either "postgres" or "redshift"
:param postgres_schema: (optional) A string denoting a postgres schema (namespace) under which all tables will be created
:param debug: (optional) Set this to True if you want all queries to be printed to stderr
:param item_col_name: (optional) The name of the main object key (default is 'item_id')
:param item_col_type: (optional) Type of the main object key (uses the type identifiers from JSON Schema). Default is 'integer'
:param prefix_col_name: (optional) Postgres column name identifying the subpaths in the object (default is 'prefix')
:param abbreviations: (optional) A string to string mapping containing replacements applied to each part of the path
:param extra_columns: (optional) A list of pairs representing extra columns in the root table. The format is ('column_name', 'type')
:param root_table: (optional) Name of the root table
:param s3_client: (optional, Redshift only) A boto3 client object used for copying data through S3 (if not provided then it will use INSERT statements, which can be very slow)
:param s3_bucket: (optional, Redshift only) Required with s3_client
:param s3_prefix: (optional, Redshift only) Optional subdirectory within the S3 bucket
:param s3_iam_arn: (optional, Redshift only) Extra IAM argument
Typically you want to instantiate a `JSONSchemaToPostgres` object and run :func:`create_tables` to create all the tables. After that, insert all data using :func:`insert_items`. Once you're done inserting, run :func:`create_links` to populate all references properly and add foreign keys between tables. Optionally, you can run :func:`analyze` at the end, which optimizes the tables. A minimal usage sketch follows in the comments below.
'''
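# A minimal usage sketch of the workflow described above, illustrative only. It
# assumes a psycopg2 connection `con` and a JSON schema dict `schema` already
# exist (both names are hypothetical and not defined in this module):
#
#     translator = JSONSchemaToPostgres(schema, postgres_schema='data')
#     translator.create_tables(con)
#     translator.insert_items(con, [(1, {'address': {'city': 'Stockholm'}})])
#     translator.create_links(con)
#     translator.analyze(con)
#     con.commit()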
def __init__(self, schema, database_flavor, postgres_schema=None, debug=False,
item_col_name='item_id', item_col_type='integer', prefix_col_name='prefix',
abbreviations={}, extra_columns=[], root_table='root',
s3_client=None, s3_bucket=None, s3_prefix='jsonschema2db', s3_iam_arn=None):
self._database_flavor = database_flavor
self._debug = debug
self._table_definitions = {}
self._links = {}
self._backlinks = {}
self._postgres_schema = postgres_schema
self._item_col_name = item_col_name
self._item_col_type = item_col_type
self._prefix_col_name = prefix_col_name
self._abbreviations = abbreviations
self._extra_columns = extra_columns
self._table_comments = {}
self._column_comments = {}
self._root_table = root_table
# Redshift-specific properties
self._s3_client = s3_client
self._s3_bucket = s3_bucket
self._s3_prefix = s3_prefix
self._s3_iam_arn = s3_iam_arn
# Various counters used for diagnostics during insertions
self.failure_count = {} # path -> count
self.json_path_count = {} # json path -> count
# Walk the schema and build up the translation tables
self._translation_tree = self._traverse(schema, schema, table=self._root_table, comment=schema.get('comment'))
# Need to compile all the backlinks that uniquely identify a parent and add columns for them
for child_table in self._backlinks:
if len(self._backlinks[child_table]) != 1:
# Need a unique path on the parent table for this to make sense
continue
parent_table, ref_col_name, _ = list(self._backlinks[child_table])[0]
self._backlinks[child_table] = (parent_table, ref_col_name)
self._table_definitions[child_table][ref_col_name] = 'link'
self._links.setdefault(child_table, {})[ref_col_name] = (None, parent_table)
# Construct tables and columns
self._table_columns = {}
max_column_length = {'postgres': 63, 'redshift': 127}[self._database_flavor]
for col, type in self._extra_columns:
if 0 < len(col) <= max_column_length:
self._table_definitions[self._root_table][col] = type
for table, column_types in self._table_definitions.items():
for column in column_types.keys():
if len(column) > max_column_length:
warnings.warn('Ignoring column because it is too long: %s.%s' % (table, column))
columns = sorted(col for col in column_types.keys() if 0 < len(col) <= max_column_length)
self._table_columns[table] = columns
def _table_name(self, path):
return '__'.join(change_case.ChangeCase.camel_to_snake(self._abbreviations.get(p, p)) for p in path)
def _column_name(self, path):
return self._table_name(path) # same
def _execute(self, cursor, query, args=None, query_ok_to_print=True):
if self._debug and query_ok_to_print:
print(query, file=sys.stderr)
cursor.execute(query, args)
def _traverse(self, schema, tree, path=tuple(), table='root', parent=None, comment=None, json_path=tuple()):
# Computes a bunch of stuff
# 1. A list of tables and columns (used to create tables dynamically)
# 2. A tree (dicts of dicts) with a mapping for each fact into tables (used to map data)
# 3. Links between entities
if type(tree) != dict:
warnings.warn('%s.%s: Broken subtree' % (table, self._column_name(path)))
return
if parent is not None:
self._backlinks.setdefault(table, set()).add(parent)
if table not in self._table_definitions:
self._table_definitions[table] = {}
if comment:
self._table_comments[table] = comment
definition = None
new_json_path = json_path
while '$ref' in tree:
ref = tree['$ref']
p = ref.lstrip('#').lstrip('/').split('/')
tree = schema
for elem in p:
if elem not in tree:
warnings.warn('%s.%s: Broken definition: %s' % (table, self._column_name(path), ref))
return
tree = tree[elem]
new_json_path = ('#',) + tuple(p)
definition = p[-1] # TODO(erikbern): we should just make this a boolean variable
special_keys = set(tree.keys()).intersection(['oneOf', 'allOf', 'anyOf'])
if special_keys:
res = {}
for p in special_keys:
for q in tree[p]:
res.update(self._traverse(schema, q, path, table, json_path=new_json_path))
return res # This is a special node, don't store any more information
elif 'enum' in tree:
self._table_definitions[table][self._column_name(path)] = 'enum'
if 'comment' in tree:
self._column_comments.setdefault(table, {})[self._column_name(path)] = tree['comment']
res = {'_column': self._column_name(path), '_type': 'enum'}
elif 'type' not in tree:
res = {}
warnings.warn('%s.%s: Type info missing' % (table, self._column_name(path)))
elif tree['type'] == 'object':
print('object:', tree)
res = {}
if 'patternProperties' in tree:
# Always create a new table for the pattern properties
if len(tree['patternProperties']) > 1:
warnings.warn('%s.%s: Multiple patternProperties, will ignore all except first' % (table, self._column_name(path)))
for p in tree['patternProperties']:
ref_col_name = table + '_id'
res['*'] = self._traverse(schema, tree['patternProperties'][p], tuple(), self._table_name(path), (table, ref_col_name, self._column_name(path)), tree.get('comment'), new_json_path + (p,))
break
elif 'properties' in tree:
if definition:
# This is a shared definition, so create a new table (if not already exists)
if path == tuple():
ref_col_name = self._table_name([definition]) + '_id'
else:
ref_col_name = self._column_name(path) + '_id'
for p in tree['properties']:
res[p] = self._traverse(schema, tree['properties'][p], (p, ), self._table_name([definition]), (table, ref_col_name, self._column_name(path)), tree.get('comment'), new_json_path + (p,))
self._table_definitions[table][ref_col_name] = 'link'
self._links.setdefault(table, {})[ref_col_name] = ('/'.join(path), self._table_name([definition]))
else:
# Standard object, just traverse recursively
for p in tree['properties']:
res[p] = self._traverse(schema, tree['properties'][p], path + (p,), table, parent, tree.get('comment'), new_json_path + (p,))
else:
warnings.warn('%s.%s: Object with neither properties nor patternProperties' % (table, self._column_name(path)))
else:
if tree['type'] == 'null':
res = {}
elif tree['type'] not in ['string', 'boolean', 'number', 'integer']:
warnings.warn('%s.%s: Type error: %s' % (table, self._column_name(path), tree['type']))
res = {}
else:
if definition in ['date', 'timestamp']:
t = definition
else:
t = tree['type']
self._table_definitions[table][self._column_name(path)] = t
if 'comment' in tree:
self._column_comments.setdefault(table, {})[self._column_name(path)] = tree['comment']
res = {'_column': self._column_name(path), '_type': t}
res['_table'] = table
res['_suffix'] = '/'.join(path)
res['_json_path'] = '/'.join(json_path)
self.json_path_count['/'.join(json_path)] = 0
return res
def _coerce_type(self, t, value):
''' Returns a two-tuple (is_valid, new_value) where new_value is properly coerced. '''
try:
if t == 'number':
return type(value) != bool, float(value)
elif t == 'integer':
return type(value) != bool, int(value)
elif t == 'boolean':
return type(value) == bool, value
elif t == 'timestamp':
if type(value) == datetime.datetime:
return True, value
return True, iso8601.parse_date(value)
elif t == 'date':
if type(value) == datetime.date:
return True, value
return True, datetime.date(*(int(z) for z in value.split('-')))
elif t == 'string':
# Allow coercing ints/floats, but nothing else
return type(value) in [str, int, float], str(value)
elif t == 'enum':
return type(value) == str, str(value)
except:
pass
return False, None
def _flatten_dict(self, data, res=None, path=tuple()):
if res is None:
res = []
if type(data) == dict:
for k, v in data.items():
self._flatten_dict(v, res, path+(k,))
else:
res.append((path, data))
return res
def _postgres_table_name(self, table):
if self._postgres_schema is None:
return '"%s"' % table
else:
return '"%s"."%s"' % (self._postgres_schema, table)
def create_tables(self, con):
'''Creates tables
:param con: psycopg2 connection object
'''
postgres_types = {'boolean': 'bool', 'number': 'float', 'string': 'text', 'enum': 'text', 'integer': 'bigint', 'timestamp': 'timestamptz', 'date': 'date', 'link': 'integer'}
with con.cursor() as cursor:
if self._postgres_schema is not None:
self._execute(cursor, 'drop schema if exists %s cascade' % self._postgres_schema)
self._execute(cursor, 'create schema %s' % self._postgres_schema)
for table, columns in self._table_columns.items():
types = [self._table_definitions[table][column] for column in columns]
id_data_type = {'postgres': 'serial', 'redshift': 'int identity(1, 1) not null'}[self._database_flavor]
create_q = 'create table %s (id %s, "%s" %s not null, "%s" text not null, %s unique ("%s", "%s"), unique (id))' % \
(self._postgres_table_name(table), id_data_type, self._item_col_name, postgres_types[self._item_col_type], self._prefix_col_name,
''.join('"%s" %s, ' % (c, postgres_types[t]) for c, t in zip(columns, types)),
self._item_col_name, self._prefix_col_name)
self._execute(cursor, create_q)
if table in self._table_comments:
self._execute(cursor, 'comment on table %s is %%s' % self._postgres_table_name(table), (self._table_comments[table],))
for c in columns:
if c in self._column_comments.get(table, {}):
self._execute(cursor, 'comment on column %s."%s" is %%s' % (self._postgres_table_name(table), c), (self._column_comments[table][c],))
def _insert_items_generate_rows(self, items, extra_items, count):
# Helper function to generate data row by row for insertion
for item_id, data in items:
if type(data) == dict:
data = self._flatten_dict(data)
res = {}
for path, value in data:
if value is None:
continue
subtree = self._translation_tree
res.setdefault(subtree['_table'], {}).setdefault('', {})
if count:
self.json_path_count[subtree['_json_path']] += 1
for index, path_part in enumerate(path):
if '*' in subtree:
subtree = subtree['*']
elif not subtree.get(path_part):
if count:
self.failure_count[path] = self.failure_count.get(path, 0) + 1
break
else:
subtree = subtree[path_part]
# Compute the prefix, add an empty entry (TODO: should make the prefix customizable)
table, suffix = subtree['_table'], subtree['_suffix']
prefix_suffix = '/' + '/'.join(path[:(index+1)])
assert prefix_suffix.endswith(suffix)
prefix = prefix_suffix[:len(prefix_suffix)-len(suffix)].rstrip('/')
res.setdefault(table, {}).setdefault(prefix, {})
if count:
self.json_path_count[subtree['_json_path']] += 1
# Leaf node with value, validate and prepare for insertion
if '_column' not in subtree:
if count:
self.failure_count[path] = self.failure_count.get(path, 0) + 1
continue
col, t = subtree['_column'], subtree['_type']
if table not in self._table_columns:
if count:
self.failure_count[path] = self.failure_count.get(path, 0) + 1
continue
is_valid, new_value = self._coerce_type(t, value)
if not is_valid:
if count:
self.failure_count[path] = self.failure_count.get(path, 0) + 1
continue
res.setdefault(table, {}).setdefault(prefix, {})[col] = new_value
for table, table_values in res.items():
if table == self._root_table and item_id in extra_items:
res[table][''].update(extra_items[item_id])
# Compile table rows for this item
for table, table_values in res.items():
for prefix, row_values in table_values.items():
row_array = [item_id, prefix] + [row_values.get(t) for t in self._table_columns[table]]
yield (table, row_array)
def insert_items(self, con, items, extra_items={}, mutate=True, count=False):
''' Inserts data into database.
:param con: psycopg2 connection object
:param items: is an iterable of tuples `(item id, values)` where `values` is either:
- A nested dict conforming to the JSON spec
- A list (or iterator) of pairs where the first item in the pair is a tuple specifying the path, and the second item in the pair is the value (see the illustrative shapes in the comments below).
:param extra_items: A dictionary mapping each item id to a dict of extra column values (extra column name -> value) that gets merged into that item's root table row.
:param mutate: If this is set to `False`, nothing is actually inserted. This might be useful if you just want to validate data.
:param count: if set to `True`, it will count some things. Defaults to `False`.
Updates `self.failure_count`, a dict counting the number of failures for paths (keys are tuples, values are integers).
This function has an optimized strategy for Redshift, where it writes the data to temporary files, copies those to S3, and uses the `COPY`
command to ingest the data into Redshift. However, this strategy is only used if an `s3_client` is provided to the constructor.
Otherwise, it will fall back to the Postgres-based method of running batched insertions.
Note that the Postgres-based insertion builds up huge intermediate data structures, so it will take a lot more memory.
'''
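# Illustrative shapes for `items` and `extra_items` (all values are hypothetical):
#   nested-dict form:   items = [(1, {'address': {'city': 'Stockholm'}})]
#   path/value pairs:   items = [(1, [(('address', 'city'), 'Stockholm')])]
# Both describe the same fact. `extra_items` is keyed by item id, e.g.
#   extra_items = {1: {'loaded_at': datetime.datetime.utcnow()}}
# assuming an extra column declared as ('loaded_at', 'timestamp') in the constructor.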
rows = self._insert_items_generate_rows(items=items, extra_items=extra_items, count=count)
if not mutate:
for table, row in rows:
# Just exhaust the iterator
pass
elif self._database_flavor == 'redshift' and self._s3_client:
with tempfile.TemporaryDirectory() as tmpdirname, con.cursor() as cursor:
# Flush the iterator to temporary files on disk
temp_files, writers, file_objs = {}, {}, []
for table, row in rows:
if table not in temp_files:
fn = temp_files[table] = os.path.join(tmpdirname, table + '.csv')
f = open(fn, 'wt')
writer = csv.writer(f)
if self._debug:
print('Creating temp file for table', table, 'at', fn, file=sys.stderr)
writers[table] = writer
file_objs.append(f)
writers[table].writerow(row)
# Close local temp files so all data gets flushed to disk
for f in file_objs:
f.close()
# Upload all files to S3 and load into Redshift
# TODO: might want to use a thread pool for this
batch_random = '%012d' % random.randint(0, 999999999999)
for table, fn in temp_files.items():
s3_path = '/%s/%s/%s.csv' % (self._s3_prefix, batch_random, table)
if self._debug:
print('Uploading data for table %s from %s (%d bytes) to %s' % (table, fn, os.path.getsize(fn), s3_path), file=sys.stderr)
self._s3_client.upload_file(Filename=fn, Bucket=self._s3_bucket, Key=s3_path)
query = 'copy %s from \'s3://%s/%s\' csv %s truncatecolumns compupdate off statupdate off' % (
self._postgres_table_name(table),
self._s3_bucket, s3_path, self._s3_iam_arn and 'iam_role \'%s\'' % self._s3_iam_arn or '')
self._execute(cursor, query)
else:
# Postgres-based insertion
with con.cursor() as cursor:
data_by_table = {}
for table, row in rows:
# Note that this flushes the iterator into an in-memory datastructure, so it will be far less memory efficient than the Redshift strategy
data_by_table.setdefault(table, []).append(row)
for table, data in data_by_table.items():
cols = '("%s","%s"%s)' % (self._item_col_name, self._prefix_col_name, ''.join(',"%s"' % c for c in self._table_columns[table]))
pattern = '(' + ','.join(['%s'] * len(data[0])) + ')'
args = b','.join(cursor.mogrify(pattern, tup) for tup in data)
self._execute(cursor, b'insert into %s %s values %s' % (self._postgres_table_name(table).encode(), cols.encode(), args), query_ok_to_print=False)
def create_links(self, con):
'''Adds foreign keys between tables.'''
for from_table, cols in self._links.items():
for ref_col_name, (prefix, to_table) in cols.items():
if from_table not in self._table_columns or to_table not in self._table_columns:
continue
args = {
'from_table': self._postgres_table_name(from_table),
'to_table': self._postgres_table_name(to_table),
'ref_col': ref_col_name,
'item_col': self._item_col_name,
'prefix_col': self._prefix_col_name,
'prefix': prefix,
}
update_q = 'update %(from_table)s set "%(ref_col)s" = to_table.id from (select "%(item_col)s", "%(prefix_col)s", id from %(to_table)s) to_table' % args
if prefix:
# Forward reference from table to a definition
update_q += ' where %(from_table)s."%(item_col)s" = to_table."%(item_col)s" and %(from_table)s."%(prefix_col)s" || \'/%(prefix)s\' = to_table."%(prefix_col)s"' % args
else:
# Backward definition from a table to its patternProperty parent
update_q += ' where %(from_table)s."%(item_col)s" = to_table."%(item_col)s" and strpos(%(from_table)s."%(prefix_col)s", to_table."%(prefix_col)s") = 1' % args
alter_q = 'alter table %(from_table)s add constraint fk_%(ref_col)s foreign key ("%(ref_col)s") references %(to_table)s (id)' % args
with con.cursor() as cursor:
self._execute(cursor, update_q)
self._execute(cursor, alter_q)
def analyze(self, con):
'''Runs `analyze` on each table. This improves performance.
See the `Postgres documentation for Analyze <https://www.postgresql.org/docs/9.1/static/sql-analyze.html>`_
'''
with con.cursor() as cursor:
for table in self._table_columns.keys():
self._execute(cursor, 'analyze %s' % self._postgres_table_name(table))
class JSONSchemaToPostgres(JSONSchemaToDatabase):
'''Shorthand for JSONSchemaToDatabase(..., database_flavor='postgres')'''
def __init__(self, *args, **kwargs):
kwargs['database_flavor'] = 'postgres'
return super(JSONSchemaToPostgres, self).__init__(*args, **kwargs)
class JSONSchemaToRedshift(JSONSchemaToDatabase):
'''Shorthand for JSONSchemaToDatabase(..., database_flavor='redshift')'''
def __init__(self, *args, **kwargs):
kwargs['database_flavor'] = 'redshift'
return super(JSONSchemaToRedshift, self).__init__(*args, **kwargs)
|
6d0e4d46146f291d8aa9760a5abc9a0614c5a7aa
|
a29b8d6ae6642ef80d04ae99d721b703de06db69
|
/maro/forecasting/__init__.py
|
583f4de4e05019bbd7c4a464b6406135bcf81083
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
microsoft/maro
|
6aab1a4e86fddabf7f242f0d1020d985a5f7a5f3
|
b3c6a589ad9036b03221e776a6929b2bc1eb4680
|
refs/heads/master
| 2023-08-24T16:52:38.250279
| 2023-05-15T04:31:58
| 2023-05-15T04:31:58
| 230,389,247
| 764
| 158
|
MIT
| 2023-07-25T20:59:06
| 2019-12-27T06:48:27
|
Python
|
UTF-8
|
Python
| false
| false
| 121
|
py
|
__init__.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .moving_average import OneStepFixWindowMA
|
f4a7fa0114a0897069e338d8303a3653f15d1f95
|
a69294c7d5ee75441759b66bc20cce727350bd59
|
/ndb/django_middleware.py
|
fce0682f3626843d25ce04b73363d4ab673b8206
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/datastore-ndb-python
|
58b57437e11104bfe924fca3de2ee28319f2976f
|
59cb209ed95480025d26531fc91397575438d2fe
|
refs/heads/master
| 2023-08-21T01:16:54.021744
| 2022-10-20T23:12:51
| 2022-10-20T23:12:51
| 37,215,291
| 127
| 56
|
Apache-2.0
| 2022-10-20T23:12:53
| 2015-06-10T18:34:30
|
Python
|
UTF-8
|
Python
| false
| false
| 2,192
|
py
|
django_middleware.py
|
#
# Copyright 2008 The ndb Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Django middleware for NDB."""
__author__ = 'James A. Morrison'
from . import eventloop, tasklets
class NdbDjangoMiddleware(object):
"""Django middleware for NDB.
To use NDB with django, add
'ndb.NdbDjangoMiddleware',
to the MIDDLEWARE_CLASSES entry in your Django settings.py file.
Or, if you are using the ndb version from the SDK, use
'google.appengine.ext.ndb.NdbDjangoMiddleware',
It's best to insert it in front of any other middleware classes,
since some other middleware may make datastore calls and those won't be
handled properly if that middleware is invoked before this middleware.
See http://docs.djangoproject.com/en/dev/topics/http/middleware/. An illustrative settings snippet follows in the comments below.
"""
def process_request(self, unused_request):
"""Called by Django before deciding which view to execute."""
# Compare to the first half of toplevel() in context.py.
tasklets._state.clear_all_pending()
# Create and install a new context.
ctx = tasklets.make_default_context()
tasklets.set_context(ctx)
@staticmethod
def _finish():
# Compare to the finally clause in toplevel() in context.py.
ctx = tasklets.get_context()
tasklets.set_context(None)
ctx.flush().check_success()
eventloop.run() # Ensure writes are flushed, etc.
def process_response(self, request, response):
"""Called by Django just before returning a response."""
self._finish()
return response
def process_exception(self, unused_request, unused_exception):
"""Called by Django when a view raises an exception."""
self._finish()
return None
|
16e527638d82a544b7b7beee03ad1b686e6d6214
|
e25d0450995854b66f9909f4e47634ee5d0d232b
|
/3rdparty/intx/test/fuzzer/decode.py
|
aa942d47189ea3a6fdc4d3eed1ce637edb56d284
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
microsoft/eEVM
|
d846257ff7155c2c6c294aa6c58a14d4bc7398b8
|
05efed8658b4e10a21253df8408b1f9bdb6f1445
|
refs/heads/main
| 2023-08-27T01:15:59.048257
| 2023-07-10T13:19:39
| 2023-07-10T13:19:39
| 154,743,215
| 249
| 53
|
MIT
| 2023-07-10T13:19:41
| 2018-10-25T22:03:09
|
C++
|
UTF-8
|
Python
| false
| false
| 1,690
|
py
|
decode.py
|
#!/usr/bin/env python3
import os
import sys
ops_filter = ()
ops = ('/', '*', '<<', '>>', '+', '-', 's/')
def err(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def decode_file(file):
with open(file, 'rb') as f:
print("Decoding {}".format(file))
decode_data(f.read())
def decode_data(data):
arg_size = (len(data) - 1) // 2
if arg_size not in (16, 32, 64):
err("Incorrect argument size: {}".format(arg_size))
return
op_index = int(data[0])
if op_index >= len(ops):
return
op = ops[op_index]
if ops_filter and op not in ops_filter:
return
x = int.from_bytes(data[1:1 + arg_size], byteorder='big')
y = int.from_bytes(data[1 + arg_size:], byteorder='big')
print("argument size: {}".format(arg_size))
print(x, op, y)
print(hex(x), op, hex(y))
if op in ('/', 's/'):
print("Test:")
print("{")
print(" {}_u512,".format(hex(x)))
print(" {}_u512,".format(hex(y)))
print(" {}_u512,".format(hex(x // y)))
print(" {}_u512,".format(hex(x % y)))
print("},")
if op == 's/':
ax = (-x) % 2**512
ay = (-y) % 2**512
print("Test:")
print("{")
print(" {}_u512,".format(hex(ax)))
print(" {}_u512,".format(hex(ay)))
print(" {}_u512,".format(hex(ax // ay)))
print(" {}_u512,".format(hex(ax % ay)))
print("},")
assert len(sys.argv) > 1
path = sys.argv[1]
if (os.path.isfile(path)):
decode_file(path)
else:
for root, _, files in os.walk(path):
for file in files:
decode_file(os.path.join(root, file))
|
b9866fc29d41223098d5584337a9ba6d9c3ee436
|
1577e1cf4e89584a125cffb855ca50a9654c6d55
|
/apache/httpd/test/conftest.py
|
a5c7c6bffb09fb9e1a11605f50291f3e7d0034c8
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-zeusbench",
"BSD-3-Clause",
"RSA-MD",
"LicenseRef-scancode-rsa-1990",
"Beerware",
"LicenseRef-scancode-other-permissive",
"Spencer-94",
"metamail",
"LicenseRef-scancode-rsa-md4",
"HPND-sell-variant"
] |
permissive
|
apple-open-source/macos
|
a4188b5c2ef113d90281d03cd1b14e5ee52ebffb
|
2d2b15f13487673de33297e49f00ef94af743a9a
|
refs/heads/master
| 2023-08-01T11:03:26.870408
| 2023-03-27T00:00:00
| 2023-03-27T00:00:00
| 180,595,052
| 124
| 24
| null | 2022-12-27T14:54:09
| 2019-04-10T14:06:23
| null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
conftest.py
|
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '.'))
from pyhttpd.env import HttpdTestEnv
def pytest_report_header(config, startdir):
env = HttpdTestEnv()
return f"[apache httpd: {env.get_httpd_version()}, mpm: {env.mpm_module}, {env.prefix}]"
|
7e53d6f08f0597bbe8cb49fbd418fe59cba1bf8c
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/StandardVoucherOpenApiVO.py
|
3ba53d21598c572756e28776c5d670e1e6f6e7ce
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 22,065
|
py
|
StandardVoucherOpenApiVO.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
from alipay.aop.api.domain.AccountInfoVO import AccountInfoVO
from alipay.aop.api.domain.AccountInfoVO import AccountInfoVO
from alipay.aop.api.domain.AccountInfoVO import AccountInfoVO
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
class StandardVoucherOpenApiVO(object):
def __init__(self):
self._algorithm_tag = None
self._balance = None
self._charge_amount = None
self._coa_properties = None
self._dt = None
self._dw_log_type = None
self._event_code = None
self._ext_info = None
self._fund_biz_code = None
self._fund_biz_name = None
self._gmt_create = None
self._gmt_modified = None
self._handle_status = None
self._hour = None
self._id = None
self._idempotent_id = None
self._inst_serial_no = None
self._manual_dist_demo = None
self._manual_dist_type = None
self._memo = None
self._operator = None
self._org_trans_no = None
self._ori_trans_amount = None
self._ori_trans_rate = None
self._other_account = None
self._out_biz_no = None
self._prod_code = None
self._rel_voucher_id = None
self._status = None
self._target_account = None
self._tnt_inst_id = None
self._trans_account = None
self._trans_amount = None
self._trans_direction = None
self._trans_dt = None
self._trans_inst_id = None
self._tx_id = None
self._voucher_type = None
self._writeoff_voucher_id = None
@property
def algorithm_tag(self):
return self._algorithm_tag
@algorithm_tag.setter
def algorithm_tag(self, value):
self._algorithm_tag = value
@property
def balance(self):
return self._balance
@balance.setter
def balance(self, value):
if isinstance(value, MultiCurrencyMoneyOpenApi):
self._balance = value
else:
self._balance = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def charge_amount(self):
return self._charge_amount
@charge_amount.setter
def charge_amount(self, value):
if isinstance(value, MultiCurrencyMoneyOpenApi):
self._charge_amount = value
else:
self._charge_amount = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def coa_properties(self):
return self._coa_properties
@coa_properties.setter
def coa_properties(self, value):
self._coa_properties = value
@property
def dt(self):
return self._dt
@dt.setter
def dt(self, value):
self._dt = value
@property
def dw_log_type(self):
return self._dw_log_type
@dw_log_type.setter
def dw_log_type(self, value):
self._dw_log_type = value
@property
def event_code(self):
return self._event_code
@event_code.setter
def event_code(self, value):
self._event_code = value
@property
def ext_info(self):
return self._ext_info
@ext_info.setter
def ext_info(self, value):
self._ext_info = value
@property
def fund_biz_code(self):
return self._fund_biz_code
@fund_biz_code.setter
def fund_biz_code(self, value):
self._fund_biz_code = value
@property
def fund_biz_name(self):
return self._fund_biz_name
@fund_biz_name.setter
def fund_biz_name(self, value):
self._fund_biz_name = value
@property
def gmt_create(self):
return self._gmt_create
@gmt_create.setter
def gmt_create(self, value):
self._gmt_create = value
@property
def gmt_modified(self):
return self._gmt_modified
@gmt_modified.setter
def gmt_modified(self, value):
self._gmt_modified = value
@property
def handle_status(self):
return self._handle_status
@handle_status.setter
def handle_status(self, value):
self._handle_status = value
@property
def hour(self):
return self._hour
@hour.setter
def hour(self, value):
self._hour = value
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = value
@property
def idempotent_id(self):
return self._idempotent_id
@idempotent_id.setter
def idempotent_id(self, value):
self._idempotent_id = value
@property
def inst_serial_no(self):
return self._inst_serial_no
@inst_serial_no.setter
def inst_serial_no(self, value):
self._inst_serial_no = value
@property
def manual_dist_demo(self):
return self._manual_dist_demo
@manual_dist_demo.setter
def manual_dist_demo(self, value):
self._manual_dist_demo = value
@property
def manual_dist_type(self):
return self._manual_dist_type
@manual_dist_type.setter
def manual_dist_type(self, value):
self._manual_dist_type = value
@property
def memo(self):
return self._memo
@memo.setter
def memo(self, value):
self._memo = value
@property
def operator(self):
return self._operator
@operator.setter
def operator(self, value):
self._operator = value
@property
def org_trans_no(self):
return self._org_trans_no
@org_trans_no.setter
def org_trans_no(self, value):
self._org_trans_no = value
@property
def ori_trans_amount(self):
return self._ori_trans_amount
@ori_trans_amount.setter
def ori_trans_amount(self, value):
if isinstance(value, MultiCurrencyMoneyOpenApi):
self._ori_trans_amount = value
else:
self._ori_trans_amount = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def ori_trans_rate(self):
return self._ori_trans_rate
@ori_trans_rate.setter
def ori_trans_rate(self, value):
self._ori_trans_rate = value
@property
def other_account(self):
return self._other_account
@other_account.setter
def other_account(self, value):
if isinstance(value, AccountInfoVO):
self._other_account = value
else:
self._other_account = AccountInfoVO.from_alipay_dict(value)
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def rel_voucher_id(self):
return self._rel_voucher_id
@rel_voucher_id.setter
def rel_voucher_id(self, value):
self._rel_voucher_id = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def target_account(self):
return self._target_account
@target_account.setter
def target_account(self, value):
if isinstance(value, AccountInfoVO):
self._target_account = value
else:
self._target_account = AccountInfoVO.from_alipay_dict(value)
@property
def tnt_inst_id(self):
return self._tnt_inst_id
@tnt_inst_id.setter
def tnt_inst_id(self, value):
self._tnt_inst_id = value
@property
def trans_account(self):
return self._trans_account
@trans_account.setter
def trans_account(self, value):
if isinstance(value, AccountInfoVO):
self._trans_account = value
else:
self._trans_account = AccountInfoVO.from_alipay_dict(value)
@property
def trans_amount(self):
return self._trans_amount
@trans_amount.setter
def trans_amount(self, value):
if isinstance(value, MultiCurrencyMoneyOpenApi):
self._trans_amount = value
else:
self._trans_amount = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def trans_direction(self):
return self._trans_direction
@trans_direction.setter
def trans_direction(self, value):
self._trans_direction = value
@property
def trans_dt(self):
return self._trans_dt
@trans_dt.setter
def trans_dt(self, value):
self._trans_dt = value
@property
def trans_inst_id(self):
return self._trans_inst_id
@trans_inst_id.setter
def trans_inst_id(self, value):
self._trans_inst_id = value
@property
def tx_id(self):
return self._tx_id
@tx_id.setter
def tx_id(self, value):
self._tx_id = value
@property
def voucher_type(self):
return self._voucher_type
@voucher_type.setter
def voucher_type(self, value):
self._voucher_type = value
@property
def writeoff_voucher_id(self):
return self._writeoff_voucher_id
@writeoff_voucher_id.setter
def writeoff_voucher_id(self, value):
self._writeoff_voucher_id = value
def to_alipay_dict(self):
params = dict()
if self.algorithm_tag:
if hasattr(self.algorithm_tag, 'to_alipay_dict'):
params['algorithm_tag'] = self.algorithm_tag.to_alipay_dict()
else:
params['algorithm_tag'] = self.algorithm_tag
if self.balance:
if hasattr(self.balance, 'to_alipay_dict'):
params['balance'] = self.balance.to_alipay_dict()
else:
params['balance'] = self.balance
if self.charge_amount:
if hasattr(self.charge_amount, 'to_alipay_dict'):
params['charge_amount'] = self.charge_amount.to_alipay_dict()
else:
params['charge_amount'] = self.charge_amount
if self.coa_properties:
if hasattr(self.coa_properties, 'to_alipay_dict'):
params['coa_properties'] = self.coa_properties.to_alipay_dict()
else:
params['coa_properties'] = self.coa_properties
if self.dt:
if hasattr(self.dt, 'to_alipay_dict'):
params['dt'] = self.dt.to_alipay_dict()
else:
params['dt'] = self.dt
if self.dw_log_type:
if hasattr(self.dw_log_type, 'to_alipay_dict'):
params['dw_log_type'] = self.dw_log_type.to_alipay_dict()
else:
params['dw_log_type'] = self.dw_log_type
if self.event_code:
if hasattr(self.event_code, 'to_alipay_dict'):
params['event_code'] = self.event_code.to_alipay_dict()
else:
params['event_code'] = self.event_code
if self.ext_info:
if hasattr(self.ext_info, 'to_alipay_dict'):
params['ext_info'] = self.ext_info.to_alipay_dict()
else:
params['ext_info'] = self.ext_info
if self.fund_biz_code:
if hasattr(self.fund_biz_code, 'to_alipay_dict'):
params['fund_biz_code'] = self.fund_biz_code.to_alipay_dict()
else:
params['fund_biz_code'] = self.fund_biz_code
if self.fund_biz_name:
if hasattr(self.fund_biz_name, 'to_alipay_dict'):
params['fund_biz_name'] = self.fund_biz_name.to_alipay_dict()
else:
params['fund_biz_name'] = self.fund_biz_name
if self.gmt_create:
if hasattr(self.gmt_create, 'to_alipay_dict'):
params['gmt_create'] = self.gmt_create.to_alipay_dict()
else:
params['gmt_create'] = self.gmt_create
if self.gmt_modified:
if hasattr(self.gmt_modified, 'to_alipay_dict'):
params['gmt_modified'] = self.gmt_modified.to_alipay_dict()
else:
params['gmt_modified'] = self.gmt_modified
if self.handle_status:
if hasattr(self.handle_status, 'to_alipay_dict'):
params['handle_status'] = self.handle_status.to_alipay_dict()
else:
params['handle_status'] = self.handle_status
if self.hour:
if hasattr(self.hour, 'to_alipay_dict'):
params['hour'] = self.hour.to_alipay_dict()
else:
params['hour'] = self.hour
if self.id:
if hasattr(self.id, 'to_alipay_dict'):
params['id'] = self.id.to_alipay_dict()
else:
params['id'] = self.id
if self.idempotent_id:
if hasattr(self.idempotent_id, 'to_alipay_dict'):
params['idempotent_id'] = self.idempotent_id.to_alipay_dict()
else:
params['idempotent_id'] = self.idempotent_id
if self.inst_serial_no:
if hasattr(self.inst_serial_no, 'to_alipay_dict'):
params['inst_serial_no'] = self.inst_serial_no.to_alipay_dict()
else:
params['inst_serial_no'] = self.inst_serial_no
if self.manual_dist_demo:
if hasattr(self.manual_dist_demo, 'to_alipay_dict'):
params['manual_dist_demo'] = self.manual_dist_demo.to_alipay_dict()
else:
params['manual_dist_demo'] = self.manual_dist_demo
if self.manual_dist_type:
if hasattr(self.manual_dist_type, 'to_alipay_dict'):
params['manual_dist_type'] = self.manual_dist_type.to_alipay_dict()
else:
params['manual_dist_type'] = self.manual_dist_type
if self.memo:
if hasattr(self.memo, 'to_alipay_dict'):
params['memo'] = self.memo.to_alipay_dict()
else:
params['memo'] = self.memo
if self.operator:
if hasattr(self.operator, 'to_alipay_dict'):
params['operator'] = self.operator.to_alipay_dict()
else:
params['operator'] = self.operator
if self.org_trans_no:
if hasattr(self.org_trans_no, 'to_alipay_dict'):
params['org_trans_no'] = self.org_trans_no.to_alipay_dict()
else:
params['org_trans_no'] = self.org_trans_no
if self.ori_trans_amount:
if hasattr(self.ori_trans_amount, 'to_alipay_dict'):
params['ori_trans_amount'] = self.ori_trans_amount.to_alipay_dict()
else:
params['ori_trans_amount'] = self.ori_trans_amount
if self.ori_trans_rate:
if hasattr(self.ori_trans_rate, 'to_alipay_dict'):
params['ori_trans_rate'] = self.ori_trans_rate.to_alipay_dict()
else:
params['ori_trans_rate'] = self.ori_trans_rate
if self.other_account:
if hasattr(self.other_account, 'to_alipay_dict'):
params['other_account'] = self.other_account.to_alipay_dict()
else:
params['other_account'] = self.other_account
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
else:
params['out_biz_no'] = self.out_biz_no
if self.prod_code:
if hasattr(self.prod_code, 'to_alipay_dict'):
params['prod_code'] = self.prod_code.to_alipay_dict()
else:
params['prod_code'] = self.prod_code
if self.rel_voucher_id:
if hasattr(self.rel_voucher_id, 'to_alipay_dict'):
params['rel_voucher_id'] = self.rel_voucher_id.to_alipay_dict()
else:
params['rel_voucher_id'] = self.rel_voucher_id
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
if self.target_account:
if hasattr(self.target_account, 'to_alipay_dict'):
params['target_account'] = self.target_account.to_alipay_dict()
else:
params['target_account'] = self.target_account
if self.tnt_inst_id:
if hasattr(self.tnt_inst_id, 'to_alipay_dict'):
params['tnt_inst_id'] = self.tnt_inst_id.to_alipay_dict()
else:
params['tnt_inst_id'] = self.tnt_inst_id
if self.trans_account:
if hasattr(self.trans_account, 'to_alipay_dict'):
params['trans_account'] = self.trans_account.to_alipay_dict()
else:
params['trans_account'] = self.trans_account
if self.trans_amount:
if hasattr(self.trans_amount, 'to_alipay_dict'):
params['trans_amount'] = self.trans_amount.to_alipay_dict()
else:
params['trans_amount'] = self.trans_amount
if self.trans_direction:
if hasattr(self.trans_direction, 'to_alipay_dict'):
params['trans_direction'] = self.trans_direction.to_alipay_dict()
else:
params['trans_direction'] = self.trans_direction
if self.trans_dt:
if hasattr(self.trans_dt, 'to_alipay_dict'):
params['trans_dt'] = self.trans_dt.to_alipay_dict()
else:
params['trans_dt'] = self.trans_dt
if self.trans_inst_id:
if hasattr(self.trans_inst_id, 'to_alipay_dict'):
params['trans_inst_id'] = self.trans_inst_id.to_alipay_dict()
else:
params['trans_inst_id'] = self.trans_inst_id
if self.tx_id:
if hasattr(self.tx_id, 'to_alipay_dict'):
params['tx_id'] = self.tx_id.to_alipay_dict()
else:
params['tx_id'] = self.tx_id
if self.voucher_type:
if hasattr(self.voucher_type, 'to_alipay_dict'):
params['voucher_type'] = self.voucher_type.to_alipay_dict()
else:
params['voucher_type'] = self.voucher_type
if self.writeoff_voucher_id:
if hasattr(self.writeoff_voucher_id, 'to_alipay_dict'):
params['writeoff_voucher_id'] = self.writeoff_voucher_id.to_alipay_dict()
else:
params['writeoff_voucher_id'] = self.writeoff_voucher_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = StandardVoucherOpenApiVO()
if 'algorithm_tag' in d:
o.algorithm_tag = d['algorithm_tag']
if 'balance' in d:
o.balance = d['balance']
if 'charge_amount' in d:
o.charge_amount = d['charge_amount']
if 'coa_properties' in d:
o.coa_properties = d['coa_properties']
if 'dt' in d:
o.dt = d['dt']
if 'dw_log_type' in d:
o.dw_log_type = d['dw_log_type']
if 'event_code' in d:
o.event_code = d['event_code']
if 'ext_info' in d:
o.ext_info = d['ext_info']
if 'fund_biz_code' in d:
o.fund_biz_code = d['fund_biz_code']
if 'fund_biz_name' in d:
o.fund_biz_name = d['fund_biz_name']
if 'gmt_create' in d:
o.gmt_create = d['gmt_create']
if 'gmt_modified' in d:
o.gmt_modified = d['gmt_modified']
if 'handle_status' in d:
o.handle_status = d['handle_status']
if 'hour' in d:
o.hour = d['hour']
if 'id' in d:
o.id = d['id']
if 'idempotent_id' in d:
o.idempotent_id = d['idempotent_id']
if 'inst_serial_no' in d:
o.inst_serial_no = d['inst_serial_no']
if 'manual_dist_demo' in d:
o.manual_dist_demo = d['manual_dist_demo']
if 'manual_dist_type' in d:
o.manual_dist_type = d['manual_dist_type']
if 'memo' in d:
o.memo = d['memo']
if 'operator' in d:
o.operator = d['operator']
if 'org_trans_no' in d:
o.org_trans_no = d['org_trans_no']
if 'ori_trans_amount' in d:
o.ori_trans_amount = d['ori_trans_amount']
if 'ori_trans_rate' in d:
o.ori_trans_rate = d['ori_trans_rate']
if 'other_account' in d:
o.other_account = d['other_account']
if 'out_biz_no' in d:
o.out_biz_no = d['out_biz_no']
if 'prod_code' in d:
o.prod_code = d['prod_code']
if 'rel_voucher_id' in d:
o.rel_voucher_id = d['rel_voucher_id']
if 'status' in d:
o.status = d['status']
if 'target_account' in d:
o.target_account = d['target_account']
if 'tnt_inst_id' in d:
o.tnt_inst_id = d['tnt_inst_id']
if 'trans_account' in d:
o.trans_account = d['trans_account']
if 'trans_amount' in d:
o.trans_amount = d['trans_amount']
if 'trans_direction' in d:
o.trans_direction = d['trans_direction']
if 'trans_dt' in d:
o.trans_dt = d['trans_dt']
if 'trans_inst_id' in d:
o.trans_inst_id = d['trans_inst_id']
if 'tx_id' in d:
o.tx_id = d['tx_id']
if 'voucher_type' in d:
o.voucher_type = d['voucher_type']
if 'writeoff_voucher_id' in d:
o.writeoff_voucher_id = d['writeoff_voucher_id']
return o
|
369ff97fe8fbce3fc4c6d0bb1628c833b8ec5f2d
|
10cb11f83e1c8b51b9d72c28d6259a56ff1a97c8
|
/tests/integration/testdata/invoke/cdk/assets/asset.ecc03cd9bbdad1c6d9750d997706058e2e6890371efe8dfe21b7b4e9faf5a2df/app.py
|
af9f93753402d919430220faa1c02f2df4ad4fc6
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"BSD-2-Clause"
] |
permissive
|
aws/aws-sam-cli
|
6d4411aacf7f861e75e5cf4882a32858797a276d
|
b297ff015f2b69d7c74059c2d42ece1c29ea73ee
|
refs/heads/develop
| 2023-08-30T23:28:36.179932
| 2023-08-30T21:58:26
| 2023-08-30T21:58:26
| 92,205,085
| 1,402
| 470
|
Apache-2.0
| 2023-09-14T21:14:23
| 2017-05-23T18:16:23
|
Python
|
UTF-8
|
Python
| false
| false
| 80
|
py
|
app.py
|
def lambda_handler(event, context):
return "Hello from FunctionBundledAssets!"
|
3e87b27c1fed708bdcc6cd394200380cc392ca88
|
6cd1a085c8113030f14a8c992ecd2b45db70c4e2
|
/tests/fields/test_factory.py
|
f0da3838e639ee4d16be1c248d758d53c922fb7b
|
[
"MIT"
] |
permissive
|
mhostetter/galois
|
a72e6339ac3bba2fda31e5c9f1228ebc85d5ebe7
|
a140a468fa1f7619f94ad2551f9c14e684ee3a34
|
refs/heads/master
| 2023-07-27T17:02:04.925608
| 2023-05-09T19:39:50
| 2023-05-09T22:37:26
| 312,901,841
| 186
| 25
|
MIT
| 2023-09-10T14:56:32
| 2020-11-14T21:05:20
|
Python
|
UTF-8
|
Python
| false
| false
| 6,468
|
py
|
test_factory.py
|
"""
A pytest module to test the class factory function :obj:`galois.GF`.
"""
import numpy as np
import pytest
import galois
@pytest.mark.parametrize("characteristic,degree", [(2, 1), (2, 8), (3, 1), (3, 5)])
def test_defaults(characteristic, degree):
GF = galois.GF(characteristic**degree)
assert issubclass(GF, galois.FieldArray)
assert GF.characteristic == characteristic
assert GF.degree == degree
GF = galois.GF(characteristic, degree)
assert issubclass(GF, galois.FieldArray)
assert GF.characteristic == characteristic
assert GF.degree == degree
def test_mandatory_kwargs():
GF = galois.GF(3**5, irreducible_poly="x^5 + 2x + 1")
assert GF.order == 3**5
assert GF.irreducible_poly == "x^5 + 2x + 1"
with pytest.raises(TypeError):
galois.GF(3**5, "x^5 + 2x + 1")
GF = galois.GF(3, 5, irreducible_poly="x^5 + 2x + 1")
assert GF.order == 3**5
assert GF.irreducible_poly == "x^5 + 2x + 1"
with pytest.raises(TypeError):
galois.GF(3, 5, "x^5 + 2x + 1")
def test_defaults_dont_modify_ufunc_mode():
"""
Ensures ufunc_mode=None (the default) doesn't modify the current ufunc mode.
"""
GF = galois.GF(2**8)
GF.compile("auto") # Reset to default
assert GF.ufunc_mode == "jit-lookup"
GF.compile("jit-calculate")
assert GF.ufunc_mode == "jit-calculate"
GF = galois.GF(2**8)
assert GF.ufunc_mode == "jit-calculate"
GF.compile("auto") # Reset to default
assert GF.ufunc_mode == "jit-lookup"
def test_can_modify_ufunc_mode():
GF = galois.GF(2**8)
GF.compile("auto") # Reset to default
assert GF.ufunc_mode == "jit-lookup"
GF = galois.GF(2**8, compile="jit-calculate")
assert GF.ufunc_mode == "jit-calculate"
GF.compile("auto") # Reset to default
assert GF.ufunc_mode == "jit-lookup"
def test_defaults_dont_modify_element_repr():
"""
Ensures repr=None (the default) doesn't modify the current element representation.
"""
GF = galois.GF(2**8)
GF.repr() # Reset to default
assert GF.element_repr == "int"
GF.repr("poly")
assert GF.element_repr == "poly"
GF = galois.GF(2**8)
assert GF.element_repr == "poly"
GF.repr() # Reset to default
assert GF.element_repr == "int"
def test_can_modify_element_repr():
GF = galois.GF(2**8)
GF.repr() # Reset to default
assert GF.element_repr == "int"
GF = galois.GF(2**8, repr="poly")
assert GF.element_repr == "poly"
GF.repr() # Reset to default
assert GF.element_repr == "int"
def test_basic_exceptions():
with pytest.raises(TypeError):
galois.GF(2.0**8)
with pytest.raises(TypeError):
galois.GF(2**8, verify=1)
with pytest.raises(TypeError):
galois.GF(2**8, compile=True)
with pytest.raises(TypeError):
galois.GF(2**8, repr=True)
with pytest.raises(ValueError):
galois.GF(2**3 * 5**3)
with pytest.raises(ValueError):
galois.GF(2**8, compile="invalid-argument")
with pytest.raises(ValueError):
galois.GF(2**8, repr="invalid-argument")
def test_irreducible_poly_exceptions():
with pytest.raises(TypeError):
galois.GF(2**8, irreducible_poly=285.0)
with pytest.raises(ValueError):
galois.GF(2**8, irreducible_poly=galois.Poly([1, 0, 0, 0, 1, 1, 1, 0, 1], field=galois.GF(3)))
with pytest.raises(ValueError):
galois.GF(2**8, irreducible_poly=galois.Poly([1, 1, 0, 0, 0, 1, 1, 1, 0, 1]))
with pytest.raises(ValueError):
galois.GF(2**8, irreducible_poly=galois.Poly([1, 0, 0, 0, 1, 1, 1, 0, 0]))
# Don't allow `irreducible_poly` for prime fields
with pytest.raises(ValueError):
galois.GF(3, irreducible_poly=[1, 1])
def test_primitive_element_exceptions():
with pytest.raises(TypeError):
galois.GF(2**8, primitive_element=2.0)
with pytest.raises(ValueError):
galois.GF(2**8, primitive_element=256)
with pytest.raises(ValueError):
galois.GF(7, primitive_element=10)
with pytest.raises(ValueError):
galois.GF(7, primitive_element=4)
with pytest.raises(ValueError):
galois.GF(2**8, primitive_element=galois.Poly([1, 0], field=galois.GF(3)))
with pytest.raises(ValueError):
galois.GF(2**8, primitive_element=galois.Poly([1, 0], field=galois.GF(3)))
with pytest.raises(ValueError):
galois.GF(2**8, primitive_element=galois.Poly([1, 0, 0, 0, 1, 1, 1, 0, 1]))
with pytest.raises(ValueError):
galois.GF(2**8, primitive_element=galois.Poly([1, 1, 1]))
@pytest.mark.parametrize("characteristic,degree", [(2, 8), (3, 5)])
def test_specify_irreducible_poly(characteristic, degree):
GF = galois.GF(characteristic**degree)
poly = GF.irreducible_poly
assert galois.GF(characteristic**degree, irreducible_poly=int(poly)) is GF
assert galois.GF(characteristic**degree, irreducible_poly=str(poly)) is GF
assert galois.GF(characteristic**degree, irreducible_poly=tuple(poly.coeffs)) is GF
assert galois.GF(characteristic**degree, irreducible_poly=list(poly.coeffs)) is GF
assert galois.GF(characteristic**degree, irreducible_poly=np.array(poly.coeffs)) is GF
assert galois.GF(characteristic**degree, irreducible_poly=poly.coeffs) is GF
assert galois.GF(characteristic**degree, irreducible_poly=poly) is GF
@pytest.mark.parametrize("characteristic,degree", [(2, 1), (3, 1)])
def test_specify_primitive_element_prime(characteristic, degree):
GF = galois.GF(characteristic**degree)
alpha = GF.primitive_element
assert galois.GF(characteristic**degree, primitive_element=int(alpha)) is GF
@pytest.mark.parametrize("characteristic,degree", [(2, 8), (3, 5)])
def test_specify_primitive_element_extension(characteristic, degree):
GF = galois.GF(characteristic**degree)
poly = galois.Poly(GF.primitive_element.vector())
assert galois.GF(characteristic**degree, primitive_element=int(poly)) is GF
assert galois.GF(characteristic**degree, primitive_element=str(poly)) is GF
assert galois.GF(characteristic**degree, primitive_element=tuple(poly.coeffs)) is GF
assert galois.GF(characteristic**degree, primitive_element=list(poly.coeffs)) is GF
assert galois.GF(characteristic**degree, primitive_element=np.array(poly.coeffs)) is GF
assert galois.GF(characteristic**degree, primitive_element=poly.coeffs) is GF
assert galois.GF(characteristic**degree, primitive_element=poly) is GF
|
7f95429304344755808a5fad72b74b27e316366e
|
e9ee565cfff9e6b2a1ea6f73368f4a8948274795
|
/src/pybel/testing/cases.py
|
8ba4ce237d95dea074eb3bc95b0b49973ee18b14
|
[
"MIT"
] |
permissive
|
pybel/pybel
|
7e79530b454e23ae48486a5c0e3207744b7fa139
|
ed66f013a77f9cbc513892b0dad1025b8f68bb46
|
refs/heads/master
| 2022-08-26T18:41:25.724850
| 2022-02-11T12:22:35
| 2022-02-11T12:22:35
| 68,376,693
| 133
| 40
|
MIT
| 2022-02-11T12:11:24
| 2016-09-16T12:09:49
|
Python
|
UTF-8
|
Python
| false
| false
| 2,723
|
py
|
cases.py
|
# -*- coding: utf-8 -*-
"""Test cases for PyBEL testing."""
import logging
import os
import tempfile
import unittest
import pystow
from ..manager import Manager
__all__ = [
"TEST_CONNECTION",
"TemporaryCacheMixin",
"TemporaryCacheClsMixin",
"FleetingTemporaryCacheMixin",
]
logger = logging.getLogger(__name__)
TEST_CONNECTION = pystow.get_config("pybel", "test_connection")
class TemporaryCacheMixin(unittest.TestCase):
"""A test case that has a connection and a manager that is created for each test function."""
def setUp(self):
"""Set up the test function with a connection and manager."""
if TEST_CONNECTION:
self.connection = TEST_CONNECTION
else:
self.fd, self.path = tempfile.mkstemp()
self.connection = "sqlite:///" + self.path
logger.info("Test generated connection string %s", self.connection)
self.manager = Manager(connection=self.connection, autoflush=True)
self.manager.create_all()
def tearDown(self):
"""Tear down the test function by closing the session and removing the database."""
self.manager.session.close()
if not TEST_CONNECTION:
os.close(self.fd)
os.remove(self.path)
else:
self.manager.drop_all()
class TemporaryCacheClsMixin(unittest.TestCase):
"""A test case that has a connection and a manager that is created for each test class."""
fd, path, manager = None, None, None
@classmethod
def setUpClass(cls):
"""Set up the test class with a connection and manager."""
if TEST_CONNECTION:
cls.connection = TEST_CONNECTION
else:
cls.fd, cls.path = tempfile.mkstemp()
cls.connection = "sqlite:///" + cls.path
logger.info("Test generated connection string %s", cls.connection)
cls.manager = Manager(connection=cls.connection, autoflush=True)
cls.manager.create_all()
@classmethod
def tearDownClass(cls):
"""Tear down the test class by closing the session and removing the database."""
cls.manager.session.close()
if not TEST_CONNECTION:
os.close(cls.fd)
os.remove(cls.path)
else:
cls.manager.drop_all()
class FleetingTemporaryCacheMixin(TemporaryCacheClsMixin):
"""A test case that clears the database before each function."""
def setUp(self):
"""Set up the function by clearing the database."""
super(FleetingTemporaryCacheMixin, self).setUp()
self.manager.drop_networks()
self.manager.drop_edges()
self.manager.drop_nodes()
self.manager.drop_namespaces()
|
8ad4525a597f28745ca857b6ec9b150dd0bfdb7d
|
0760fb4901a75766921a205b55686d6d6f049b30
|
/python/ray/train/tensorflow/tensorflow_trainer.py
|
6333707e106abd8522935ffacbc02fd581fd06f3
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ray-project/ray
|
a4bb6940b08b59a61ef0b8e755a52d8563a2f867
|
edba68c3e7cf255d1d6479329f305adb7fa4c3ed
|
refs/heads/master
| 2023-08-31T03:36:48.164405
| 2023-08-31T03:20:38
| 2023-08-31T03:20:38
| 71,932,349
| 29,482
| 5,669
|
Apache-2.0
| 2023-09-14T21:48:14
| 2016-10-25T19:38:30
|
Python
|
UTF-8
|
Python
| false
| false
| 7,605
|
py
|
tensorflow_trainer.py
|
from typing import Any, Callable, Optional, Dict, Union, TYPE_CHECKING
from ray.train import DataConfig
from ray.train.tensorflow.config import TensorflowConfig
from ray.train.trainer import GenDataset
from ray.train.data_parallel_trainer import DataParallelTrainer
from ray.train import Checkpoint, ScalingConfig, RunConfig
from ray.util import PublicAPI
if TYPE_CHECKING:
from ray.data.preprocessor import Preprocessor
@PublicAPI(stability="beta")
class TensorflowTrainer(DataParallelTrainer):
"""A Trainer for data parallel Tensorflow training.
This Trainer runs the function ``train_loop_per_worker`` on multiple Ray
Actors. These actors already have the necessary TensorFlow process group
configured for distributed TensorFlow training.
The ``train_loop_per_worker`` function is expected to take in either 0 or 1
arguments:
.. testcode::
def train_loop_per_worker():
...
.. testcode::
def train_loop_per_worker(config: Dict):
...
If ``train_loop_per_worker`` accepts an argument, then
``train_loop_config`` will be passed in as the argument. This is useful if you
want to tune the values in ``train_loop_config`` as hyperparameters.
If the ``datasets`` dict contains a training dataset (denoted by
the "train" key), then it will be split into multiple dataset
shards that can then be accessed by ``ray.train.get_dataset_shard("train")`` inside
``train_loop_per_worker``. All the other datasets will not be split and
``ray.train.get_dataset_shard(...)`` will return the entire Dataset.
Inside the ``train_loop_per_worker`` function, you can use any of the
:ref:`Ray Train loop methods <train-loop-api>`.
.. warning::
Ray will not automatically set any environment variables or configuration
related to local parallelism / threading
:ref:`aside from "OMP_NUM_THREADS" <omp-num-thread-note>`.
If you desire greater control over TensorFlow threading, use
the ``tf.config.threading`` module (e.g.
``tf.config.threading.set_inter_op_parallelism_threads(num_cpus)``)
at the beginning of your ``train_loop_per_worker`` function.
.. testcode::
from ray import train
def train_loop_per_worker():
# Report intermediate results for callbacks or logging and
# checkpoint data.
train.report(...)
# Returns dict of last saved checkpoint.
train.get_checkpoint()
# Returns the Dataset shard for the given key.
train.get_dataset_shard("my_dataset")
# Returns the total number of workers executing training.
train.get_context().get_world_size()
# Returns the rank of this worker.
train.get_context().get_world_rank()
# Returns the rank of the worker on the current node.
train.get_context().get_local_rank()
Any returns from the ``train_loop_per_worker`` will be discarded and not
used or persisted anywhere.
To save a model to use for the ``TensorflowPredictor``, you must save it under the
"model" kwarg in ``Checkpoint`` passed to ``train.report()``.
Example:
.. testcode::
import os
import tempfile
import tensorflow as tf
import ray
from ray import train
from ray.train import Checkpoint, ScalingConfig
from ray.train.tensorflow import TensorflowTrainer
def build_model():
# toy neural network : 1-layer
return tf.keras.Sequential(
[tf.keras.layers.Dense(
1, activation="linear", input_shape=(1,))]
)
def train_loop_per_worker(config):
dataset_shard = train.get_dataset_shard("train")
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
with strategy.scope():
model = build_model()
model.compile(
optimizer="Adam", loss="mean_squared_error", metrics=["mse"])
tf_dataset = dataset_shard.to_tf(
feature_columns="x",
label_columns="y",
batch_size=1
)
for epoch in range(config["num_epochs"]):
model.fit(tf_dataset)
# Create checkpoint.
checkpoint_dir = tempfile.mkdtemp()
model.save_weights(
os.path.join(checkpoint_dir, "my_checkpoint")
)
checkpoint = Checkpoint.from_directory(checkpoint_dir)
train.report(
{},
checkpoint=checkpoint,
)
train_dataset = ray.data.from_items([{"x": x, "y": x + 1} for x in range(32)])
trainer = TensorflowTrainer(
train_loop_per_worker=train_loop_per_worker,
scaling_config=ScalingConfig(num_workers=3, use_gpu=True),
datasets={"train": train_dataset},
train_loop_config={"num_epochs": 2},
)
result = trainer.fit()
.. testoutput::
:options: +ELLIPSIS
:hide:
...
Args:
train_loop_per_worker: The training function to execute.
This can either take in no arguments or a ``config`` dict.
train_loop_config: Configurations to pass into
``train_loop_per_worker`` if it accepts an argument.
tensorflow_config: Configuration for setting up the TensorFlow backend.
If set to None, use the default configuration. This replaces the
``backend_config`` arg of ``DataParallelTrainer``.
scaling_config: Configuration for how to scale data parallel training.
dataset_config: Configuration for dataset ingest.
run_config: Configuration for the execution of the training run.
datasets: Any Datasets to use for training. Use
the key "train" to denote which dataset is the training
dataset.
resume_from_checkpoint: A checkpoint to resume training from.
metadata: Dict that should be made available via
`ray.train.get_context().get_metadata()` and in `checkpoint.get_metadata()`
for checkpoints saved from this Trainer. Must be JSON-serializable.
"""
def __init__(
self,
train_loop_per_worker: Union[Callable[[], None], Callable[[Dict], None]],
*,
train_loop_config: Optional[Dict] = None,
tensorflow_config: Optional[TensorflowConfig] = None,
scaling_config: Optional[ScalingConfig] = None,
dataset_config: Optional[DataConfig] = None,
run_config: Optional[RunConfig] = None,
datasets: Optional[Dict[str, GenDataset]] = None,
metadata: Optional[Dict[str, Any]] = None,
resume_from_checkpoint: Optional[Checkpoint] = None,
# Deprecated.
preprocessor: Optional["Preprocessor"] = None,
):
if not tensorflow_config:
tensorflow_config = TensorflowConfig()
super(TensorflowTrainer, self).__init__(
train_loop_per_worker=train_loop_per_worker,
train_loop_config=train_loop_config,
backend_config=tensorflow_config,
scaling_config=scaling_config,
dataset_config=dataset_config,
run_config=run_config,
datasets=datasets,
preprocessor=preprocessor,
resume_from_checkpoint=resume_from_checkpoint,
metadata=metadata,
)
|
fe648c17e9626af829b18d1a2bc8cebaad3c9b91
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/HLTrigger/Configuration/python/HLT_75e33/modules/hltPhase2L3OIL3MuonCandidates_cfi.py
|
27d13fe71e8c1276bfe9b8bd96e6e56d8437626c
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 294
|
py
|
hltPhase2L3OIL3MuonCandidates_cfi.py
|
import FWCore.ParameterSet.Config as cms
hltPhase2L3OIL3MuonCandidates = cms.EDProducer("L3MuonCandidateProducer",
InputLinksObjects = cms.InputTag("hltPhase2L3OIL3MuonsLinksCombination"),
InputObjects = cms.InputTag("hltPhase2L3OIL3Muons"),
MuonPtOption = cms.string('Tracker')
)
|
eda6e5996d22b9de34e24669637f1be8e1415819
|
34f1136066c529b94dd1016eaf6f7ba47141b06c
|
/pyroSAR/snap/data/collect_suffices.py
|
1103086a74dc2dfb42cbc95c950ed00450565bc1
|
[
"MIT"
] |
permissive
|
johntruckenbrodt/pyroSAR
|
1b2103052dff89f56770d90b2a3c6f3b4dc6ae35
|
27d366d41bb5d0b2153e1b7c2f25a3945127abe0
|
refs/heads/main
| 2023-08-31T08:30:17.954438
| 2023-08-21T09:48:20
| 2023-08-21T09:48:20
| 95,563,172
| 421
| 111
|
MIT
| 2023-09-12T08:09:18
| 2017-06-27T13:48:33
|
Python
|
UTF-8
|
Python
| false
| false
| 3,902
|
py
|
collect_suffices.py
|
##############################################################
# SNAP source code scan for retrieving operator suffices
# Copyright (c) 2020-2021, the pyroSAR Developers.
# This file is part of the pyroSAR Project. It is subject to the
# license terms in the LICENSE.txt file found in the top-level
# directory of this distribution and at
# https://github.com/johntruckenbrodt/pyroSAR/blob/master/LICENSE.txt.
# No part of the pyroSAR project, including this file, may be
# copied, modified, propagated, or distributed except according
# to the terms contained in the LICENSE.txt file.
##############################################################
import os
import re
import subprocess as sp
from spatialist.ancillary import finder
"""
This script clones the SNAP source code from GitHub and reads the suffices for SNAP operators.
E.g. The operator Terrain-Flattening has a suffix TF. If Terrain-Flattening is added to a workflow
in SNAP's graph builder, this suffix is appended to the automatically created output file name.
As pyroSAR also automatically creates file names with processing step suffices, it is convenient to just
use those defined by SNAP.
Currently I am not aware of any way to retrieve them directly from a SNAP installation.
A question about this has been asked in the STEP forum: https://forum.step.esa.int/t/snappy-get-operator-product-suffix/22885
Feel free to contact me if you have ideas on how to improve this!
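For reference, the resulting snap.suffices.properties file holds one
<alias>=<suffix> pair per line. An illustrative excerpt (partly assumed;
only Terrain-Flattening=TF and Calibration=Cal are confirmed by the notes
and code below):
    Calibration=Cal
    Terrain-Flattening=TF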
"""
def main():
# some arbitrary directory for the source code
workdir = os.path.join(os.path.expanduser('~'), '.pyrosar', 'snap_code')
os.makedirs(workdir, exist_ok=True)
# the name of the Java properties file containing the operator-suffix lookup
outfile = 'snap.suffices.properties'
# clone all relevant toolboxes
for tbx in ['snap-engine', 'snap-desktop', 's1tbx']:
print(tbx)
target = os.path.join(workdir, tbx)
if not os.path.isdir(target):
url = 'https://github.com/senbox-org/{}'.format(tbx)
sp.check_call(['git', 'clone', url], cwd=workdir)
else:
sp.check_call(['git', 'pull'], cwd=target)
# search patterns for relevant files
# Usually files containing operator classes are named <operator>Op.java, but without dashes
# e.g. TerrainFlatteningOp.java for the Terrain-Flattening operator
# One exception is Calibration for which there is a sub-class for each SAR sensor
operators = finder(workdir, ['*Op.java', 'BaseCalibrator.java'])
# a list for collecting the suffices
collect = []
for op in operators:
print(op)
with open(op, encoding='utf8') as infile:
content = infile.read()
# the suffix is defined as a class attribute PRODUCT_SUFFIX
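# e.g. the pattern below would match a (hypothetical) Java declaration such as
#   private static final String PRODUCT_SUFFIX = "_TF";
# and capture "TF" as the suffix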
pattern = 'String PRODUCT_SUFFIX = \"_([a-zA-Z]*)\"'
match = re.search(pattern, content)
if match:
suffix = match.groups()[0]
else:
suffix = ''
# the name of the operator as available in the UI
pattern = 'alias = \"([a-zA-Z-]*)\"'
match = re.search(pattern, content)
if match:
alias = match.groups()[0]
else:
alias = None
if suffix == 'Cal':
alias = 'Calibration'
# only collect operators for which an alias exists, i.e. which are exposed in the UI,
# and for which a suffix is defined. In the UI, all operators for which no suffix exists
# will just get no suffix in any written file.
if alias is not None and suffix != '':
print(alias, suffix)
collect.append('{0}={1}'.format(alias, suffix))
print('found {} matching operators'.format(len(collect)))
with open(outfile, 'w') as out:
out.write('\n'.join(sorted(collect, key=str.lower)))
if __name__ == '__main__':
main()
|
372de2edc96b4f09e51638b65f5c6a1a9976d98f
|
490f57a6d8dfc6dde28c8caf553b1c1b18e7dfd3
|
/nilmtk/legacy/disaggregate/__init__.py
|
2fab018cda3d11ae0bac6b5e92977ba9bee81e57
|
[
"Apache-2.0"
] |
permissive
|
nilmtk/nilmtk
|
280358b257f70e1e893ff4d21cc01c08e0ad17e0
|
b2c514479cef478cab872cb635056da08d5352a1
|
refs/heads/master
| 2023-08-12T09:45:58.010261
| 2023-06-07T06:59:36
| 2023-06-07T06:59:36
| 14,891,877
| 769
| 549
|
Apache-2.0
| 2023-06-07T06:59:37
| 2013-12-03T11:39:12
|
Python
|
UTF-8
|
Python
| false
| false
| 212
|
py
|
__init__.py
|
from .disaggregator import Disaggregator
from .combinatorial_optimisation import CombinatorialOptimisation
from .fhmm_exact import FHMM
from .hart_85 import Hart85
from .maximum_likelihood_estimation import MLE
|