| column | dtype | range / distinct values |
|---|---|---|
| blob_id | string | length 40 – 40 |
| directory_id | string | length 40 – 40 |
| path | string | length 4 – 721 |
| content_id | string | length 40 – 40 |
| detected_licenses | list | length 0 – 57 |
| license_type | string | 2 classes |
| repo_name | string | length 5 – 91 |
| snapshot_id | string | length 40 – 40 |
| revision_id | string | length 40 – 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 – 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| github_id | int64 | 426 – 681M |
| star_events_count | int64 | 101 – 243k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] | 2012-06-28 18:51:49 – 2023-09-14 21:59:16, nullable (⌀) |
| gha_created_at | timestamp[ns] | 2008-02-11 22:55:26 – 2023-08-10 11:14:58, nullable (⌀) |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 – 10.2M |
| extension | string | 115 classes |
| filename | string | length 3 – 113 |
| content | string | length 6 – 10.2M |

---
blob_id: b8ec49f2a8450bb2aafb64d16aace1fcb0b28281 | directory_id: 3481023b43028c5ee9520a8be0978e914bdcb548 | content_id: ee1437168c42cb22631f91edb91bff67dfd01f6a
repo_name: manga-py/manga-py | path: /manga_py/providers/chochox_com.py | filename: chochox_com.py | extension: py | length_bytes: 731
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
snapshot_id: 18f6818d8efc96c3e69efee7dff3f3d6c773e32a | revision_id: 0db97123acab1f2fb99e808b0ba54db08977e5c8 | branch_name: refs/heads/stable_1.x
visit_date: 2023-08-20T03:04:06.373108 | revision_date: 2023-04-16T08:28:15 | committer_date: 2023-04-16T08:28:15
github_id: 98,638,892 | star_events_count: 444 | fork_events_count: 56
gha_event_created_at: 2023-07-27T13:21:40 | gha_created_at: 2017-07-28T10:27:43 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from manga_py.provider import Provider
from .helpers.std import Std
class ChoChoxCom(Provider, Std):
def get_archive_name(self) -> str:
return 'archive'
def get_chapter_index(self) -> str:
return '0'
def get_content(self):
return self._get_content('{}/{}')
def get_manga_name(self) -> str:
return self._get_name(r'\.\w{2,7}/([^/]+)')
def get_chapters(self):
return [b'']
def get_files(self):
return [i.get('src') for i in self._elements('img.alignnone')]
def get_cover(self) -> str:
pass
def book_meta(self) -> dict:
# todo meta
pass
def chapter_for_json(self):
return self.get_url()
main = ChoChoxCom
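# --- Illustrative sketch (not part of the original file; URL is a made-up example) ---
# get_manga_name() relies on the regex above to pull the title slug out of the URL:
if __name__ == "__main__":
    import re
    m = re.search(r'\.\w{2,7}/([^/]+)', 'http://example.com/some-title/chapter-1')
    print(m.group(1))  # -> 'some-title'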

---
blob_id: 7179e6db311070fc297499324bf94b415b5b904f | directory_id: 8ebc3925894d4f796efb703cdf3254fc56724c3a | content_id: e3a7b318ac07eed48d427f0078c9a550db6c5afa
repo_name: pulumi/examples | path: /aws-py-dynamicresource/mysql_dynamic_provider.py | filename: mysql_dynamic_provider.py | extension: py | length_bytes: 4,754
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
snapshot_id: 8db27b8847f8c05bcc8d99cdec8eb6c7b7ffa2a3 | revision_id: 26ffb4bb327f00457796c96676e7db5e25e2bbd6 | branch_name: refs/heads/master
visit_date: 2023-09-04T04:56:53.098380 | revision_date: 2023-08-31T14:33:12 | committer_date: 2023-08-31T14:33:12
github_id: 108,589,232 | star_events_count: 2,156 | fork_events_count: 974
gha_event_created_at: 2023-09-13T23:27:18 | gha_created_at: 2017-10-27T19:50:31 | gha_language: TypeScript
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# Copyright 2016-2020, Pulumi Corporation. All rights reserved.
import mysql.connector as connector
from mysql.connector import errorcode
from pulumi import Input, Output, ResourceOptions
from pulumi.dynamic import *
from typing import Any, Optional
import binascii
import os
# A class representing the arguments that the dynamic provider needs. Each argument
# will automatically be converted from Input[T] to T before being passed to the
# functions in the provider
class SchemaInputs(object):
creator_name: Input[str]
creator_password: Input[str]
server_address: Input[str]
database_name: Input[str]
creation_script: Input[str]
deletion_script: Input[str]
def __init__(self, creator_name, creator_password, server_address, database_name, creation_script, deletion_script):
self.creator_name = creator_name
self.creator_password = creator_password
self.server_address = server_address
self.database_name = database_name
self.creation_script = creation_script
self.deletion_script = deletion_script
# The code for the dynamic provider that gives us our custom resource. It handles
# all the create, read, update, and delete operations the resource needs.
class SchemaProvider(ResourceProvider):
# The function that is called when a new resource needs to be created
def create(self, args):
# A connection is created to the MySQL database, and the script is run
connection = connector.connect(user=args["creator_name"],
password=args["creator_password"],
host=args["server_address"],
database=args["database_name"])
cursor = connection.cursor()
cursor.execute(args["creation_script"], multi=True)
# The creation process is finished. We assign a unique ID to this resource,
# and return all the outputs required by the resource (in this case
# outputs are identical to the inputs)
return CreateResult("schema-"+binascii.b2a_hex(os.urandom(16)).decode("utf-8"), outs=args)
# The function that is called when an existing resource needs to be deleted
def delete(self, id, args):
# A connection is created to the MySQL database, and the script is run
connection = connector.connect(user=args["creator_name"],
password=args["creator_password"],
host=args["server_address"],
database=args["database_name"])
cursor = connection.cursor()
cursor.execute(args["deletion_script"])
# The function that determines if an existing resource whose inputs were
# modified needs to be updated or entirely replaced
def diff(self, id, old_inputs, new_inputs):
# server_address, database_name, and creation_script are critical inputs
# that require the resource to be entirely replaced if they are modified.
# Changes in other inputs mean the resource can be safely updated without
# recreating it
replaces = []
if (old_inputs["server_address"] != new_inputs["server_address"]): replaces.append("server_address")
if (old_inputs["database_name"] != new_inputs["database_name"]): replaces.append("database_name")
if (old_inputs["creation_script"] != new_inputs["creation_script"]): replaces.append("creation_script")
return DiffResult(
# If the old and new inputs don't match, the resource needs to be updated/replaced
changes=old_inputs != new_inputs,
# If the replaces[] list is empty, nothing important was changed, and we do not have to
# replace the resource
replaces=replaces,
# An optional list of inputs that are always constant
stables=None,
# The existing resource is deleted before the new one is created
delete_before_replace=True)
# The function that updates an existing resource without deleting and
# recreating it from scratch
def update(self, id, old_inputs, new_inputs):
# The old existing inputs are discarded and the new inputs are used
return UpdateResult(outs={**new_inputs})
# The main Schema resource that we instantiate in our infrastructure code
class Schema(Resource):
# The inputs used by the dynamic provider are made implicitly available as outputs
creator_name: Output[str]
creator_password: Output[str]
server_address: Output[str]
database_name: Output[str]
creation_script: Output[str]
deletion_script: Output[str]
def __init__(self, name: str, args: SchemaInputs, opts = None):
# NOTE: The args object is converted to a dictionary using vars()
super().__init__(SchemaProvider(), name, vars(args), opts)
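# --- Hypothetical usage sketch (names, values, and SQL below are assumptions,
# not taken from the repo): a Pulumi program would construct SchemaInputs and
# instantiate the Schema resource like this. ---
#
# schema = Schema(
#     "app-schema",
#     SchemaInputs(
#         creator_name="admin",
#         creator_password=password,            # ideally a Pulumi secret
#         server_address="mysql.example.com",
#         database_name="appdb",
#         creation_script="CREATE TABLE votes (vote_id VARCHAR(36) PRIMARY KEY);",
#         deletion_script="DROP TABLE votes;",
#     ),
# )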

---
blob_id: 2d80b01fb39abc1d846bece0586562063ba97bd3 | directory_id: fb1e852da0a026fb59c8cb24aeb40e62005501f1 | content_id: 6c22c00e3f6aa3b52f7a1661e8fa69d6b83714b1
repo_name: microsoft/unilm | path: /infoxlm/src-infoxlm/infoxlm/tasks/mlm.py | filename: mlm.py | extension: py | length_bytes: 1,941
detected_licenses: ["LicenseRef-scancode-unknown-license-reference", "LGPL-2.1-or-later", "LicenseRef-scancode-free-unknown", "Apache-2.0", "MIT"] | license_type: permissive | gha_license_id: MIT
snapshot_id: 134aa44867c5ed36222220d3f4fd9616d02db573 | revision_id: b60c741f746877293bb85eed6806736fc8fa0ffd | branch_name: refs/heads/master
visit_date: 2023-08-31T04:09:05.779071 | revision_date: 2023-08-29T14:07:57 | committer_date: 2023-08-29T14:07:57
github_id: 198,350,484 | star_events_count: 15,313 | fork_events_count: 2,192
gha_event_created_at: 2023-08-19T11:33:20 | gha_created_at: 2019-07-23T04:15:28 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
import os
from fairseq.tasks import register_task, FairseqTask
from fairseq.data.dictionary import Dictionary
from infoxlm.data import mlm_utils
@register_task("mlm")
class Mlm(FairseqTask):
@staticmethod
def add_args(parser):
mlm_utils.add_mlm_args(parser)
parser.add_argument('data', help='colon separated path to data directories list, '
'will be iterated upon during epochs in round-robin manner')
parser.add_argument('--tokens-per-sample', default=512, type=int,
help='max number of total tokens over all segments per sample')
# apply prepend bos + tokenblock
parser.add_argument('--apply_ptb', default=False, action='store_true')
@classmethod
def setup_task(cls, args, **kwargs):
dictionary = Dictionary.load(os.path.join(args.data, 'dict.txt'))
print('| Dictionary: {} types'.format(len(dictionary)), flush=True)
return cls(args, dictionary)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.mask_idx = self.dictionary.add_symbol('<mask>')
self.seed = args.seed
self.mww = self._get_whole_word_mask()
def _get_whole_word_mask(self):
# create masked input and targets
if self.args.mask_whole_words:
print("| Get whole work mask ...")
return mlm_utils.get_whole_word_mask(self.args, self.dictionary)
return None
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
print("| Loading dataset at epoch %d" % epoch, flush=True)
args = self.args
sid = 0
dataset_path = os.path.join(args.data, "train.%d" % sid)
dataset = mlm_utils.get_mlm_dataset(
args, dataset_path, self.dictionary, self.mask_idx, self.mww, combine=False)
self.datasets[split] = dataset
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
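# --- Hedged usage note (not part of the original file): with this module on the
# path, fairseq can select the task by its registered name; flags other than the
# ones defined in add_args() above come from fairseq itself, e.g.:
#   fairseq-train /path/to/data --task mlm --tokens-per-sample 512 --apply_ptb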

---
blob_id: e002dfaa7920c12f35c6d1e6b5863d93ba5bcdfa | directory_id: 5ea4a3a0e50d2cee386f497c8449d13cd80450f9 | content_id: 69f83ade62a4fddf0f9f534fea452363978fa0f8
repo_name: tefra/xsdata | path: /xsdata/codegen/mixins.py | filename: mixins.py | extension: py | length_bytes: 2,890
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
snapshot_id: 8df028ff79cd04b29ecf24401810562b8917b7be | revision_id: 31f672af84fd040a97996871916a41b1046fe46b | branch_name: refs/heads/main
visit_date: 2023-08-17T03:20:06.912750 | revision_date: 2023-08-12T15:24:40 | committer_date: 2023-08-12T15:24:40
github_id: 217,130,848 | star_events_count: 243 | fork_events_count: 49
gha_event_created_at: 2023-08-30T15:25:31 | gha_created_at: 2019-10-23T18:51:12 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
import abc
from abc import ABCMeta
from typing import Callable
from typing import Iterator
from typing import List
from typing import Optional
from xsdata.codegen.models import Attr
from xsdata.codegen.models import Class
from xsdata.models.config import GeneratorConfig
from xsdata.utils.constants import return_true
class ContainerInterface(abc.ABC):
"""Wrap a list of classes and expose a simple api for easy access and
process."""
__slots__ = ("config",)
def __init__(self, config: GeneratorConfig):
self.config = config
@abc.abstractmethod
def __iter__(self) -> Iterator[Class]:
"""Create an iterator for the class map values."""
@abc.abstractmethod
def find(self, qname: str, condition: Callable = return_true) -> Optional[Class]:
"""Search by qualified name for a specific class with an optional
condition callable."""
@abc.abstractmethod
def find_inner(self, source: Class, qname: str) -> Class:
"""Search by qualified name for a specific inner class or fail."""
@abc.abstractmethod
def first(self, qname: str) -> Class:
"""Search by qualified name for a specific class and return the first
available."""
@abc.abstractmethod
def add(self, item: Class):
"""Add class item to the container."""
@abc.abstractmethod
def extend(self, items: List[Class]):
"""Add a list of classes to the container."""
@abc.abstractmethod
def reset(self, item: Class, qname: str):
"""Update the given class qualified name."""
@abc.abstractmethod
def set(self, items: List[Class]):
"""Set the list of classes to the container."""
class HandlerInterface(abc.ABC):
"""Class handler interface."""
__slots__ = ()
@abc.abstractmethod
def process(self, target: Class):
"""Process the given target class."""
class RelativeHandlerInterface(HandlerInterface, metaclass=ABCMeta):
"""Class handler interface with access to the complete classes'
container."""
__slots__ = "container"
def __init__(self, container: ContainerInterface):
self.container = container
def base_attrs(self, target: Class) -> List[Attr]:
attrs: List[Attr] = []
for extension in target.extensions:
base = self.container.find(extension.type.qname)
assert base is not None
attrs.extend(self.base_attrs(base))
attrs.extend(base.attrs)
return attrs
@abc.abstractmethod
def process(self, target: Class):
"""Process class."""
class ContainerHandlerInterface(abc.ABC):
"""Class container."""
__slots__ = "container"
def __init__(self, container: ContainerInterface):
self.container = container
@abc.abstractmethod
def run(self):
"""Run the process for the whole container."""

---
blob_id: a16b0e305d70ee272ab9f51d8e869a30abbecb31 | directory_id: d26bcefcb646e1bf9843d4872b2f85c12a0872db | content_id: fb7d39bb8d3f7950aef454c7f817abdf27257dd4
repo_name: datamol-io/molfeat | path: /molfeat/trans/pretrained/__init__.py | filename: __init__.py | extension: py | length_bytes: 347
detected_licenses: ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
snapshot_id: 2247335df72fb6ac715869a5752d7215f61c5af0 | revision_id: 4390f9fce25fa2da94338227f7c8f33a23e25b2a | branch_name: refs/heads/main
visit_date: 2023-08-31T18:27:43.933571 | revision_date: 2023-08-01T13:42:04 | committer_date: 2023-08-01T13:42:04
github_id: 613,548,667 | star_events_count: 111 | fork_events_count: 14
gha_event_created_at: 2023-09-08T12:31:02 | gha_created_at: 2023-03-13T19:39:29 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from molfeat.trans.pretrained.base import PretrainedMolTransformer
from molfeat.trans.pretrained.fcd import FCDTransformer
from molfeat.trans.pretrained.dgl_pretrained import PretrainedDGLTransformer
from molfeat.trans.pretrained.graphormer import GraphormerTransformer
from molfeat.trans.pretrained.hf_transformers import PretrainedHFTransformer
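# --- Hedged note (derived from the re-exports above, not part of the file):
# this __init__ flattens the subpackage namespace, so callers can write e.g.
#   from molfeat.trans.pretrained import PretrainedHFTransformer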

---
blob_id: 2651217c414da4593610eec17cd64af49502802f | directory_id: 568a2667a1b6ec33a0dec9ac01844ef74e11ab2b | content_id: e680338bf72e41ec4e4639385e32a51ea15bef89
repo_name: landlab/landlab | path: /tests/components/tectonics/test_listric_kinematic_extender.py | filename: test_listric_kinematic_extender.py | extension: py | length_bytes: 5,868
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
snapshot_id: 0bcc9b7b1d8c4d7f79bad687e1526b80ebc83728 | revision_id: 1cd72e5832ece1aa922cd1b239e2e94ed0f11f8b | branch_name: refs/heads/master
visit_date: 2023-08-31T07:24:21.545523 | revision_date: 2023-08-29T18:51:06 | committer_date: 2023-08-29T18:51:06
github_id: 19,599,383 | star_events_count: 326 | fork_events_count: 313
gha_event_created_at: 2023-09-14T19:12:23 | gha_created_at: 2014-05-09T04:52:50 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
#!/usr/bin/env python3
"""
Created on Fri Mar 5 08:42:24 2021
@author: gtucker
"""
from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_raises
from landlab import HexModelGrid, RadialModelGrid, RasterModelGrid
from landlab.components import Flexure, ListricKinematicExtender
def test_hangingwall_nodes():
"""Test the correct identification of hangingwall nodes."""
grid = RasterModelGrid((3, 7), xy_spacing=2500.0)
grid.add_zeros("topographic__elevation", at="node")
extender = ListricKinematicExtender(grid, fault_location=2500.0)
assert_array_equal(
extender._hangwall, [2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 16, 17, 18, 19, 20]
)
def test_subsidence_and_horiz_shift():
"""Test that elev subsides then shifts after 2 time steps."""
grid = RasterModelGrid((3, 7), xy_spacing=2500.0)
topo = grid.add_zeros("topographic__elevation", at="node")
extender = ListricKinematicExtender(
grid, extension_rate=0.01, fault_location=2500.0
)
# Run long enough to extend by half a grid cell
extender.run_one_step(dt=125000.0)
assert_array_almost_equal(
topo[7:14],
[0.0, 0.0, -1404.156819, -910.66907, -590.616478, -383.045648, -248.425118],
)
# Now extend another half cell, so cumulative extension is one cell and
# elevations should get shifted by one cell
extender.run_one_step(dt=125000.0)
assert_array_almost_equal(
topo[7:14],
[0.0, 0.0, -3514.477461, -2808.313638, -1821.338140, -1181.232956, -766.091296],
)
# Another step, and this time the hangingwall edge has moved by one cell,
# so the first 3 cells in this row should not further subside.
extender.run_one_step(dt=125000.0)
assert_array_almost_equal(
topo[7:14],
[
0.0,
0.0,
-3514.477461,
-3718.982708,
-2411.954617,
-1564.278603,
-1014.516414,
],
)
def test_with_hex_grid():
grid = HexModelGrid((5, 5), node_layout="rect")
grid.add_zeros("topographic__elevation", at="node")
ListricKinematicExtender(grid)
ListricKinematicExtender(grid, fault_location=2.0)
grid = HexModelGrid((5, 5), node_layout="rect", orientation="vertical")
grid.add_zeros("topographic__elevation", at="node")
assert_raises(NotImplementedError, ListricKinematicExtender, grid)
def test_with_flexure():
"""Test integrating with flexure."""
crust_density = 2700.0 # density of crustal column, kg/m3
dx = 2500.0 # grid spacing, m
dt = 125000.0 # time step, y
upper_crust_base_depth = 10000.0 # m
grid = RasterModelGrid((3, 7), xy_spacing=dx)
topo = grid.add_zeros("topographic__elevation", at="node")
load = grid.add_zeros("lithosphere__overlying_pressure_increment", at="node")
thickness = grid.add_zeros("upper_crust_thickness", at="node")
upper_crust_base = grid.add_zeros("upper_crust_base__elevation", at="node")
extender = ListricKinematicExtender(
grid,
extension_rate=0.01,
fault_location=2500.0,
track_crustal_thickness=True,
)
flexer = Flexure(grid, eet=5000.0, method="flexure")
deflection = grid.at_node["lithosphere_surface__elevation_increment"]
topo[
grid.x_of_node <= 7500.0
] = 1000.0 # this will force thickness to be 1 km greater at left
upper_crust_base[:] = -upper_crust_base_depth
thickness[:] = topo - upper_crust_base
unit_wt = crust_density * flexer.gravity
load[:] = unit_wt * thickness # loading pressure
# Get the initial deflection, which we'll need to calculate total current
# deflection
flexer.update()
init_deflection = deflection.copy()
# Run extension for half a grid cell. Elevations change, but thickness
# doesn't, so deflection should not change. We should be able to recover
# elevation from:
#
# topo = thickness + crust base - (deflection + subsidence)
#
extender.run_one_step(dt=dt)
flexer.update()
net_deflection = deflection - init_deflection
assert_array_almost_equal(
net_deflection[7:14],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
)
test_topo = thickness + upper_crust_base - (net_deflection + extender._cum_subs)
assert_array_almost_equal(topo, test_topo)
# Now extend for another half cell, which should force a shift. The
# cumulative subsidence will be subtracted from the thickness field,
# representing thinning as the hangingwall slides to the "right". This
# will cause net upward isostatic deflection.
extender.run_one_step(dt=dt)
load[:] = unit_wt * thickness
flexer.update()
net_deflection = deflection - init_deflection
assert_array_almost_equal(
thickness[7:14],
[
11000.0,
11000.0,
8191.686362, # greatest subsidence: lost nearly 3 km
9178.66186,
9818.767044, # thicker because shifted (only lost <200 m)
9233.908704,
9503.149763,
],
)
assert_array_almost_equal(
net_deflection[7:14],
[
-59.497362,
-65.176276,
-69.222531,
-70.334462,
-68.608952,
-64.912352,
-59.743080,
],
)
def test_error_handling():
radial_grid = RadialModelGrid(
n_rings=1, nodes_in_first_ring=8
) # , xy_of_center=(0., 0.))
assert_raises(TypeError, ListricKinematicExtender, radial_grid)
hex_grid = HexModelGrid((3, 3))
assert_raises(TypeError, ListricKinematicExtender, hex_grid)
grid = RasterModelGrid((3, 7))
grid.add_zeros("topographic__elevation", at="node")
assert_raises(
KeyError, ListricKinematicExtender, grid, track_crustal_thickness=True
)
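# --- Hedged usage note (not part of the original file): these tests are written
# for pytest; from a landlab checkout they could be run as
#   pytest tests/components/tectonics/test_listric_kinematic_extender.py
# (path taken from this dataset row's metadata).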

---
blob_id: 9e16b78b005558b78c5f98437463a3962a8a4899 | directory_id: 8c39ba92cc71ff78242477d3256f6ee3daa872c7 | content_id: 266e02ea210612ef126f57a2c4c35c21a3bbc8a9
repo_name: conan-io/conan | path: /conan/cli/formatters/list/binary_html_table.py | filename: binary_html_table.py | extension: py | length_bytes: 4,612
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
snapshot_id: eb4427e534a0edbb1fb06c753d5d9587faaef93c | revision_id: bac455d1329b6744cdc41747354a727c9233179f | branch_name: refs/heads/release/2.0
visit_date: 2023-09-03T18:51:54.345761 | revision_date: 2023-09-03T17:30:43 | committer_date: 2023-09-03T17:30:43
github_id: 47,190,624 | star_events_count: 7,754 | fork_events_count: 1,182
gha_event_created_at: 2023-09-14T15:16:09 | gha_created_at: 2015-12-01T13:17:02 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from collections import OrderedDict, defaultdict
from conans.model.package_ref import PkgReference
class RowResult(object):
def __init__(self, remote, reference, data):
self.remote = remote
self.reference = reference
self._data = data
@property
def recipe(self):
return self.reference
@property
def package_id(self):
return self._data['id']
def row(self, headers):
""" Returns package data according to headers """
assert isinstance(headers, Headers), "Wrong type: {}".format(type(headers))
for it in headers.keys:
try:
yield getattr(self, it)
except AttributeError:
yield self._data[it]
for it in headers.settings:
yield self._data['settings'].get(it, None)
for it in headers.options:
yield self._data['options'].get(it, None)
if headers.requires:
prefs = [PkgReference.loads(it) for it in self._data['requires']]
yield ', '.join(map(str, [it.ref for it in prefs]))
class Headers(object):
_preferred_ordering = ['os', 'arch', 'compiler', 'build_type']
def __init__(self, settings, options, requires, keys):
# Keys: columns to classify
self.keys = keys
self.options = options
self.requires = requires
# - Order settings
_settings = defaultdict(list)
for it in settings:
try:
category, _ = it.split('.', 1)
except ValueError:
_settings[it].append(it)
else:
_settings[category].append(it)
self.settings = []
for it in self._preferred_ordering:
if it in _settings:
self.settings.extend(sorted(_settings[it]))
for it, values in _settings.items():
if it not in self._preferred_ordering:
self.settings.extend(sorted(values))
def row(self, n_rows=2):
"""
Retrieve list of headers as a single list (1-row) or as a list of tuples with
settings organized by categories (2-row).
Example output:
1-row: ['os', 'arch', 'compiler', 'compiler.version', 'compiler.libcxx', 'build_type']
2-row: [('os', ['']), ('arch', ['']), ('compiler', ['', 'version', 'libcxx']),]
"""
headers = list(self.keys)
if n_rows == 1:
headers.extend(self.settings + self.options)
if self.requires:
headers.append('requires')
return headers
elif n_rows == 2:
headers = [(it, ['']) for it in headers]
settings = self._group_settings(self.settings)
headers.extend(settings)
headers.append(('options', self.options))
if self.requires:
headers.append(('requires', ['']))
return headers
else:
raise NotImplementedError("not yet")
@staticmethod
def _group_settings(settings):
"""
From one row to two-rows using '.' as separator
"""
ret = OrderedDict()
for setting in settings:
try:
category, value = setting.split(".", 1)
except ValueError:
ret.setdefault(setting, []).append('')
else:
ret.setdefault(category, []).append(value)
return [(key, values) for key, values in ret.items()]
class Results(object):
def __init__(self, results):
self._results = results
# Collect data inspecting the packages
_settings = set()
_options = set()
_remotes = set()
self.requires = False
for it in results:
_remotes.add(it['remote'])
for p in it['items'][0]['packages']:
_settings = _settings.union(list(p['settings'].keys()))
_options = _options.union(list(p['options'].keys()))
if len(p['requires']):
self.requires = True
self.settings = list(_settings)
self.options = list(_options)
self.remotes = list(_remotes)
def get_headers(self, keys=('remote', 'reference', 'package_id')):
return Headers(self.settings, self.options, self.requires, keys=keys)
def packages(self):
for it in self._results:
remote = it['remote']
reference = it['items'][0]['recipe']['id']
for p in it['items'][0]['packages']:
r = RowResult(remote, reference, p)
yield r
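# --- Illustrative sketch (assumed input, behavior derived from the code above):
# _group_settings() turns dotted setting names into (category, [subkeys]) pairs
# for the two-row header layout:
#   Headers._group_settings(["os", "compiler.version", "compiler.libcxx"])
#   -> [("os", [""]), ("compiler", ["version", "libcxx"])]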

---
blob_id: 258069732a85da7ee34589437a8b52c5707fd4fc | directory_id: 391fb5b11425d59ea917c6fed51fe1fa9c672764 | content_id: 74ce73ac860e92293a081660c56141521b718bdc
repo_name: gugarosa/opytimizer | path: /opytimizer/math/general.py | filename: general.py | extension: py | length_bytes: 3,399
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
snapshot_id: 89e60d582dee9e31b1723e35d08103d7f8f5d3e1 | revision_id: 7326a887ed8e3858bc99c8815048d56d02edf88c | branch_name: refs/heads/master
visit_date: 2023-08-01T08:09:12.055317 | revision_date: 2023-05-11T15:21:58 | committer_date: 2023-05-11T15:21:58
github_id: 109,152,650 | star_events_count: 602 | fork_events_count: 45
gha_event_created_at: 2023-09-07T14:26:13 | gha_created_at: 2017-11-01T16:04:01 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
"""General-based mathematical functions.
"""
from itertools import islice
from typing import Any, Iterable, List, Optional
import numpy as np
import opytimizer.math.random as r
def euclidean_distance(x: np.ndarray, y: np.ndarray) -> float:
"""Calculates the Euclidean distance between two n-dimensional points.
Args:
x: N-dimensional point.
y: N-dimensional point.
Returns:
(float): Euclidean distance between `x` and `y`.
"""
distance = np.linalg.norm(x - y)
return distance
def kmeans(
x: np.ndarray,
n_clusters: Optional[int] = 1,
max_iterations: Optional[int] = 100,
tol: Optional[float] = 1e-4,
) -> np.ndarray:
"""Performs the K-Means clustering over the input data.
Args:
x: Input array with a shape equal to (n_samples, n_variables, n_dimensions).
n_clusters: Number of clusters.
max_iterations: Maximum number of clustering iterations.
tol: Tolerance value to stop the clustering.
Returns:
(np.ndarray): An array holding the assigned cluster per input sample.
"""
n_samples, n_variables, n_dimensions = x.shape[0], x.shape[1], x.shape[2]
centroids = np.zeros((n_clusters, n_variables, n_dimensions))
labels = np.zeros(n_samples)
for i in range(n_clusters):
idx = r.generate_integer_random_number(0, n_samples)
centroids[i] = x[idx]
for _ in range(max_iterations):
dists = np.squeeze(np.array([np.linalg.norm(x - c, axis=1) for c in centroids]))
updated_labels = np.squeeze(np.array(np.argmin(dists, axis=0)))
ratio = np.sum(labels != updated_labels) / n_samples
if ratio <= tol:
break
labels = updated_labels
for i in range(n_clusters):
centroid_samples = x[labels == i]
if centroid_samples.shape[0] > 0:
centroids[i] = np.mean(centroid_samples, axis=0)
return labels
def n_wise(x: List[Any], size: Optional[int] = 2) -> Iterable:
"""Iterates over an iterator and returns n-wise samples from it.
Args:
x (list): Values to be iterated over.
size: Amount of samples per iteration.
Returns:
(Iterable): N-wise samples from the iterator.
"""
iterator = iter(x)
return iter(lambda: tuple(islice(iterator, size)), ())
def tournament_selection(
fitness: List[float], n: int, size: Optional[int] = 2
) -> np.array:
"""Selects n-individuals based on a tournament selection.
Args:
fitness (list): List of individuals fitness.
n: Number of individuals to be selected.
size: Tournament size.
Returns:
(np.array): Indexes of selected individuals.
"""
selected = []
for _ in range(n):
step = [np.random.choice(fitness) for _ in range(size)]
selected.append(np.where(min(step) == fitness)[0][0])
return selected
def weighted_wheel_selection(weights: List[float]) -> int:
"""Selects an individual from a weight-based roulette.
Args:
weights: List of individuals weights.
Returns:
(int): Weight-based roulette individual.
"""
cumulative_sum = np.cumsum(weights)
prob = r.generate_uniform_random_number() * cumulative_sum[-1]
for i, c_sum in enumerate(cumulative_sum):
if c_sum > prob:
return i
return None
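# --- Quick self-contained sketch of two helpers above (toy values are assumptions,
# not part of the original file) ---
if __name__ == "__main__":
    print(euclidean_distance(np.array([0.0, 0.0]), np.array([3.0, 4.0])))  # 5.0
    print(list(n_wise([1, 2, 3, 4], size=2)))  # [(1, 2), (3, 4)]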

---
blob_id: 3bc9f6db9040c164bcbadacfeaf13b3f40436623 | directory_id: 2dd26e031162e75f37ecb1f7dd7f675eeb634c63 | content_id: 78336f800e149b358c2fd9c9d1fb73b32f0ee76b
repo_name: NVIDIA/NeMo | path: /tools/nemo_forced_aligner/utils/viterbi_decoding.py | filename: viterbi_decoding.py | extension: py | length_bytes: 7,494
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
snapshot_id: 1b001fa2ae5d14defbfd02f3fe750c5a09e89dd1 | revision_id: c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7 | branch_name: refs/heads/main
visit_date: 2023-08-21T15:28:04.447838 | revision_date: 2023-08-21T00:49:36 | committer_date: 2023-08-21T00:49:36
github_id: 200,722,670 | star_events_count: 7,957 | fork_events_count: 1,986
gha_event_created_at: 2023-09-14T18:49:54 | gha_created_at: 2019-08-05T20:16:42 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from utils.constants import V_NEGATIVE_NUM
def viterbi_decoding(log_probs_batch, y_batch, T_batch, U_batch, viterbi_device):
"""
Do Viterbi decoding with an efficient algorithm (the only for-loop in the 'forward pass' is over the time dimension).
Args:
log_probs_batch: tensor of shape (B, T_max, V). The parts of log_probs_batch which are 'padding' are filled
with 'V_NEGATIVE_NUM' - a large negative number which represents a very low probability.
y_batch: tensor of shape (B, U_max) - contains token IDs including blanks in every other position. The parts of
y_batch which are padding are filled with the number 'V'. V = the number of tokens in the vocabulary + 1 for
the blank token.
T_batch: tensor of shape (B, 1) - contains the durations of the log_probs_batch (so we can ignore the
parts of log_probs_batch which are padding)
U_batch: tensor of shape (B, 1) - contains the lengths of y_batch (so we can ignore the parts of y_batch
which are padding).
viterbi_device: the torch device on which Viterbi decoding will be done.
Returns:
alignments_batch: list of lists containing locations for the tokens we align to at each timestep.
Looks like: [[0, 0, 1, 2, 2, 3, 3, ..., ], ..., [0, 1, 2, 2, 2, 3, 4, ....]].
Each list inside alignments_batch is of length T_batch[location of utt in batch].
"""
B, T_max, _ = log_probs_batch.shape
U_max = y_batch.shape[1]
# transfer all tensors to viterbi_device
log_probs_batch = log_probs_batch.to(viterbi_device)
y_batch = y_batch.to(viterbi_device)
T_batch = T_batch.to(viterbi_device)
U_batch = U_batch.to(viterbi_device)
# make tensor that we will put at timesteps beyond the duration of the audio
padding_for_log_probs = V_NEGATIVE_NUM * torch.ones((B, T_max, 1), device=viterbi_device)
# make log_probs_padded tensor of shape (B, T_max, V+1) where all of
# log_probs_padded[:,:,-1] is the 'V_NEGATIVE_NUM'
log_probs_padded = torch.cat((log_probs_batch, padding_for_log_probs), dim=2)
# initialize v_prev - tensor of previous timestep's viterbi probabilities, of shape (B, U_max)
v_prev = V_NEGATIVE_NUM * torch.ones((B, U_max), device=viterbi_device)
v_prev[:, :2] = torch.gather(input=log_probs_padded[:, 0, :], dim=1, index=y_batch[:, :2])
# initialize backpointers_rel - which contains values like 0 to indicate the backpointer is to the same u index,
# 1 to indicate the backpointer pointing to the u-1 index and 2 to indicate the backpointer is pointing to the u-2 index
backpointers_rel = -99 * torch.ones((B, T_max, U_max), dtype=torch.int8, device=viterbi_device)
# Make a letter_repetition_mask the same shape as y_batch
# the letter_repetition_mask will have 'True' where the token (including blanks) is the same
# as the token two places before it in the ground truth (and 'False' everywhere else).
# We will use letter_repetition_mask to determine whether the Viterbi algorithm needs to look two tokens back or
# three tokens back
y_shifted_left = torch.roll(y_batch, shifts=2, dims=1)
letter_repetition_mask = y_batch - y_shifted_left
letter_repetition_mask[:, :2] = 1 # make sure we don't apply the mask to the first 2 tokens
letter_repetition_mask = letter_repetition_mask == 0
for t in range(1, T_max):
# e_current is a tensor of shape (B, U_max) of the log probs of every possible token at the current timestep
e_current = torch.gather(input=log_probs_padded[:, t, :], dim=1, index=y_batch)
# apply a mask to e_current to cope with the fact that we do not keep the whole v_matrix and continue
# calculating viterbi probabilities during some 'padding' timesteps
t_exceeded_T_batch = t >= T_batch
U_can_be_final = torch.logical_or(
torch.arange(0, U_max, device=viterbi_device).unsqueeze(0) == (U_batch.unsqueeze(1) - 0),
torch.arange(0, U_max, device=viterbi_device).unsqueeze(0) == (U_batch.unsqueeze(1) - 1),
)
mask = torch.logical_not(torch.logical_and(t_exceeded_T_batch.unsqueeze(1), U_can_be_final,)).long()
e_current = e_current * mask
# v_prev_shifted is a tensor of shape (B, U_max) of the viterbi probabilities 1 timestep back and 1 token position back
v_prev_shifted = torch.roll(v_prev, shifts=1, dims=1)
# by doing a roll shift of size 1, we have brought the viterbi probability in the final token position to the
# first token position - let's overcome this by 'zeroing out' the probabilities in the first token position
v_prev_shifted[:, 0] = V_NEGATIVE_NUM
# v_prev_shifted2 is a tensor of shape (B, U_max) of the viterbi probabilities 1 timestep back and 2 token position back
v_prev_shifted2 = torch.roll(v_prev, shifts=2, dims=1)
v_prev_shifted2[:, :2] = V_NEGATIVE_NUM # zero out as we did for v_prev_shifted
# use our letter_repetition_mask to remove the connections between 2 blanks (so we don't skip over a letter)
# and to remove the connections between 2 consecutive letters (so we don't skip over a blank)
v_prev_shifted2.masked_fill_(letter_repetition_mask, V_NEGATIVE_NUM)
# we need this v_prev_dup tensor so we can calculate the viterbi probability of every possible
# token position simultaneously
v_prev_dup = torch.cat(
(v_prev.unsqueeze(2), v_prev_shifted.unsqueeze(2), v_prev_shifted2.unsqueeze(2),), dim=2,
)
# candidates_v_current are our candidate viterbi probabilities for every token position, from which
# we will pick the max and record the argmax
candidates_v_current = v_prev_dup + e_current.unsqueeze(2)
# we straight away save results in v_prev instead of v_current, so that the variable v_prev will be ready for the
# next iteration of the for-loop
v_prev, bp_relative = torch.max(candidates_v_current, dim=2)
backpointers_rel[:, t, :] = bp_relative
# trace backpointers
alignments_batch = []
for b in range(B):
T_b = int(T_batch[b])
U_b = int(U_batch[b])
if U_b == 1: # i.e. we put only a blank token in the reference text because the reference text is empty
current_u = 0 # set initial u to 0 and let the rest of the code block run as usual
else:
current_u = int(torch.argmax(v_prev[b, U_b - 2 : U_b])) + U_b - 2
alignment_b = [current_u]
for t in range(T_max - 1, 0, -1):
current_u = current_u - int(backpointers_rel[b, t, current_u])
alignment_b.insert(0, current_u)
alignment_b = alignment_b[:T_b]
alignments_batch.append(alignment_b)
return alignments_batch
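# --- Toy smoke test (shapes and values are arbitrary assumptions; running it
# needs the same utils.constants import as the module itself) ---
if __name__ == "__main__":
    B, T, V = 1, 4, 3
    log_probs = torch.log_softmax(torch.randn(B, T, V), dim=-1)
    y = torch.tensor([[0, 1, 0]])  # blank, token, blank
    print(viterbi_decoding(log_probs, y, torch.tensor([T]), torch.tensor([3]),
                           torch.device("cpu")))  # one alignment list of length T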

---
blob_id: 4ab82b0fbdcd92c9c2260cd7824f81796d8f88a8 | directory_id: 74218ee28b26a2b0c28c69c01eabb8b81b9d1f1d | content_id: d01e3e74145c14c88f70d8c3d19061aa95c101c6
repo_name: readthedocs/sphinx-autoapi | path: /tests/python/pyiexample/example/example.pyi | filename: example.pyi | extension: pyi | length_bytes: 1,089
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
snapshot_id: 5f2cd9f996693b14325e9452165731b6fb0a89e6 | revision_id: 92437c9af72b75fbccb451da95f85dbbf0c3f0da | branch_name: refs/heads/main
visit_date: 2023-09-02T19:45:48.149162 | revision_date: 2023-07-08T17:38:24 | committer_date: 2023-07-08T17:38:24
github_id: 36,524,868 | star_events_count: 277 | fork_events_count: 129
gha_event_created_at: 2023-08-21T12:48:43 | gha_created_at: 2015-05-29T19:32:28 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# -*- coding: utf-8 -*-
"""Example module
This is a description
"""
class Foo(object):
"""Can we parse arguments from the class docstring?
:param attr: Set an attribute.
:type attr: str
"""
class_var = 42 #: Class var docstring
another_class_var = 42
"""Another class var docstring"""
class_var_without_value = ...
"""A class var without a value."""
class Meta(object):
"""A nested class just to test things out"""
@classmethod
def foo():
"""The foo class method"""
...
def __init__(self, attr):
"""Constructor docstring"""
...
def method_okay(self, foo=None, bar=None):
"""This method should parse okay"""
...
def method_multiline(self, foo=None, bar=None, baz=None):
"""This is on multiple lines, but should parse okay too
pydocstyle gives us lines of source. Test if this means that multiline
definitions are covered in the way we're anticipating here
"""
...
def method_without_docstring(self): ...

---
blob_id: 4b16c7532916eac4d4b9e3efe9c504fbb68cfbfc | directory_id: 89b0ace1c0b87f063fc598efc4232efd98604de7 | content_id: e7d963f84f28341fe3c9e6f7e046425aaf100f34
repo_name: neozhaoliang/pywonderland | path: /src/misc/mandelbrot.py | filename: mandelbrot.py | extension: py | length_bytes: 1,102
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
snapshot_id: bdcc19d46caf12cd7bf50d3852fc6465119a4cd9 | revision_id: e3d24dc5c182a29db096163d1059afafaa79ce89 | branch_name: refs/heads/master
visit_date: 2023-06-28T20:52:23.784200 | revision_date: 2023-03-21T02:43:10 | committer_date: 2023-03-21T02:43:10
github_id: 73,236,487 | star_events_count: 4,549 | fork_events_count: 479
gha_event_created_at: 2022-01-30T08:05:28 | gha_created_at: 2016-11-08T23:34:13 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A fast Mandelbrot set wallpaper renderer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
reddit discussion:
"https://www.reddit.com/r/math/comments/2abwyt/smooth_colour_mandelbrot"
"""
import numpy as np
from numba import jit
from PIL import Image
MAXITERS = 200
RADIUS = 100
@jit
def color(z, i):
v = np.log2(i + 1 - np.log2(np.log2(abs(z)))) / 5
if v < 1.0:
return v ** 4, v ** 2.5, v
else:
v = max(0, 2 - v)
return v, v ** 1.5, v ** 3
@jit
def iterate(c):
z = 0j
for i in range(MAXITERS):
if z.real * z.real + z.imag * z.imag > RADIUS:
return color(z, i)
z = z * z + c
return 0, 0, 0
def main(xmin, xmax, ymin, ymax, width, height):
y, x = np.ogrid[ymax : ymin : height * 1j, xmin : xmax : width * 1j]
z = x + y * 1j
R, G, B = np.asarray(np.frompyfunc(iterate, 1, 3)(z)).astype(float)
img = np.stack((R, G, B), axis=2)
Image.fromarray(np.uint8(img * 255)).save("mandelbrot.png")
if __name__ == "__main__":
main(-2.1, 0.8, -1.16, 1.16, 800, 640)

---
blob_id: 8340969e5a5056ad1568bdcbef03bbe4dd40591c | directory_id: cbbdf8a0d08d6f0684e6b4bc7de4b32d11f6e36d | content_id: f8864f75974102ea4c02645455f7e5639c6d148c
repo_name: RUCAIBox/TextBox | path: /textbox/model/abstract_model.py | filename: abstract_model.py | extension: py | length_bytes: 6,325
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
snapshot_id: e5ae0429bc3c60364b1584f32335190e7c476dc7 | revision_id: 0d7debd9817c0f0278a52506e41ac2677d567550 | branch_name: refs/heads/2.0.0
visit_date: 2023-08-15T22:50:06.409200 | revision_date: 2023-05-18T02:26:52 | committer_date: 2023-05-18T02:26:52
github_id: 311,009,578 | star_events_count: 1,002 | fork_events_count: 130
gha_event_created_at: 2023-07-27T14:51:48 | gha_created_at: 2020-11-08T07:35:46 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
import torch
import torch.nn as nn
from textbox import CLM_MODELS, SEQ2SEQ_MODELS, RNN_MODELS, PLM_MODELS
from transformers import EncoderDecoderModel
import os
from typing import List, Optional, Tuple, Union
from transformers.modeling_utils import get_parameter_dtype
from collections import OrderedDict
class AbstractModel(nn.Module):
r"""Base class for all models
"""
def __init__(self, config, tokenizer):
# load parameters info
super(AbstractModel, self).__init__()
self.device = config['device']
self.config = config
self.tokenizer = tokenizer
self.source_max_length = config['src_len']
self.target_max_length = config['tgt_len']
# check model
self.model_name = config['model_name']
self.is_casual_model = bool(self.model_name in CLM_MODELS)
self.is_seq2seq_model = bool(self.model_name in SEQ2SEQ_MODELS or self.model_name in RNN_MODELS)
self.is_prompt_tuning = 'prompt-tuning' in config['efficient_methods']
self.label_smoothing = config['label_smoothing'] if config['label_smoothing'] else 0.
def generate_setting(self, config):
# generation settings
self.generation_kwargs = {}
self.generation_kwargs['max_length'] = self.target_max_length
if self.model_name in PLM_MODELS:
# transformer models
self.generation_kwargs['decoder_start_token_id'] = \
self.configuration.decoder_start_token_id \
if self.model_name != 'mbart' else \
self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang]
self.generation_kwargs.update(config['generation_kwargs'] or {})
def generate(self, batch_data):
r"""Predict the texts conditioned on a noise or sequence.
Args:
batch_data (Corpus): Corpus class of a single batch.
Returns:
torch.Tensor: Generated text, shape: [batch_size, max_len]
"""
raise NotImplementedError
def _process_prompt_tuning_input(self, inputs, batch):
raise NotImplementedError
def __str__(self):
"""
Model prints with number of trainable parameters
"""
params = sum(p.numel() for p in self.parameters() if p.requires_grad)
return super().__str__() + '\nTrainable parameters: {}'.format(params)
def forward(self, batch, epoch_idx=-1):
inputs = self.process_forward_inputs(batch)
if self.is_prompt_tuning:
inputs = self._process_prompt_tuning_input(inputs, batch)
outputs = self.model(**inputs)
if self.label_smoothing:
loss_fct = nn.CrossEntropyLoss(label_smoothing=self.label_smoothing)
vocab_size = outputs.logits.size(-1)
if self.is_casual_model:
logits = outputs.logits[..., :-1, :].contiguous()
labels = inputs['labels'][..., 1:].contiguous()
else:
logits = outputs.logits
labels = inputs['labels']
return loss_fct(logits.view(-1, vocab_size), labels.view(-1))
else:
return outputs.loss
def generate(self, batch, accelerator):
inputs = self.process_generate_inputs(batch)
if self.is_prompt_tuning:
inputs = self._process_prompt_tuning_input(inputs, batch)
if self.is_casual_model:
input_ids_len = inputs['input_ids'].shape[1] if 'input_ids' in inputs else inputs['inputs_embeds'].shape[1]
self.generation_kwargs['max_length'] = self.target_max_length + input_ids_len
# sample_outputs = self.model.generate(**inputs, **self.generation_kwargs)
sample_outputs = accelerator.unwrap_model(self.model).generate(**inputs, **self.generation_kwargs)
sample_outputs = accelerator.pad_across_processes(sample_outputs, dim=1, pad_index=self.tokenizer.pad_token_id)
sample_outputs = accelerator.gather((sample_outputs))
if self.is_casual_model:
sample_outputs = sample_outputs[:, input_ids_len:]
decode_kwargs = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': False}
generated_text = self.tokenizer.batch_decode(sample_outputs, **decode_kwargs)
generated_text = [g.strip() or 'NULL' for g in generated_text]
return generated_text
def process_forward_inputs(self, batch):
inputs = self.process_generate_inputs(batch)
inputs.update({'labels': batch['target_ids'].to(self.device)})
return inputs
def process_generate_inputs(self, batch):
inputs = {
'input_ids': batch['source_ids'].to(self.device),
'attention_mask': batch['source_mask'].to(self.device),
}
return inputs
def from_pretrained(self, save_directory: Union[str, os.PathLike]):
if self.model_name in ['bert2bert', 'xlm-roberta', 'xlm']:
self.model = EncoderDecoderModel.from_pretrained(save_directory)
else:
model_path = os.path.join(save_directory, 'pytorch_model.bin')
model_load = torch.load(model_path, map_location=self.device)
self.load_state_dict(model_load)
def save_pretrained(
self,
save_directory: Union[str, os.PathLike],
is_main_process: bool = True,
):
# save the string version of dtype to the config, e.g. convert torch.float32 => "float32"
# we currently don't use this setting automatically, but may start to use with v5
dtype = get_parameter_dtype(self)
self.configuration.torch_dtype = str(dtype).split(".")[1]
# Attach architecture to the config
self.configuration.architectures = [self.model.__class__.__name__]
# Save the config
if is_main_process:
self.configuration.save_pretrained(save_directory)
# Save the tokenizer
if self.tokenizer is not None:
self.tokenizer.save_pretrained(save_directory)
if self.model_name in ['bert2bert', 'xlm-roberta', 'xlm']:
self.model.save_pretrained(save_directory)
else:
state_dict = OrderedDict([(k, v.detach().cpu()) for k, v in self.state_dict().items()])
torch.save(state_dict, os.path.join(save_directory, 'pytorch_model.bin'))
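# --- Standalone sketch of the label-smoothing branch above (toy shapes are
# assumptions; independent of the textbox config machinery) ---
if __name__ == "__main__":
    logits = torch.randn(2, 5, 10)                   # [batch, seq, vocab]
    labels = torch.randint(0, 10, (2, 5))
    shift_logits = logits[..., :-1, :].contiguous()  # causal-LM shift
    shift_labels = labels[..., 1:].contiguous()
    loss_fct = nn.CrossEntropyLoss(label_smoothing=0.1)
    print(loss_fct(shift_logits.view(-1, 10), shift_labels.view(-1)).item())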

---
blob_id: 51a2e0c46309ccdeb1c86d7a635952fee99d73bd | directory_id: 6662fa24dfab8ace6c8a4852bc4e8b91e7abd9c4 | content_id: 8e296e3d210d8beb06b40748a5d845e16a1acd54
repo_name: RoboDK/RoboDK-API | path: /Python/Examples/Macros/DisplayItem.py | filename: DisplayItem.py | extension: py | length_bytes: 1,306
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference", "MIT"] | license_type: permissive | gha_license_id: NOASSERTION
snapshot_id: 7b71da015de3cabeaebf83e559bd3afb2aa189eb | revision_id: 9fa5b7bcc30bf0aab70e4b46aff9a0e0f4f27fd3 | branch_name: refs/heads/master
visit_date: 2023-08-30T11:05:04.165912 | revision_date: 2023-08-24T23:13:39 | committer_date: 2023-08-24T23:13:39
github_id: 123,698,891 | star_events_count: 206 | fork_events_count: 115
gha_event_created_at: 2023-07-19T09:47:15 | gha_created_at: 2018-03-03T14:24:09 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# This script allows you to show or hide any objects as a program call
#
# For example, you can call the program like this:
# DisplayItem(KUKA KR3, Show)
# DisplayItem(KUKA KR3, Hide)
from robodk.robolink import *
from robodk.robodialogs import *
# Name and visible variables
ItemName = ''
visible = True
if len(sys.argv) < 2:
# Ask the user to change the visibility
print("Invalid parameters. This function must be called as DisplayItem(ItemName, show/hide)")
print('Number of arguments: ' + str(len(sys.argv)))
#raise Exception('Invalid parameters provided: ' + str(sys.argv))
entry = mbox('Alter the visibility of any item. Type:\nRobotName-show/hide\n\nNote: this can be called as a program.\nExample: DisplayItem(RobotName, hide)', entry='RobotName-hide')
if not entry:
print('Operation cancelled')
quit(0)
name_value = entry.split('-')
if len(name_value) < 2:
raise Exception('Invalid entry: ' + entry)
ItemName = name_value[0].strip()
visible = name_value[1].strip().lower() != "hide"
else:
# Parse command line options
ItemName = sys.argv[1].strip()
visible = sys.argv[2].strip().lower() != "hide"
# Start the RoboDK API and set the item visibility
RDK = Robolink()
item = RDK.Item(ItemName)
item.setVisible(visible)

---
blob_id: 2e3a5351b42d9e49c4f2c3d92848e363391bc18b | directory_id: b74320ad439e37dfa48cd8db38dab3b7a20a36ff | content_id: 41e213c43deabe70f5bd56969e49ada531e79ddb
repo_name: huggingface/diffusers | path: /tests/pipelines/text_to_video/test_video_to_video.py | filename: test_video_to_video.py | extension: py | length_bytes: 6,961
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
snapshot_id: c82beba1ec5f0aba01b6744040a5accc41ec2493 | revision_id: 5eeedd9e3336882d598091e191559f67433b6427 | branch_name: refs/heads/main
visit_date: 2023-08-29T01:22:52.237910 | revision_date: 2023-08-28T18:16:27 | committer_date: 2023-08-28T18:16:27
github_id: 498,011,141 | star_events_count: 17,308 | fork_events_count: 3,158
gha_event_created_at: 2023-09-14T20:57:44 | gha_created_at: 2022-05-30T16:04:02 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = VideoToVideoSDPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
test_attention_slicing = False
# No `output_type`.
required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
]
)
def get_dummy_components(self):
torch.manual_seed(0)
unet = UNet3DConditionModel(
block_out_channels=(32, 64, 64, 64),
layers_per_block=2,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
cross_attention_dim=32,
attention_head_dim=4,
)
scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
clip_sample=False,
set_alpha_to_one=False,
)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
sample_size=128,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
hidden_act="gelu",
projection_dim=512,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def get_dummy_inputs(self, device, seed=0):
# 3 frames
video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"video": video,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def test_text_to_video_default_case(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = VideoToVideoSDPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
inputs["output_type"] = "np"
frames = sd_pipe(**inputs).frames
image_slice = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),
reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)
# (todo): sayakpaul
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
def test_inference_batch_consistent(self):
pass
# (todo): sayakpaul
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
def test_inference_batch_single_identical(self):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
def test_num_images_per_prompt(self):
pass
def test_progress_bar(self):
return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
def test_two_step_model(self):
pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()
# 10 frames
generator = torch.Generator(device="cpu").manual_seed(0)
video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
video = video.to("cuda")
prompt = "Spiderman is surfing"
video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames
expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
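# --- Hedged usage note (not part of the original file): the fast tests above
# build tiny dummy components and run on CPU; a typical invocation (the -k
# filter expression is an assumption) would be
#   pytest tests/pipelines/text_to_video/test_video_to_video.py -k FastTests
# The slow tests need CUDA plus the real cerspense/zeroscope_v2_XL weights.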

---
blob_id: 99542fb845666434efd143cb3c22ffe75b03e19c | directory_id: 94724578994ab1438dcefb51b7ef4d8570da5d4c | content_id: e098d5f9378771d5668db1c0fb9029123df38020
repo_name: PegasusWang/collection_python | path: /iterm2/changebg.py | filename: changebg.py | extension: py | length_bytes: 1,302
detected_licenses: [] | license_type: no_license | gha_license_id: null
snapshot_id: 6648d83203634abf44fd42c0b37b0bf7cc406d8f | revision_id: 9ef019a737a0817860d3184924c67a0833bd1252 | branch_name: refs/heads/master
visit_date: 2023-09-01T23:15:39.813635 | revision_date: 2023-08-24T06:46:12 | committer_date: 2023-08-24T06:46:12
github_id: 43,693,872 | star_events_count: 130 | fork_events_count: 90
gha_event_created_at: 2021-04-26T15:12:55 | gha_created_at: 2015-10-05T15:28:15 | gha_language: JavaScript
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
#!/usr/bin/env python3
"""
https://superuser.com/questions/1068105/iterm2-os-x-change-background-image-for-current-window-from-shell
https://github.com/gnachman/iTerm2/blob/master/api/library/python/iterm2/iterm2/profile.py
"""
import asyncio
import random
import iterm2
async def main(connection):
"""修改背景图"""
while True:
images = [
"/Users/pegasus/Pictures/壁纸/低头.jpg",
"/Users/pegasus/Pictures/壁纸/后背.jpg",
"/Users/pegasus/Pictures/壁纸/双枪.jpg",
"/Users/pegasus/Pictures/壁纸/射箭.jpg",
]
image_path = random.choice(images)
app = await iterm2.async_get_app(connection)
session = app.current_terminal_window.current_tab.current_session
profile = await session.async_get_profile()
await profile.async_set_background_image_location(image_path)
await asyncio.sleep(3)
async def disable_image(connection):
app = await iterm2.async_get_app(connection)
session = app.current_terminal_window.current_tab.current_session
profile = await session.async_get_profile()
await profile.async_set_background_image_location("")
if __name__ == "__main__":
iterm2.run_until_complete(main)
# iterm2.run_until_complete(disable_image)

---
blob_id: bc34f9d870613fdeead36a505b36c3caf0649f25 | directory_id: 57914280e8505404feea3417504deab69b74ab48 | content_id: 2bde4edd06f3b95c0742b7cc7289e2c017a539e1
repo_name: Pack3tL0ss/ConsolePi | path: /src/pypkg/consolepi/power/outlets.py | filename: outlets.py | extension: py | length_bytes: 31,884
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
snapshot_id: 1799ef3ec6e4b5c8629582a1f717e2b4b0d612bc | revision_id: f7a19421bcf222f851bd138334954139e2e37454 | branch_name: refs/heads/master
visit_date: 2023-08-29T07:07:54.193774 | revision_date: 2023-08-07T20:30:54 | committer_date: 2023-08-07T20:30:54
github_id: 163,647,608 | star_events_count: 147 | fork_events_count: 16
gha_event_created_at: 2023-09-10T18:45:07 | gha_created_at: 2018-12-31T07:10:13 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
#!/etc/ConsolePi/venv/bin/python3
import json
import threading
import time
from typing import Any, Dict, List, Tuple, Union
try:
import RPi.GPIO as GPIO
is_rpi = True
except (RuntimeError, ModuleNotFoundError, ImportError):
is_rpi = False
from consolepi import log, config, requests, utils # type: ignore
from consolepi.power import DLI # type: ignore
TIMING = False
class ConsolePiPowerException(Exception):
pass
class Outlets:
def __init__(self):
if is_rpi:
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
self._dli = {}
# Some convenience Bools used by menu to determine what options to display
self.dli_exists = True if 'dli' in config.outlet_types or config.do_dli_menu else False
self.tasmota_exists = True if 'tasmota' in config.outlet_types else False
self.esphome_exists = True if 'esphome' in config.outlet_types else False # TODO Future
self.gpio_exists = True if 'gpio' in config.outlet_types else False
self.linked_exists = True if config.linked_exists else False
if self.dli_exists or self.tasmota_exists or self.esphome_exists or self.gpio_exists:
self.outlets_exists = True
else:
self.outlets_exists = False
self.data: Dict[str, Any] = config.outlets
if config.power:
self.pwr_start_update_threads()
def linked(self):
pass
def do_tasmota_cmd(self, address, command=None):
'''
Perform Operation on Tasmota outlet:
params:
address: IP or resolvable hostname
command:
True | 'ON': power the outlet on
False | 'OFF': power the outlet off
'Toggle': Toggle the outlet
'cycle': Cycle Power on outlets that are powered On
TODO: Right now this method does not verify if the port is currently in an ON state
before allowing 'cycle', which can result in it powering on the port. consolepi-menu
verifies status before allowing the command, but it *may* be that other outlets
are handled by this library... check & make consistent
TODO: remove int returns and re-factor all returns to use a return class (like requests)
'''
# sub to make the api call to the tasmota device
def tasmota_req(**kwargs):
querystring = kwargs['querystring']
try:
response = requests.request("GET", url, headers=headers, params=querystring, timeout=config.so_timeout)
if response.status_code == 200:
if json.loads(response.text)['POWER'] == 'ON':
_response = True
elif json.loads(response.text)['POWER'] == 'OFF':
_response = False
else:
_response = 'invalid state returned {}'.format(response.text)
else:
_response = '[{}] error returned {}'.format(response.status_code, response.text)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError):
_response = 'Unreachable'
except requests.exceptions.RequestException as e:
log.debug(f"[tasmota_req] {url.replace('http://', '').replace('https://', '').split('/')[0]} Exception: {e}")
_response = 'Unreachable ~ hit catchall exception handler' # determine if other exception types are possible
return _response
# -------- END SUB --------
url = 'http://' + address + '/cm'
headers = {
'Cache-Control': "no-cache",
'Connection': "keep-alive",
'cache-control': "no-cache"
}
querystring = {"cmnd": "Power"}
cycle = False
if command is not None:
if isinstance(command, bool):
command = 'ON' if command else 'OFF'
command = command.upper()
if command in ['ON', 'OFF', 'TOGGLE']:
querystring = {"cmnd": f"Power {command}"}
elif command == 'CYCLE':
cur_state = tasmota_req(querystring={"cmnd": "Power"})
if isinstance(cur_state, bool):
if cur_state:
querystring = {"cmnd": "Power OFF"}
cycle = True
else:
return False # return False for cycle if outlet was off indicating state is invalid for cycle
else:
return cur_state # an error occurred getting current state
else:
raise ConsolePiPowerException(f'Invalid Type {type(command)} passed to do_tasmota_cmd')
# -- // Send Request to TASMOTA \\ --
r = tasmota_req(querystring=querystring)
if cycle:
if isinstance(r, bool):
if not r:
time.sleep(config.cycle_time)
r = tasmota_req(querystring={"cmnd": "Power ON"})
else:
return 'Unexpected response, port returned on state expected off'
return r
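    # Illustrative usage sketch (not executed here; the address below is made up).
    # Shows the return contract of do_tasmota_cmd: bool for a valid state, str for errors.
    #   out = Outlets()
    #   state = out.do_tasmota_cmd('192.168.1.50', command='toggle')
    #   if isinstance(state, bool):
    #       print('outlet is now', 'ON' if state else 'OFF')
    #   else:
    #       print('error:', state)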
def do_esphome_cmd(self, address, relay_id, command=None):
'''Perform Operation on espHome outlets.
Arguments:
address {str} -- ip of fqdn of espHome outlet
relay_id {str} -- The id of the relay/port
Keyword Arguments:
command {Bool|str|None} -- The command to perform on the outlet (default: {None})
None (Default): get current state of outlet
True | 'ON': power the outlet on
False | 'OFF': power the outlet off
'Toggle': Toggle the outlet
'cycle': Cycle Power on outlets that are powered On
Returns:
Bool or str -- Bool indicating state of outlet after operation or str with error text
'''
def esphome_req(*args, command=command):
'''sub function to perform operation on outlet
'''
try:
method = "GET" if command is None else "POST"
response = requests.request(method, url=url,
headers=headers, timeout=config.so_timeout)
if response.status_code == 200:
if command is None:
_response = response.json().get('value')
else:
# espHome returns status Code 200 with no content for /toggle
if command in ['toggle', 'cycle']:
_response = not cur_state
else:
_response = command # success return bool command
else:
_response = '[{}] error returned {}'.format(response.status_code, response.text)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError):
_response = 'Unreachable'
except requests.exceptions.RequestException as e:
log.debug(f"[esphome_req] {url.replace('http://', '').replace('https://', '').split('/')[0]} Exception: {e}")
                _response = 'Unreachable'  # So I can determine if other exception types are possible when unreachable
return _response
# -------- END SUB --------
url = status_url = 'http://' + address + '/switch/' + str(relay_id)
headers = {
'Cache-Control': "no-cache",
'Connection': "keep-alive",
'cache-control': "no-cache"
}
# -- Get initial State of Outlet --
cur_state = esphome_req(command=None)
cycle = False
if command is None:
return cur_state
elif isinstance(command, bool):
if command: # Turn On Outlet
url = status_url + '/turn_on'
if cur_state is True:
return cur_state
else: # Turn Off Outlet
url = status_url + '/turn_off'
if cur_state is False:
return cur_state
elif command in ['toggle', 'cycle']:
url = status_url + '/toggle'
if command == 'cycle':
if cur_state:
cycle = True
else:
return False # Cycle invalid for outlets in off state
else:
return f'[PWR-ESP] DEV Note: Invalid command \'{command}\' passed to func'
# -- // Send Request to esphome \\ --
r = esphome_req(command=command)
if isinstance(r, bool):
if cycle:
if r is False:
cur_state = False
time.sleep(config.cycle_time)
r = esphome_req()
else:
return '[PWR-ESP] Unexpected response, port returned on state expected off'
return r
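    # Illustrative usage sketch (hypothetical address/relay id) showing do_esphome_cmd:
    #   cur = out.do_esphome_cmd('esp-outlet.local', 'relay_1')          # query current state
    #   new = out.do_esphome_cmd('esp-outlet.local', 'relay_1', True)    # power on
    #   out.do_esphome_cmd('esp-outlet.local', 'relay_1', 'cycle')       # only valid when ON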
def load_dli(self, address, username, password):
        '''
        Returns instance of DLI class

        Response: tuple
            (DLI class object, bool): bool is True if the class was previously instantiated ~ needs update,
                                      False if the class was just instantiated ~ data is fresh
            i.e.: (<dli object>, True)
        '''
if not self._dli.get(address):
try:
self._dli[address] = DLI(address, username, password, timeout=config.dli_timeout, log=log)
except Exception as e:
log.warning(f"[PWR-DLI] DLI @ {address} is now unreachable {e.__class__.__name__}", show=True)
return None, None
# --// Return Pass or fail based on reachability \\--
if self._dli[address].reachable:
return self._dli[address], False
else:
return None, None
# --// DLI Already Loaded \\--
else:
return self._dli[address], True
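    # Illustrative sketch (hypothetical host/credentials) of the (dli, needs_update)
    # contract returned by load_dli:
    #   dli, needs_update = out.load_dli('labpower1.example.com', 'admin', 'secret')
    #   if dli is None:
    #       print('dli unreachable')
    #   elif needs_update:
    #       dli.get_dli_outlets()  # cached instance ~ refresh port state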
    def pwr_start_update_threads(self, upd_linked: bool = False, failures: Union[Dict[str, Any], None] = None, t_name: str = 'init'):
        failures = failures if failures is not None else {}  # avoid a shared mutable default argument
        kwargs = {'upd_linked': upd_linked, 'failures': failures}
outlets = self.data.get('defined', {})
if not failures:
if 'failures' in outlets:
failures = outlets['failures']
if failures: # re-attempt connection to failed power controllers on refresh
outlets = {**outlets, **failures}
failures = {}
        # this shouldn't happen, but prevents spawning multiple updates for same outlet
        _outlets = outlets.copy()  # dict.copy() never returns None, so no None check is needed
        for k in _outlets:
            already_running = any(t.name == f'{t_name}_pwr_{k}' for t in threading.enumerate())
            if not already_running:
                threading.Thread(target=self.pwr_get_outlets, args=[{k: _outlets[k]}],
                                 kwargs=kwargs, name=f'{t_name}_pwr_{k}').start()
def update_linked_devs(self, outlet: Dict[str, Any]) -> Tuple[Dict[str, Any], List[Union[str, int]]]:
'''Update linked devs for dli outlets if they exist
Params:
dict -- the outlet dict to be updated
Returns:
tuple -- 0: dict: updated outlet dict or same dict if no linked_devs
1: list: list of all ports linked on this dli (used to initiate query against the dli)
'''
        this_dli = self._dli.get(outlet['address'])
        _p = []
        if outlet.get('linked_devs'):
            for dev in outlet['linked_devs']:
                ports = outlet['linked_devs'][dev]
                for port in (ports if isinstance(ports, list) else [ports]):
                    if int(port) not in _p:
                        _p.append(int(port))
            # _p is always a list here (the old isinstance(_p, int) branch was dead code)
            outlet['is_on'] = {port: this_dli.outlets[port] for port in _p}
        return outlet, _p
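    # Illustrative sketch (hypothetical outlet dict) of update_linked_devs:
    #   outlet = {'address': 'labpower1.example.com', 'linked_devs': {'/dev/router1': [1, 2]}}
    #   outlet, ports = out.update_linked_devs(outlet)
    #   # ports -> [1, 2]; outlet['is_on'] -> {1: <state>, 2: <state>}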
def dli_close_all(self, dlis=None):
'''Close Connection to any connected dli Web Power Switches
Arguments:
dlis {dict} -- dict of dli objects
'''
dlis = self._dli if not dlis else dlis
for address in dlis:
if dlis[address].dli:
                if getattr(dlis[address], 'rest', False):
threading.Thread(target=dlis[address].dli.close).start()
else:
threading.Thread(target=dlis[address].dli.session.close).start()
    def pwr_get_outlets(self, outlet_data: Union[Dict[str, Any], None] = None, upd_linked: bool = False, failures: Union[Dict[str, Any], None] = None) -> Dict[str, Any]:
        '''Get Details for Outlets defined in ConsolePi.yaml power section

        On Menu Launch this method is called in parallel (threaded) for each outlet
        On Refresh all outlets are passed to the method

        params: - All Optional
            outlet_data:dict, The outlets that need to be updated, if not provided will get all outlets defined in ConsolePi.yaml
            upd_linked:Bool, If True will update just the linked ports, False is for dli and will update
                all ports for the dli.
            failures:dict: when refreshing outlets pass in previous failures so they can be re-tried
        '''
        outlet_data = outlet_data if outlet_data is not None else {}  # avoid shared mutable default arguments
        failures = failures if failures is not None else {}
        # re-attempt connection to failed power controllers on refresh
        log.debug(f"[PWR VRFY (pwr_get_outlets)] Processing {', '.join(outlet_data.keys())}")
        if not failures:
            failures = outlet_data.get('failures', {}) if outlet_data.get('failures') else self.data.get('failures', {})
        outlet_data = self.data.get('defined', {}) if not outlet_data else outlet_data
        if failures:
            outlet_data = {**outlet_data, **failures}
            failures = {}
dli_power = self.data.get('dli_power', {})
esp_power = self.data.get('esp_power', {})
for k in outlet_data:
outlet = outlet_data[k]
_start = time.perf_counter()
# -- // GPIO \\ --
if outlet['type'].upper() == 'GPIO':
if not is_rpi:
log.warning('GPIO Outlet Defined, GPIO Only Supported on RPi - ignored', show=True)
continue
                noff = outlet.get('noff', True)
                GPIO.setup(outlet['address'], GPIO.OUT)
                pin_state = bool(GPIO.input(outlet['address']))
                outlet_data[k]['is_on'] = pin_state if noff else not pin_state
# -- // tasmota \\ --
elif outlet['type'] == 'tasmota':
response = self.do_tasmota_cmd(outlet['address'])
outlet['is_on'] = response
if response not in [0, 1, True, False]:
failures[k] = outlet_data[k]
failures[k]['error'] = f'[PWR-TASMOTA] {k}:{failures[k]["address"]} {response} - Removed'
log.warning(failures[k]['error'], show=True)
# -- // esphome \\ --
elif outlet['type'] == 'esphome':
# TODO have do_esphome accept list, slice, or str for one or multiple relays
relays = utils.listify(outlet.get('relays', k)) # if they have not specified the relay try name of outlet
outlet['is_on'] = {}
esp_ok = True
for r in relays:
response = self.do_esphome_cmd(outlet['address'], r)
outlet['is_on'][r] = {'state': response, 'name': r}
if response not in [True, False]:
failures[k] = outlet_data[k]
failures[k]['error'] = f'[PWR-ESP] {k}:{r} ({failures[k]["address"]}) {response} - Removed'
log.warning(failures[k]['error'], show=True)
esp_ok = False
break
                # add multi-port esp_outlets to dli_menu, unless all outlets are linked anyway
                # (an 8 port esp gets the same treatment as a dli; the > 1 check already covers it)
                if esp_ok and len(relays) > 1:
no_linkage_relays = [r for r in relays if f"'{r}'" not in str(outlet_data[k]["linked_devs"])]
if no_linkage_relays:
esp_power[outlet_data[k]["address"]] = outlet_data[k]["is_on"]
# -- // dli \\ --
elif outlet['type'].lower() == 'dli':
if TIMING:
dbg_line = '------------------------ // NOW PROCESSING {} \\\\ ------------------------'.format(k)
print('\n{}'.format('=' * len(dbg_line)))
print('{}\n{}\n{}'.format(dbg_line, outlet_data[k], '-' * len(dbg_line)))
print('{}'.format('=' * len(dbg_line)))
# -- // VALIDATE CONFIG FILE DATA FOR DLI \\ --
all_good = True # initial value
for _ in ['address', 'username', 'password']:
if not outlet.get(_):
all_good = False
failures[k] = outlet_data[k]
                        failures[k]['error'] = f'[PWR-DLI {k}] {_} missing from {failures[k]["address"]} ' \
                                               'configuration - skipping'
                        log.error(failures[k]['error'], show=True)
break
if not all_good:
continue
(this_dli, _update) = self.load_dli(outlet['address'], outlet['username'], outlet['password'])
if this_dli is None or this_dli.dli is None:
failures[k] = outlet_data[k]
failures[k]['error'] = '[PWR-DLI {}] {} Unreachable - Removed'.format(k, failures[k]['address'])
log.warning(f"[PWR-DLI {k}] {failures[k]['address']} Unreachable - Removed", show=True)
else:
if TIMING:
xstart = time.perf_counter()
print('this_dli.outlets: {} {}'.format(this_dli.outlets, 'update' if _update else 'init'))
print(json.dumps(dli_power, indent=4, sort_keys=True))
# upd_linked is for faster update in power menu only refreshes data for linked ports vs entire dli
if upd_linked and self.data['dli_power'].get(outlet['address']):
if outlet.get('linked_devs'):
(outlet, _p) = self.update_linked_devs(outlet)
if k in outlet_data:
outlet_data[k]['is_on'] = this_dli[_p]
else:
log.error(f'[PWR GET_OUTLETS] {k} appears to be unreachable')
# TODO not actually using the error returned this turned into a hot mess
if isinstance(outlet['is_on'], dict) and not outlet['is_on']:
all_good = False
# update dli_power for the refreshed / linked ports
else:
for _ in outlet['is_on']:
dli_power[outlet['address']][_] = outlet['is_on'][_]
else:
if _update:
dli_power[outlet['address']] = this_dli.get_dli_outlets() # data may not be fresh trigger dli update
# handle error connecting to dli during refresh - when connect worked on menu launch
if not dli_power[outlet['address']]:
failures[k] = outlet_data[k]
failures[k]['error'] = f"[PWR-DLI] {k} {failures[k]['address']} Unreachable - Removed"
log.warning(f'[PWR-DLI {k}] {failures[k]["address"]} Unreachable - Removed',
show=True)
continue
else: # dli was just instantiated data is fresh no need to update
dli_power[outlet['address']] = this_dli.outlets
if outlet.get('linked_devs'):
(outlet, _p) = self.update_linked_devs(outlet)
if TIMING:
print('[TIMING] this_dli.outlets: {}'.format(time.perf_counter() - xstart)) # type: ignore
log.debug(f"{outlet['type'].lower()} {k} Updated. Elapsed Time(secs): {time.perf_counter() - _start}")
# -- END for LOOP for k in outlet_data --
# Move failed outlets from the keys that populate the menu to the 'failures' key
# failures are displayed in the footer section of the menu, then re-tried on refresh
for _dev in failures.copy():
if outlet_data.get(_dev):
del outlet_data[_dev]
if self.data['defined'].get(_dev):
del self.data['defined'][_dev]
if failures[_dev]['address'] in dli_power:
del dli_power[failures[_dev]['address']]
self.data['failures'][_dev] = failures[_dev]
# restore outlets that failed on menu launch but found reachable during refresh
for _dev in outlet_data.copy():
if _dev not in self.data['defined']:
self.data['defined'][_dev] = outlet_data[_dev]
if _dev in self.data['failures']:
del self.data['failures'][_dev]
self.data['dli_power'] = dli_power
self.data['esp_power'] = esp_power
log.debug(f"[PWR VRFY (pwr_get_outlets)] Done Processing {', '.join(outlet_data.keys())}")
return self.data
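    # Illustrative sketch of the verify/refresh flow this method supports
    # (outlet definitions come from ConsolePi.yaml):
    #   out = Outlets()
    #   data = out.pwr_get_outlets()                                   # verify all defined outlets
    #   data = out.pwr_get_outlets(failures=data.get('failures', {}))  # re-try prior failures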
def pwr_toggle(self, pwr_type, address, desired_state=None, port=None, noff=True, noconfirm=False):
'''Toggle Power On the specified port
args:
pwr_type: valid types = 'dli', 'tasmota', 'GPIO' (not case sensitive)
address: for dli and tasmota: str - ip or fqdn
kwargs:
desired_state: bool The State True|False (True = ON) you want the outlet to be in
if not provided the method will query the current state of the port and set desired_state to the inverse
port: Only required for dli: can be type str | int | list.
valid:
int: representing the dli outlet #
list: list of outlets(int) to perform operation on
str: 'all' ~ to perform operation on all outlets
noff: Bool, default: True. = normally off, only applies to GPIO based outlets.
If an outlet is normally off (True) = the relay/outlet is off if no power is applied via GPIO
Setting noff to False flips the ON/OFF evaluation so the menu will show the port is ON when no power is applied.
returns:
Bool representing resulting port state (True = ON)
'''
# --// REMOVE ONCE VERIFIED \\--
# if isinstance(desired_state, str): # menu should be passing in True/False no on off now. can remove once that's verified
# desired_state = False if desired_state.lower() == 'off' else True
# print('\ndev_note: pwr_toggle passed str not bool for desired_state check calling function {}'.format(desired_state))
# time.sleep(5)
# -- // Toggle dli web power switch port \\ --
        if pwr_type.lower() == 'dli':
            if port is not None:
                return self._dli[address].toggle(port, toState=desired_state)
            raise ConsolePiPowerException('pwr_toggle: port must be provided for outlet type dli')
# -- // Toggle GPIO port \\ --
elif pwr_type.upper() == 'GPIO':
gpio = address
# get current state and determine inverse if toggle called with no desired_state specified
if desired_state is None:
cur_state = bool(GPIO.input(gpio)) if noff else not bool(GPIO.input(gpio))
desired_state = not cur_state
if desired_state:
GPIO.output(gpio, int(noff))
else:
GPIO.output(gpio, int(not noff))
return bool(GPIO.input(gpio)) if noff else not bool(GPIO.input(gpio))
# -- // Toggle TASMOTA port \\ --
elif pwr_type.lower() == 'tasmota':
if desired_state is None:
desired_state = not self.do_tasmota_cmd(address)
return self.do_tasmota_cmd(address, desired_state)
# -- // Toggle espHome port \\ --
elif pwr_type.lower() == 'esphome':
if desired_state is None:
desired_state = not self.do_esphome_cmd(address, port)
return self.do_esphome_cmd(address, port, desired_state)
else:
raise Exception('pwr_toggle: Invalid type ({}) or no name provided'.format(pwr_type))
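    # Illustrative sketch (hypothetical dli host / pin numbers) of pwr_toggle:
    #   out.pwr_toggle('dli', 'labpower1.example.com', desired_state=True, port=3)
    #   out.pwr_toggle('gpio', 4, noff=True)  # BCM pin 4; noff inverts the ON/OFF evaluation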
def pwr_cycle(self, pwr_type, address, port=None, noff=True):
'''returns Bool True = Power Cycle success, False Not performed Outlet OFF
TODO Check error handling if unreachable
'''
pwr_type = pwr_type.lower()
# --// CYCLE DLI PORT \\--
if pwr_type == 'dli':
if port is not None:
return self._dli[address].cycle(port)
else:
raise ConsolePiPowerException('pwr_cycle: port must be provided for outlet type dli')
# --// CYCLE GPIO PORT \\--
elif pwr_type == 'gpio':
gpio = address
# normally off states are normal 0:off, 1:on - if not normally off it's reversed 0:on 1:off
cur_state = GPIO.input(gpio) if noff else not GPIO.input(gpio)
if cur_state:
GPIO.output(gpio, int(not noff))
time.sleep(config.cycle_time)
GPIO.output(gpio, int(noff))
response = bool(GPIO.input(gpio))
return response if noff else not response
else:
return False # Cycle is not valid on ports that are already off
# --// CYCLE TASMOTA PORT \\--
elif pwr_type == 'tasmota':
# response = self.do_tasmota_cmd(address) # this logic is handled in the function now
# if response: # Only Cycle if outlet is currently ON
return self.do_tasmota_cmd(address, 'cycle')
# --// CYCLE ESPHOME PORT \\--
        elif pwr_type == 'esphome':
            return self.do_esphome_cmd(address, port, 'cycle')
        else:
            raise ConsolePiPowerException(f'pwr_cycle: Invalid type ({pwr_type})')
def pwr_rename(self, type, address, name=None, port=None):
if name is None:
try:
name = input('New name for {} port: {} >> '.format(address, port))
except KeyboardInterrupt:
print('Rename Aborted!')
return 'Rename Aborted'
if type.lower() == 'dli':
if port is not None:
response = self._dli[address].rename(port, name)
if response:
self.data['dli_power'][address][port]['name'] = name
else:
response = 'ERROR port must be provided for outlet type dli'
elif type.lower() in ['gpio', 'tasmota', 'esphome']:
print('rename of GPIO, tasmota, and espHome ports not yet implemented')
print('They can be renamed manually by updating ConsolePi.yaml')
response = 'rename of GPIO, tasmota, and espHome ports not yet implemented'
# TODO get group name based on address, read json file into dict, change the name write it back
# and update dict
else:
raise ConsolePiPowerException('pwr_rename: Invalid type ({}) or no name provided'.format(type))
return response
def pwr_all(self, outlets=None, action='toggle', desired_state=None):
'''
Returns List of responses representing state of outlet after exec
Valid response is Bool where True = ON
Errors are returned in str format
'''
if action == 'toggle' and desired_state is None:
return 'Error: desired final state must be provided' # should never hit this
if outlets is None:
outlets = self.pwr_get_outlets()['defined']
responses = []
for grp in outlets:
outlet = outlets[grp]
# if no_all: true in config outlet is ignored during all off/on operations
if outlet.get("no_all"):
continue
            noff = outlet.get('noff', True)
if action == 'toggle':
# skip any defined dlis that don't have any linked_outlets defined
# if not outlet['type'] == 'dli' or outlet.get('linked_devs')):
if outlet['type'] == 'dli':
if outlet.get('linked_devs'):
responses.append(
self.pwr_toggle(outlet['type'], outlet['address'], desired_state=desired_state, port=self.update_linked_devs(outlet)[1], noff=noff, noconfirm=True)
)
elif outlet['type'] == 'esphome':
_relays = utils.listify(outlet.get('relays'))
for p in _relays:
responses.append(self.pwr_toggle(outlet['type'], outlet['address'], desired_state=desired_state,
port=p, noff=noff, noconfirm=True))
else:
responses.append(self.pwr_toggle(outlet['type'], outlet['address'], desired_state=desired_state,
noff=noff, noconfirm=True))
elif action == 'cycle':
if outlet['type'] == 'dli':
if 'linked_ports' in outlet:
linked_ports = utils.listify(outlet['linked_ports'])
for p in linked_ports:
# Start a thread for each port run in parallel
# menu status for (linked) power menu is updated on load
threading.Thread(
target=self.pwr_cycle,
args=[outlet['type'], outlet['address']],
kwargs={'port': p, 'noff': noff},
name=f'cycle_{p}'
).start()
elif outlet['type'] == 'esphome':
relays = utils.listify(outlet.get('relays', []))
for p in relays:
# Start a thread for each port run in parallel
threading.Thread(
target=self.pwr_cycle,
args=[outlet['type'], outlet['address']],
kwargs={'port': p, 'noff': noff},
name=f'cycle_{p}'
).start()
else:
threading.Thread(
target=self.pwr_cycle,
args=[outlet['type'], outlet['address']],
kwargs={'noff': noff},
name='cycle_{}'.format(outlet['address'])
).start()
        # Wait for all cycle/toggle threads to complete (short sleep avoids a busy spin)
        while any('cycle' in t.name or 'toggle_' in t.name for t in threading.enumerate()):
            time.sleep(0.1)
return responses
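# Illustrative end-to-end sketch of this module (assumes outlets are defined in
# ConsolePi.yaml; return values follow the contracts documented above):
#   pwr = Outlets()
#   data = pwr.pwr_get_outlets()
#   pwr.pwr_all(data['defined'], action='toggle', desired_state=False)  # everything off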
|
f73c29587437efa9bb8e43a67bbb6765d47dc57c
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/blink/web_tests/external/wpt/loading/early-hints/resources/csp-document-disallow-loader.h2.py
|
bffa90c753df8039846ac741c918d65d510ba950
|
[
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"MIT",
"Apache-2.0"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 1,598
|
py
|
csp-document-disallow-loader.h2.py
|
import os
def handle_headers(frame, request, response):
# Send a 103 response.
resource_url = request.GET.first(b"resource-url").decode()
link_header_value = "<{}>; rel=preload; as=script".format(resource_url)
early_hints = [
(b":status", b"103"),
(b"link", link_header_value),
]
early_hints_policy = request.GET.first(b"early-hints-policy").decode()
# In this test handler "allowed" or "absent" are only valid policies because
# csp-document-disallow.html always sets CSP to disallow the preload.
# "disallowed" makes no observable changes in the test. Note that
# csp-basic.html covers disallowing preloads in Early Hints.
assert early_hints_policy == "allowed" or early_hints_policy == "absent"
if early_hints_policy == "allowed":
resource_origin = request.GET.first(b"resource-origin").decode()
csp_value = "script-src 'self' 'unsafe-inline' {}".format(resource_origin)
early_hints.append((b"content-security-policy", csp_value))
response.writer.write_raw_header_frame(headers=early_hints,
end_headers=True)
# Send the final response header.
response.status = 200
response.headers["content-type"] = "text/html"
response.write_status_headers()
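# Illustrative request shape this handler expects (hypothetical values; the real
# query params are built by the test harness):
#   GET /...csp-document-disallow-loader.h2.py?resource-url=https://example.com/script.js
#       &early-hints-policy=allowed&resource-origin=https://example.com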
def main(request, response):
current_dir = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(current_dir, "csp-document-disallow.html")
with open(file_path, "r") as f:
test_content = f.read()
response.writer.write_data(item=test_content, last=True)
|
ac6327f380f7cba5b29dafb7c0ffd14e2222e2d1
|
3dc647cd07a7361ed401e40d2b7cce8c826c8f6c
|
/whats_left.py
|
7f3ad80c6364ef62d2dc15f1b4a73061f6a26346
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
RustPython/RustPython
|
5ddce4a9848b9de8c041ffd2634f83c0105d3f39
|
b864e5da1f18897fc884180b7093df5aa170024f
|
refs/heads/main
| 2023-09-04T12:38:29.458699
| 2023-09-03T12:33:42
| 2023-09-03T12:33:42
| 135,201,145
| 15,815
| 1,302
|
MIT
| 2023-09-14T08:11:45
| 2018-05-28T19:27:01
|
Rust
|
UTF-8
|
Python
| false
| false
| 15,579
|
py
|
whats_left.py
|
#!/usr/bin/env -S python3 -I
# This script generates Lib/snippets/whats_left_data.py with these variables defined:
# expected_methods - a dictionary mapping builtin objects to their methods
# cpymods - a dictionary mapping module names to their contents
# libdir - the location of RustPython's Lib/ directory.
#
# TODO: include this:
# which finds all modules it has available and
# creates a Python dictionary mapping module names to their contents, which is
# in turn used to generate a second Python script that also finds which modules
# it has available and compares that against the first dictionary we generated.
# We then run this second generated script with RustPython.
import argparse
import re
import os
import sys
import json
import warnings
import inspect
import subprocess
import platform
from pydoc import ModuleScanner
if not sys.flags.isolated:
print("running without -I option.")
print("python -I whats_left.py")
exit(1)
GENERATED_FILE = "extra_tests/not_impl.py"
implementation = platform.python_implementation()
if implementation != "CPython":
sys.exit(f"whats_left.py must be run under CPython, got {implementation} instead")
if sys.version_info[:2] < (3, 11):
sys.exit(f"whats_left.py must be run under CPython 3.11 or newer, got {implementation} {sys.version} instead")
def parse_args():
parser = argparse.ArgumentParser(description="Process some integers.")
parser.add_argument(
"--signature",
action="store_true",
help="print functions whose signatures don't match CPython's",
)
parser.add_argument(
"--doc",
action="store_true",
help="print elements whose __doc__ don't match CPython's",
)
parser.add_argument(
"--json",
action="store_true",
help="print output as JSON (instead of line by line)",
)
args = parser.parse_args()
return args
args = parse_args()
# modules suggested for deprecation by PEP 594 (www.python.org/dev/peps/pep-0594/)
# some of these might be implemented, but they are not a priority
PEP_594_MODULES = {
"aifc",
"asynchat",
"asyncore",
"audioop",
"binhex",
"cgi",
"cgitb",
"chunk",
"crypt",
"formatter",
"fpectl",
"imghdr",
"imp",
"macpath",
"msilib",
"nntplib",
"nis",
"ossaudiodev",
"parser",
"pipes",
"smtpd",
"sndhdr",
"spwd",
"sunau",
"telnetlib",
"uu",
"xdrlib",
}
# CPython specific modules (mostly consisting of templates/tests)
CPYTHON_SPECIFIC_MODS = {
    'xxmodule', 'xxsubtype', 'xxlimited', '_xxtestfuzz',
    '_testbuffer', '_testcapi', '_testimportmultiple', '_testinternalcapi', '_testmultiphase',
}
IGNORED_MODULES = {"this", "antigravity"} | PEP_594_MODULES | CPYTHON_SPECIFIC_MODS
sys.path = [
path
for path in sys.path
if ("site-packages" not in path and "dist-packages" not in path)
]
def attr_is_not_inherited(type_, attr):
"""
returns True if type_'s attr is not inherited from any of its base classes
"""
bases = type_.__mro__[1:]
return getattr(type_, attr) not in (getattr(base, attr, None) for base in bases)
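# Illustrative checks (bool overrides __and__ but inherits __hash__ from int):
#   attr_is_not_inherited(bool, '__and__')   # -> True
#   attr_is_not_inherited(bool, '__hash__')  # -> False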
def extra_info(obj):
if callable(obj) and not inspect._signature_is_builtin(obj):
doc = inspect.getdoc(obj)
try:
sig = str(inspect.signature(obj))
# remove function memory addresses
return {
"sig": re.sub(" at 0x[0-9A-Fa-f]+", " at 0xdeadbeef", sig),
"doc": doc,
}
except Exception as e:
exception = repr(e)
# CPython uses ' RustPython uses "
if exception.replace('"', "'").startswith("ValueError('no signature found"):
return {
"sig": "ValueError('no signature found')",
"doc": doc,
}
return {
"sig": exception,
"doc": doc,
}
return {
"sig": None,
"doc": None,
}
def name_sort_key(name):
if name == "builtins":
return ""
if name[0] == "_":
return name[1:] + "1"
return name + "2"
def gen_methods():
types = [
bool,
bytearray,
bytes,
complex,
dict,
enumerate,
filter,
float,
frozenset,
int,
list,
map,
memoryview,
range,
set,
slice,
str,
super,
tuple,
object,
zip,
classmethod,
staticmethod,
property,
Exception,
BaseException,
]
objects = [t.__name__ for t in types]
objects.append("type(None)")
iters = [
"type(bytearray().__iter__())",
"type(bytes().__iter__())",
"type(dict().__iter__())",
"type(dict().values().__iter__())",
"type(dict().items().__iter__())",
"type(dict().values())",
"type(dict().items())",
"type(set().__iter__())",
"type(list().__iter__())",
"type(range(0).__iter__())",
"type(str().__iter__())",
"type(tuple().__iter__())",
"type(memoryview(bytearray(b'0')).__iter__())",
]
methods = {}
for typ_code in objects + iters:
typ = eval(typ_code)
attrs = []
for attr in dir(typ):
if attr_is_not_inherited(typ, attr):
attrs.append((attr, extra_info(getattr(typ, attr))))
methods[typ.__name__] = (typ_code, extra_info(typ), attrs)
output = "expected_methods = {\n"
for name in sorted(methods.keys(), key=name_sort_key):
typ_code, extra, attrs = methods[name]
output += f" '{name}': ({typ_code}, {extra!r}, [\n"
for attr, attr_extra in attrs:
output += f" ({attr!r}, {attr_extra!r}),\n"
output += " ]),\n"
if typ_code != objects[-1]:
output += "\n"
output += "}\n\n"
return output
def scan_modules():
"""taken from the source code of help('modules')
https://github.com/python/cpython/blob/63298930fb531ba2bb4f23bc3b915dbf1e17e9e1/Lib/pydoc.py#L2178"""
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == ".__init__":
modname = modname[:-9] + " (package)"
if modname.find(".") < 0:
modules[modname] = 1
def onerror(modname):
callback(None, modname, None)
with warnings.catch_warnings():
# ignore warnings from importing deprecated modules
warnings.simplefilter("ignore")
ModuleScanner().run(callback, onerror=onerror)
return list(modules.keys())
def import_module(module_name):
import io
from contextlib import redirect_stdout
# Importing modules causes ('Constant String', 2, None, 4) and
# "Hello world!" to be printed to stdout.
f = io.StringIO()
with warnings.catch_warnings(), redirect_stdout(f):
# ignore warnings caused by importing deprecated modules
warnings.filterwarnings("ignore", category=DeprecationWarning)
try:
module = __import__(module_name)
except Exception as e:
return e
return module
def is_child(module, item):
import inspect
item_mod = inspect.getmodule(item)
return item_mod is module
def dir_of_mod_or_error(module_name, keep_other=True):
    module = import_module(module_name)
    # import_module returns the exception on failure; without this guard the
    # isinstance(..., Exception) checks in gen_modules()/compare() could never fire
    if isinstance(module, Exception):
        return module
    item_names = sorted(set(dir(module)))
    result = {}
    for item_name in item_names:
        item = getattr(module, item_name)
        # don't repeat items imported from other modules
        if keep_other or is_child(module, item) or inspect.getmodule(item) is None:
            result[item_name] = extra_info(item)
    return result
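# Illustrative result shapes (module names are real, output trimmed):
#   dir_of_mod_or_error('math')         # -> {..., 'pi': {'sig': None, 'doc': None}, ...}
#   dir_of_mod_or_error('no_such_mod')  # -> ModuleNotFoundError(...)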
def gen_modules():
# check name because modules listed have side effects on import,
# e.g. printing something or opening a webpage
modules = {}
for mod_name in sorted(scan_modules(), key=name_sort_key):
if mod_name in IGNORED_MODULES:
continue
# when generating CPython list, ignore items defined by other modules
dir_result = dir_of_mod_or_error(mod_name, keep_other=False)
if isinstance(dir_result, Exception):
print(
f"!!! {mod_name} skipped because {type(dir_result).__name__}: {str(dir_result)}",
file=sys.stderr,
)
continue
modules[mod_name] = dir_result
return modules
output = """\
# WARNING: THIS IS AN AUTOMATICALLY GENERATED FILE
# EDIT extra_tests/not_impl_gen.sh, NOT THIS FILE.
# RESULTS OF THIS TEST DEPEND ON THE CPYTHON
# VERSION AND PYTHON ENVIRONMENT USED
# TO RUN not_impl_mods_gen.py
"""
output += gen_methods()
output += f"""
cpymods = {gen_modules()!r}
libdir = {os.path.abspath("Lib/").encode('utf8')!r}
"""
# Copy the source code of functions we will reuse in the generated script
REUSED = [
attr_is_not_inherited,
extra_info,
dir_of_mod_or_error,
import_module,
is_child,
]
for fn in REUSED:
output += "".join(inspect.getsourcelines(fn)[0]) + "\n\n"
# Prevent missing variable linter errors from compare()
expected_methods = {}
cpymods = {}
libdir = ""
# This function holds the source code that will be run under RustPython
def compare():
import inspect
import io
import os
import re
import sys
import warnings
from contextlib import redirect_stdout
import json
import platform
def method_incompatibility_reason(typ, method_name, real_method_value):
has_method = hasattr(typ, method_name)
if not has_method:
return ""
is_inherited = not attr_is_not_inherited(typ, method_name)
if is_inherited:
return "(inherited)"
value = extra_info(getattr(typ, method_name))
if value != real_method_value:
return f"{value} != {real_method_value}"
return None
not_implementeds = {}
for name, (typ, real_value, methods) in expected_methods.items():
missing_methods = {}
for method, real_method_value in methods:
reason = method_incompatibility_reason(typ, method, real_method_value)
if reason is not None:
missing_methods[method] = reason
if missing_methods:
not_implementeds[name] = missing_methods
if platform.python_implementation() == "CPython":
if not_implementeds:
sys.exit("ERROR: CPython should have all the methods")
mod_names = [
name.decode()
for name, ext in map(os.path.splitext, os.listdir(libdir))
if ext == b".py" or os.path.isdir(os.path.join(libdir, name))
]
mod_names += list(sys.builtin_module_names)
# Remove easter egg modules
mod_names = sorted(set(mod_names) - {"this", "antigravity"})
rustpymods = {mod: dir_of_mod_or_error(mod) for mod in mod_names}
result = {
"cpython_modules": {},
"implemented": {},
"not_implemented": {},
"failed_to_import": {},
"missing_items": {},
"mismatched_items": {},
"mismatched_doc_items": {},
}
for modname, cpymod in cpymods.items():
rustpymod = rustpymods.get(modname)
if rustpymod is None:
result["not_implemented"][modname] = None
elif isinstance(rustpymod, Exception):
result["failed_to_import"][modname] = rustpymod.__class__.__name__ + str(rustpymod)
else:
implemented_items = sorted(set(cpymod) & set(rustpymod))
mod_missing_items = set(cpymod) - set(rustpymod)
mod_missing_items = sorted(
f"{modname}.{item}" for item in mod_missing_items
)
mod_mismatched_items = [
(f"{modname}.{item}", rustpymod[item]["sig"], cpymod[item]["sig"])
for item in implemented_items
if rustpymod[item]["sig"] != cpymod[item]["sig"]
and not isinstance(cpymod[item]["sig"], Exception)
]
mod_mismatched_doc_items = [
(f"{modname}.{item}", rustpymod[item]["doc"], cpymod[item]["doc"])
for item in implemented_items
if rustpymod[item]["doc"] != cpymod[item]["doc"]
]
if mod_missing_items or mod_mismatched_items:
if mod_missing_items:
result["missing_items"][modname] = mod_missing_items
if mod_mismatched_items:
result["mismatched_items"][modname] = mod_mismatched_items
if mod_mismatched_doc_items:
result["mismatched_doc_items"][modname] = mod_mismatched_doc_items
else:
result["implemented"][modname] = None
result["cpython_modules"] = cpymods
result["not_implementeds"] = not_implementeds
print(json.dumps(result))
def remove_one_indent(s):
indent = " "
return s[len(indent) :] if s.startswith(indent) else s
compare_src = inspect.getsourcelines(compare)[0][1:]
output += "".join(remove_one_indent(line) for line in compare_src)
with open(GENERATED_FILE, "w", encoding='utf-8') as f:
f.write(output + "\n")
subprocess.run(["cargo", "build", "--release", "--features=ssl"], check=True)
result = subprocess.run(
["cargo", "run", "--release", "--features=ssl", "-q", "--", GENERATED_FILE],
env={**os.environ.copy(), "RUSTPYTHONPATH": "Lib"},
text=True,
capture_output=True,
)
# The last line should be json output, the rest of the lines can contain noise
# because importing certain modules can print stuff to stdout/stderr
result = json.loads(result.stdout.splitlines()[-1])
if args.json:
print(json.dumps(result))
sys.exit()
# missing entire modules
print("# modules")
for modname in result["not_implemented"]:
print(modname, "(entire module)")
for modname, exception in result["failed_to_import"].items():
print(f"{modname} (exists but not importable: {exception})")
# missing from builtins
print("\n# builtin items")
for module, missing_methods in result["not_implementeds"].items():
for method, reason in missing_methods.items():
print(f"{module}.{method}" + (f" {reason}" if reason else ""))
# missing from modules
print("\n# stdlib items")
for modname, missing in result["missing_items"].items():
for item in missing:
print(item)
if args.signature:
print("\n# mismatching signatures (warnings)")
for modname, mismatched in result["mismatched_items"].items():
for i, (item, rustpy_value, cpython_value) in enumerate(mismatched):
if cpython_value and cpython_value.startswith("ValueError("):
continue # these items will never match
if rustpy_value is None or rustpy_value.startswith("ValueError("):
rustpy_value = f" {rustpy_value}"
print(f"{item}{rustpy_value}")
if cpython_value is None:
cpython_value = f" {cpython_value}"
print(f"{' ' * len(item)}{cpython_value}")
if i < len(mismatched) - 1:
print()
if args.doc:
print("\n# mismatching `__doc__`s (warnings)")
for modname, mismatched in result["mismatched_doc_items"].items():
for (item, rustpy_doc, cpython_doc) in mismatched:
print(f"{item} {repr(rustpy_doc)} != {repr(cpython_doc)}")
print()
print("# summary")
for error_type, modules in result.items():
print("# ", error_type, len(modules))
|
6c24c11095726999da47df4acfa8be3a5f6b9f25
|
0635a88f7e48f32c86246e1093b19f4cf48f1244
|
/tests/test_parse_cpu_string.py
|
27fdfd1313f911154ad1f2745cfea31aa21ec6f7
|
[
"Python-2.0",
"MIT"
] |
permissive
|
workhorsy/py-cpuinfo
|
0c8a14840849a53819a6a595812cd32b894c0503
|
f3f0fec58335b9699b9b294267c15f516045b1fe
|
refs/heads/master
| 2023-08-31T10:45:13.027313
| 2022-11-19T20:57:32
| 2022-11-19T20:57:32
| 16,787,349
| 290
| 62
|
MIT
| 2023-08-22T12:51:43
| 2014-02-13T00:49:47
|
Python
|
UTF-8
|
Python
| false
| false
| 5,885
|
py
|
test_parse_cpu_string.py
|
import unittest
from cpuinfo import *
import helpers
class TestParseCPUString(unittest.TestCase):
def test_to_decimal_string(self):
self.assertEqual('2.8', cpuinfo._to_decimal_string('2.80'))
self.assertEqual('2.0', cpuinfo._to_decimal_string('2'))
self.assertEqual('3.0', cpuinfo._to_decimal_string(3))
self.assertEqual('6.5', cpuinfo._to_decimal_string(6.5))
self.assertEqual('7.002', cpuinfo._to_decimal_string(7.002))
self.assertEqual('4.00000000001', cpuinfo._to_decimal_string('4.00000000001'))
self.assertEqual('5.0', cpuinfo._to_decimal_string('5.000000000000'))
self.assertEqual('0.0', cpuinfo._to_decimal_string('invalid'))
self.assertEqual('0.0', cpuinfo._to_decimal_string('8.778.9'))
self.assertEqual('0.0', cpuinfo._to_decimal_string(''))
self.assertEqual('0.0', cpuinfo._to_decimal_string(None))
def test_hz_short_to_full(self):
self.assertEqual((2800000000, 0), cpuinfo._hz_short_to_full('2.8', 9))
self.assertEqual((1200000, 0), cpuinfo._hz_short_to_full('1.2', 6))
self.assertEqual((3200000000, 0), cpuinfo._hz_short_to_full('3.2', 9))
self.assertEqual((9001200000, 0), cpuinfo._hz_short_to_full('9001.2', 6))
self.assertEqual((0, 0), cpuinfo._hz_short_to_full('0.0', 0))
self.assertEqual((2, 87), cpuinfo._hz_short_to_full('2.87', 0))
self.assertEqual((0, 0), cpuinfo._hz_short_to_full('invalid', 0))
self.assertEqual((0, 0), cpuinfo._hz_short_to_full('8.778.9', 0))
self.assertEqual((0, 0), cpuinfo._hz_short_to_full('', 0))
self.assertEqual((0, 0), cpuinfo._hz_short_to_full(None, 0))
def test_hz_friendly_to_full(self):
self.assertEqual((2800000000, 0), cpuinfo._hz_friendly_to_full('2.80GHz'))
self.assertEqual((1200000, 0), cpuinfo._hz_friendly_to_full('1.20 mHz'))
self.assertEqual((3693150000, 0), cpuinfo._hz_friendly_to_full('3693.15-MHz'))
self.assertEqual((12000000000, 0), cpuinfo._hz_friendly_to_full('12 GHz'))
self.assertEqual((2, 6), cpuinfo._hz_friendly_to_full('2.6 Hz'))
self.assertEqual((0, 0), cpuinfo._hz_friendly_to_full('0 Hz'))
self.assertEqual((0, 0), cpuinfo._hz_friendly_to_full('invalid'))
self.assertEqual((0, 0), cpuinfo._hz_friendly_to_full('8.778.9'))
self.assertEqual((0, 0), cpuinfo._hz_friendly_to_full(''))
self.assertEqual((0, 0), cpuinfo._hz_friendly_to_full(None))
def test_hz_short_to_friendly(self):
self.assertEqual('2.8000 GHz', cpuinfo._hz_short_to_friendly('2.8', 9))
self.assertEqual('1.2000 MHz', cpuinfo._hz_short_to_friendly('1.2', 6))
self.assertEqual('3.2000 GHz', cpuinfo._hz_short_to_friendly('3.2', 9))
self.assertEqual('1.3000 Hz', cpuinfo._hz_short_to_friendly('1.3', 0))
self.assertEqual('0.0000 Hz', cpuinfo._hz_short_to_friendly('0.0', 0))
self.assertEqual('0.0000 Hz', cpuinfo._hz_short_to_friendly('invalid', 0))
self.assertEqual('0.0000 Hz', cpuinfo._hz_short_to_friendly('8.778.9', 0))
self.assertEqual('0.0000 Hz', cpuinfo._hz_short_to_friendly('', 0))
self.assertEqual('0.0000 Hz', cpuinfo._hz_short_to_friendly(None, 0))
def test_parse_cpu_brand_string(self):
hz, scale = cpuinfo._parse_cpu_brand_string('Intel(R) Pentium(R) CPU G640 @ 2.80GHz')
self.assertEqual((hz, scale), ('2.8', 9))
hz, scale = cpuinfo._parse_cpu_brand_string('Intel(R) Pentium(R) CPU @ 1.20MHz')
self.assertEqual((hz, scale), ('1.2', 6))
# NOTE: No @ symbol
hz, scale = cpuinfo._parse_cpu_brand_string('Intel(R) Pentium(R) D CPU 3.20GHz')
self.assertEqual((hz, scale), ('3.2', 9))
# NOTE: No @ symbol and no Hz
hz, scale = cpuinfo._parse_cpu_brand_string('AMD Ryzen 7 2700X Eight-Core Processor')
self.assertEqual((hz, scale), ('0.0', 0))
def test_parse_cpu_brand_string_dx(self):
hz, scale, brand, vendor_id, stepping, model, family = \
cpuinfo._parse_cpu_brand_string_dx("Intel(R) Pentium(R) CPU G640 @ 2.80GHz (fam: 06, model: 2a, stepping: 07)")
self.assertEqual('Intel(R) Pentium(R) CPU G640 @ 2.80GHz', brand)
self.assertEqual((hz, scale), ('2.8', 9))
self.assertEqual((vendor_id, stepping, model, family), (None, 7, 42, 6))
hz, scale, brand, vendor_id, stepping, model, family = \
cpuinfo._parse_cpu_brand_string_dx("Intel(R) Pentium(R) CPU G640 @ 2.80GHz (family: 0x6, model: 0x2a, stepping: 0x7)")
self.assertEqual('Intel(R) Pentium(R) CPU G640 @ 2.80GHz', brand)
self.assertEqual((hz, scale), ('2.8', 9))
self.assertEqual((vendor_id, stepping, model, family), (None, 7, 42, 6))
hz, scale, brand, vendor_id, stepping, model, family = \
cpuinfo._parse_cpu_brand_string_dx("Intel(R) Core(TM) i7 CPU 870 @ 2.93GHz")
self.assertEqual("Intel(R) Core(TM) i7 CPU 870 @ 2.93GHz", brand)
self.assertEqual((hz, scale), ('2.93', 9))
self.assertEqual((vendor_id, stepping, model, family), (None, None, None, None))
hz, scale, brand, vendor_id, stepping, model, family = \
cpuinfo._parse_cpu_brand_string_dx("Intel(R) Pentium(R) CPU G640 @ 2.80GHz (2793.73-MHz K8-class CPU)")
self.assertEqual("Intel(R) Pentium(R) CPU G640 @ 2.80GHz", brand)
self.assertEqual((hz, scale), ('2.8', 9))
self.assertEqual((vendor_id, stepping, model, family), (None, None, None, None))
# NOTE: No @ symbol
hz, scale, brand, vendor_id, stepping, model, family = \
cpuinfo._parse_cpu_brand_string_dx("Intel(R) Pentium(R) D CPU 3.20GHz")
self.assertEqual("Intel(R) Pentium(R) D CPU 3.20GHz", brand)
self.assertEqual((hz, scale), ('3.2', 9))
self.assertEqual((vendor_id, stepping, model, family), (None, None, None, None))
# NOTE: No @ symbol and no Hz
hz, scale, brand, vendor_id, stepping, model, family = \
cpuinfo._parse_cpu_brand_string_dx("AMD Ryzen 7 2700X Eight-Core Processor (3693.15-MHz K8-class CPU) (fam: 06, model: 2a, stepping: 07)")
self.assertEqual("AMD Ryzen 7 2700X Eight-Core Processor", brand)
self.assertEqual((hz, scale), ('3693.15', 6))
self.assertEqual((vendor_id, stepping, model, family), (None, 7, 42, 6))
|
b15b935730501a7776107a71b66d725149703802
|
8c242933401f3a191b4b38494197bac72635dc42
|
/src/MyNotes.py
|
8987eebea6b4d13fd5fe53ae8a519f42be1568cc
|
[] |
no_license
|
Acidham/alfred-markdown-notes
|
f7adbe4ff05622840275f8017930b4b79a28c7d7
|
7b196d905705f1c470edaef1c222ae91f29c99eb
|
refs/heads/master
| 2023-03-18T05:39:15.460611
| 2023-02-28T06:38:49
| 2023-02-28T06:38:49
| 183,815,202
| 123
| 17
| null | 2021-08-06T14:52:04
| 2019-04-27T19:46:53
|
Python
|
UTF-8
|
Python
| false
| false
| 20,713
|
py
|
MyNotes.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import datetime
import os
import re
import sys
from collections import Counter, OrderedDict
from urllib.request import pathname2url
from Alfred3 import Tools
class Notes(object):
REPL_MAP = {
'[': '',
']': ' ',
'(': '',
')': ' ',
'\n': ' ',
'*': ' ',
',': ' ',
'.': ' ',
'-': ' ',
':': ' ',
'?': ' ',
'!': ' '
}
# Replacement map for Filename when new file created
CHAR_REPLACEMENT_MAP = {
'/': '-',
'\\': '-',
':': '-',
'|': '-',
',': '',
'#': '-'
}
# Fallback Content when no Template is available
FALLBACK_CONTENT = "---\n" \
"Created: {date}\n" \
"Tags: \n" \
"---\n" \
"# {title}\n" \
"```\n" \
"This is the fallback Template.\n" \
"Create your own template, see help!\n" \
"```"
def __init__(self):
if not self.isPython3():
Tools.log("PYTHON VERSION:", sys.version)
raise ModuleNotFoundError("Python version 3.7.0 or higher required!")
self.extension = self.__buildNotesExtension()
self.path = self.__buildNotesPath()
self.default_template = os.getenv('default_template')
self.template_tag = os.getenv('template_tag')
self.url_scheme = os.getenv('url_scheme')
self.search_yaml_tags_only = Tools.getEnvBool('search_yaml_tags_only')
self.default_date_format = os.getenv('default_date_format')
self.exact_match = Tools.getEnvBool('exact_match')
self.todo_newest_oldest = Tools.getEnvBool('todo_newest_oldest')
@staticmethod
def isPython3() -> bool:
"""
Check if script is executed with Python 3.7 or higher
Returns:
bool: True if Python3 else false
"""
        return sys.version_info >= (3, 7)
@staticmethod
def __buildNotesExtension() -> str:
"""
Get notes extension configured in workflow preference
Returns:
str: extension incl. dot e.g. .md
"""
ext = os.getenv('ext')
if ext is None:
ext = '.md'
return ext if '.' in ext else str().join(['.', ext])
@staticmethod
def __buildNotesPath() -> str:
"""
Create Notes path configured in preferences
Returns:
str: home path to notes directory
"""
user_dir = os.path.expanduser('~')
path = os.getenv('path_to_notes')
if path.startswith('~'):
path = path.replace('~', user_dir)
if not (path.startswith('/')):
path = os.path.join("/", path)
if not (path.startswith('/Users')):
path = os.path.join(user_dir, path)
if not (os.path.exists(path)):
sys.stderr.write(f"ERROR: {path} is not a valid notes directory. Add a valid path for path_to_notes")
sys.exit(0)
return path
@staticmethod
def getTodayDate(fmt: str = "%d.%m.%Y") -> str:
"""
Get today's date
Args:
fmt (str, optional): Date format. Defaults to "%d.%m.%Y".
Returns:
str: formatted today's date
"""
now = datetime.datetime.now()
return now.strftime(fmt)
def getDefaultDate(self) -> str:
"""
Read default date format from environment variable
Returns:
str: default date format file name or default format
"""
return "%d.%m.%Y %H.%M" if self.default_date_format == str() else self.default_date_format
def getNotesPath(self) -> str:
"""
Get path to notes home directory
Returns:
str: Path to notes home
"""
return self.path
def getNotesExtension(self) -> str:
"""
Get notes extension from .env
Returns:
str: File extension for md files
"""
return self.extension
@staticmethod
def strJoin(*args: str) -> str:
"""
Join multiple strings
Arguments:
*args (str): strings to join
Returns:
(str): joined string
"""
return str().join(args)
@staticmethod
def strReplace(text: str, replace_map: dict, lowercase: bool = True) -> str:
"""
Replace in text from a replacement map
Args:
text (str): The string which needs to be processed
replace_map (dict): dict with search:replace
Returns:
str : String with replacements
"""
        for k, v in replace_map.items():
            text = text.replace(k, v)
        return text.lower() if lowercase else text
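    # Illustrative sketch using the class maps defined above:
    #   Notes.strReplace('A/B: C', Notes.CHAR_REPLACEMENT_MAP, lowercase=False)  # -> 'A-B- C'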
class Search(Notes):
"""
Search in Notes
Returns:
(object): a Search object
"""
def __init__(self):
super(Search, self).__init__()
def _match(self, search_terms, content, operator):
"""
Find matches of search_terms list with OR or AND
Args:
search_terms (list): Search terms
content (str): Text to search
operator (str): 'OR' or 'AND'
Returns:
bool: True if search terms matches
"""
content = content.lower()
content = self.strReplace(content, self.REPL_MAP)
word_list = content.split(' ')
word_list = [self._chop(w, '#') for w in word_list]
search_terms = [s.lower() for s in search_terms]
match = False
matches = list()
for st in search_terms:
search_str = st.replace('*', str())
            # search if search term contains a whitespace
            if ' ' in st:
                regexp = re.compile(f'({st})', re.I)
                match = len(re.findall(regexp, content)) > 0
            # search if wildcard search in the end
            elif st.endswith('*'):
                match = any(x.startswith(search_str) for x in word_list)
            # search if wildcard search in front
            elif st.startswith('*'):
                match = any(x.endswith(search_str) for x in word_list)
            # search if exact match is true
            elif self.exact_match:
                match = search_str in word_list
            # search with exact match is false
            else:
                match = search_str in str(word_list)
matches.append(match)
match = all(matches) if operator == 'AND' else any(matches)
return match
def notes_search(self, search_terms: list, search_type: str) -> list:
"""
Search with search terms in all markdown files
Args:
search_terms (list): Search terms in a list
search_type (str): OR or AND search
Returns:
list: list of files matches the search
"""
file_list = self.getFilesListSorted()
#search_terms = [normalize('NFD', s.decode('utf-8')) for s in search_terms]
new_list = list()
        if file_list is not None:
            for f in file_list:
                content = self._getFileContent(f['path'])
                # parenthesized so an empty note can never match (and/or precedence bug fix)
                if content != str() and ((search_type == 'and' and self._match(search_terms, content, 'AND'))
                                         or (search_type == 'or' and self._match(search_terms, content, 'OR'))):
                    new_list.append(f)
return new_list
def url_search(self, search_terms: list) -> list:
"""
Search Notes with bookmarks (URLs)
Args:
search_terms (list): Search terms in a list
Returns:
list: List of Notes found
"""
notes = self.notes_search(search_terms, 'and')
note_list = list()
if notes:
for f in notes:
note_title = f['title']
note_path = f['path']
content = self._getFileContent(f['path'])
matches = re.findall(r'\[(.*)\]\((https?.*)\)', content)
link_list = list()
# TODO: Implement url only match, links without markdown syntax
# url_only_matches = re.findall(r'https?://', content)
for m in matches:
url_title = m[0]
url = m[1]
link_list.append({'url_title': url_title, 'url': url})
note_list.append({'title': note_title, 'path': note_path, 'links': link_list})
return note_list
def getNoteTitle(self, path: str) -> str:
"""
Get the title of a note
Args:
path (str): Full path to note
Returns:
str: Title of the note
"""
content = self._getFileContent(path)
title = self._chop(os.path.basename(path), self.extension)
obj = re.search(r'^#{1}\s{1}(.*)', content, re.MULTILINE | re.UNICODE)
if obj is not None:
title = obj.group(1) if len(re.findall(r'\{.*\}', obj.group(1))) == 0 else title
# return title.encode('ascii', 'ignore').decode('ascii')
return title
@staticmethod
def _chop(theString: str, ext: str) -> str:
if theString.endswith(ext):
return theString[:-len(ext)]
return theString
def getFileMeta(self, path: str, item: str) -> str:
"""
Get file meta data of given file
Args:
path (str): file path
item (str): meta data name
Returns:
item str(): Metadata of the file
"""
# os.stat_float_times(True)
file_stats = os.stat(path)
switch = {
'ctime': file_stats.st_birthtime,
'mtime': file_stats.st_mtime,
'size': file_stats.st_size
}
return switch[item]
def getFilesListSorted(self, reverse: bool = True) -> list:
"""
Get list of files in directory as dict
Args:
reverse (boolean): True to sort reverse
Returns:
list(dict): sorted dict with file meta information
"""
err = 0
file_list = list()
try:
file_list = os.listdir(self.path)
# file_list = os.walk(self.path)
        except OSError as e:
            err = e.errno
if err == 0:
seq = list()
            for f in file_list:
                f_path = os.path.join(self.path, f)
                if not f.startswith('.') and f.endswith(self.extension):
                    seq.append({
                        'filename': f,
                        'path': f_path,
                        'title': self.getNoteTitle(f_path),
                        'ctime': self.getFileMeta(f_path, 'ctime'),
                        'mtime': self.getFileMeta(f_path, 'mtime'),
                        'size': self.getFileMeta(f_path, 'size')
                    })
sorted_file_list = sorted(seq, key=lambda k: k['mtime'], reverse=reverse)
return sorted_file_list
    def tagSearch(self, tag, sort_by: str = 'tag', reverse: bool = False) -> OrderedDict:
        """
        Search for notes with tag

        Args:
            tag (str): tag to search for in a note
            sort_by (str, optional): Sort results by. Defaults to 'tag'.
            reverse (bool, optional): Sort reverse. Defaults to False.

        Returns:
            OrderedDict: mapping of tag -> match count, ordered per sort_by/reverse
        """
i = {'tag': 0, 'count': 1}
matches = list()
sorted_file_list = self.getFilesListSorted()
regex = re.compile(
r'#{1}(\w+)\s?', re.I) if tag == '' else re.compile(r'#{1}(' + tag + r'\w*)\s?', re.I | re.UNICODE)
for f in sorted_file_list:
content = self._getFileContent(f['path'])
if content != str():
if self.search_yaml_tags_only:
match_obj = re.search(r'\bTags:.*', content, re.IGNORECASE | re.UNICODE)
if match_obj:
r = match_obj.group(0)
results = re.findall(regex, r)
matches.extend(results)
else:
results = re.findall(regex, content)
matches.extend(results)
counted_matches = Counter([v.lower() for v in matches])
# Sorted by match counter x[1] if sort by key (tag name) is required change to x[0]
sorted_matches = OrderedDict(
sorted(counted_matches.items(), key=lambda x: x[i[sort_by]], reverse=reverse))
return sorted_matches
def todoSearch(self, todo: str) -> list:
"""
Search for todos in md notes
Args:
todo (str): Search string
Returns:
list(dict): returns matches as list with dict
"""
matches = list()
sorted_file_list = self.getFilesListSorted()
regex = re.compile(r'[-|\*] {1}\[ \] {1}(.+)', re.I) if todo == '' else re.compile(
r'[-|\*] {1}\[ \] {1}(.*' + todo + '.+)', re.I)
for f in sorted_file_list:
content = self._getFileContent(f['path'])
if content != str():
results = re.findall(regex, content)
for i in results:
r_dict = {
'path': f['path'],
'todo': i.replace("*", ""),
'filename': f['filename'],
'title': f['title'],
'mtime': self.getFileMeta(f['path'], 'mtime'),
'ctime': self.getFileMeta(f['path'], 'ctime')
}
matches.append(r_dict)
ret_list_dict = sorted(matches, key=lambda k: k['mtime'], reverse=self.todo_newest_oldest)
return ret_list_dict
def _getFileContent(self, file_path: str) -> str:
"""
Read file content from md/txt file
Args:
file_path (str): Path to file to read
Returns:
str: content
"""
if str(file_path).endswith(self.extension):
with open(file_path, 'r') as c:
content = c.read()
else:
content = str()
return content
def isNoteTagged(self, file_path: str, tag: str) -> bool:
"""
Is the note tagged with tag?
Args:
file_path (str): path to note
tag (str): tag to search for
Returns:
boolean: True if note is tagged otherwise false
"""
match = False
with open(file_path, 'r') as c:
lines = c.readlines()[0:5]
for line in lines:
match_obj = re.search(r'Tags:.*' + tag, line, re.IGNORECASE)
if match_obj:
match = True
break
return match
@staticmethod
def get_search_config(q: str) -> tuple:
"""
Returns search config tuple
Args:
q (string): Search Query e.g. Searchterm1&Searchtem2
Returns:
tuple: Search Terms and operator
"""
if '&' in q:
s_terms = q.split('&')
s_type = 'and'
elif '|' in q:
s_terms = q.split('|')
s_type = 'or'
elif q == str():
s_terms = list()
s_type = 'or'
else:
s_terms = [q]
s_type = 'or'
return s_terms, s_type
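    # Illustrative parse results for get_search_config:
    #   Search.get_search_config('linux&vim')  # -> (['linux', 'vim'], 'and')
    #   Search.get_search_config('linux|vim')  # -> (['linux', 'vim'], 'or')
    #   Search.get_search_config('')           # -> ([], 'or')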
def getUrlScheme(self, f: str) -> str:
"""
Gets the URL Scheme setup in Alfred Preferences
Args:
f(str): md file to add at the end of url scheme
Returns:
str: URL scheme
"""
return self.strJoin(self.url_scheme, pathname2url(f))
class NewNote(Notes):
"""
Creates a new note with title, template and tags
Args:
note_title (str): Title of the Note
template_path (str): Path to the template used
tags (str): Tag line with format: #tag1 #tag2
content (str): Addtional content after Headline
"""
def __init__(self, note_title, template_path=str(), tags=str(), content=str()):
super(NewNote, self).__init__()
self.filename_format = self.getFilenameFormat()
self.tags = tags
self.content = content
self.note_title = note_title
self.note_path = self.getTargetFilePath(self.__normalize_filename(note_title))
self.template_path = self.getTemplate(template_path)
def getFilenameFormat(self):
"""
Get fileformat from WF env
Returns:
str: fileformat or fallback
"""
frmt_env = Tools.getEnv('filename_format')
if frmt_env is None or frmt_env.strip() == "":
frmt_env = '{title}'
return frmt_env
def getTargetFilePath(self, file_name: str) -> str:
"""
construct markdown file path
Returns:
str: markdown file path
"""
        def applyFilenameFormat(title: str):
            """Applies the configured filename format to the filename"""
            frmt = self.filename_format
            res = re.findall(r"\{[\.\-:%a-zA-Z]*\}", frmt)
            for r in res:
                if "%" in r:
                    date_f = r.replace('{', "").replace('}', "")
                    dte = self.getTodayDate(date_f)
                    frmt = frmt.replace(r, dte)
                elif r == '{title}':  # compare the token itself, not the whole match list
                    frmt = frmt.replace(r, title)
            return frmt
        file_name = file_name.strip()
file_name = applyFilenameFormat(file_name)
file_path = os.path.join(self.path, f"{file_name}{self.extension}")
if os.path.isfile(file_path):
new_file_name = Tools.strJoin(file_name, ' ', self.getTodayDate('%d-%m-%Y %H-%M-%S'))
file_path = os.path.join(self.path, f"{new_file_name}{self.extension}")
return file_path
def getDefaultTemplate(self) -> str:
"""
Read default template setting from environment variable
Returns:
str: default template file name
"""
        return 'template.md' if not self.default_template else self.default_template
def getTemplate(self, template_path: str) -> str:
"""
Get template path from previous wf step, reads env variable
Returns:
str: path to template.md
"""
notes_path = self.path
default_template = self.getDefaultTemplate()
return os.path.join(notes_path, default_template) if template_path == str() else template_path
def readTemplate(self, **kwargs: str) -> str:
"""
Read template markdkown file and fill placeholder defined in template
with data provides as kwargs
Args:
file_path (str): Path to Template file
Returns:
str: Content
"""
        if not self.template_tag or '#' not in self.template_tag:
            self.template_tag = '#Template'
if os.path.exists(self.template_path):
with open(self.template_path, "r") as f:
content = f.read()
else:
content = self.FALLBACK_CONTENT
content = content.replace(self.template_tag, '')
for k, v in kwargs.items():
content = content.replace('{' + k + '}', v)
tag_line = f'Tags: {self.tags} '
if self.tags:
content = content.replace('Tags: ', tag_line)
return content
def __normalize_filename(self, f: str) -> str:
"""
Replace special characters in filename of md file
Returns:
str: filename
"""
return self.strReplace(f, self.CHAR_REPLACEMENT_MAP, lowercase=False)
def createNote(self) -> str:
"""
Creates the markdown note
Returns:
str: full path to notes
"""
try:
with open(self.note_path, "w+") as f:
default_date = self.getDefaultDate()
file_content = self.readTemplate(
date=self.getTodayDate(default_date), title=self.note_title)
file_content = f"{file_content}\n{self.content}" if self.content else file_content
f.write(file_content)
return self.note_path
except IOError as e:
sys.stderr.write(str(e))
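# --- Usage sketch (editor addition, not part of the workflow code) ---
# Inside the Alfred workflow environment (which supplies the variables that
# Tools.getEnv reads), creating a note boils down to:
#
#   note = NewNote('My Note', tags='#inbox', content='First line of the body')
#   path = note.createNote()  # returns the path of the new markdown file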
|
52637feb5d6208375497efea90e48b0273015e7b
|
85373d45a83e4096affafa4f4e5b400787413e57
|
/src/programy/storage/stores/sql/dao/usergroup.py
|
0cda83d8aab8cf5317375f4f876cab1aa2b61ae1
|
[
"MIT"
] |
permissive
|
keiffster/program-y
|
a02bb9d8278835547cc875f4f9cd668d5b1f44da
|
fc7b0a3afa4fa6ed683e0c817a9aa89f9543bb20
|
refs/heads/master
| 2023-08-23T13:55:39.255535
| 2022-12-13T09:51:57
| 2022-12-13T09:51:57
| 74,462,571
| 379
| 173
|
NOASSERTION
| 2023-05-23T00:51:21
| 2016-11-22T10:43:41
|
Python
|
UTF-8
|
Python
| false
| false
| 3,680
|
py
|
usergroup.py
|
"""
Copyright (c) 2016-2020 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
from programy.storage.stores.sql.base import Base
from programy.storage.stores.utils import DAOUtils
class AuthoriseUser(Base):
__tablename__ = 'authusers'
id = Column(Integer, primary_key=True)
name = Column(String(48))
def __repr__(self):
return "<AuthoriseUser(id='%s', name='%s')>" % (DAOUtils.valid_id(self.id), self.name)
class UserRole(Base):
__tablename__ = 'userroles'
id = Column(Integer, primary_key=True)
user = Column(String(48))
role = Column(String(48))
def __repr__(self):
return "<UserRole(id='%s', user='%s', role='%s')>" % (DAOUtils.valid_id(self.id), self.user, self.role)
class UserGroup(Base):
__tablename__ = 'usergroups'
id = Column(Integer, primary_key=True)
user = Column(String(48))
group = Column(String(48))
def __repr__(self):
return "<UserGroup(id='%s', user='%s', group='%s')>" % (DAOUtils.valid_id(self.id), self.user, self.group)
class AuthoriseGroup(Base):
__tablename__ = 'authgroups'
id = Column(Integer, primary_key=True)
name = Column(String(48))
parent = Column(String(48), nullable=True)
def __repr__(self):
return "<AuthoriseGroup(id='%s', name='%s', parent='%s')>" % (DAOUtils.valid_id(self.id),
self.name,
self.parent)
class GroupGroup(Base):
__tablename__ = 'groupgroups'
id = Column(Integer, primary_key=True)
group = Column(String(48))
subgroup = Column(String(48))
def __repr__(self):
return "<GroupGroup(id='%s', group='%s', subgroup='%s')>" % (DAOUtils.valid_id(self.id),
self.group,
self.subgroup)
class GroupRole(Base):
__tablename__ = 'grouproles'
id = Column(Integer, primary_key=True)
group = Column(String(48))
role = Column(String(48))
def __repr__(self):
return "<GroupRole(id='%s', group='%s', role='%s')>" % (DAOUtils.valid_id(self.id), self.group, self.role)
class GroupUser(Base):
__tablename__ = 'groupusers'
id = Column(Integer, primary_key=True)
group = Column(String(48))
user = Column(String(48))
def __repr__(self):
return "<GroupUser(id='%s', group='%s', user='%s')>" % (DAOUtils.valid_id(self.id), self.group, self.user)
|
4ba5f405021c104572b3ab19ce59dd16af3cb693
|
56d6257e932e1397ab03b1e7ccc6231378665b04
|
/KeyLab_mkII/view_control.py
|
98f8942a8213ed1dc6ed5704bc668fff5ec53b8b
|
[] |
no_license
|
gluon/AbletonLive10.1_MIDIRemoteScripts
|
e6c8dc4956cff9630aaa36f3667994387ad1d0cf
|
2468b51eba7e5082b06f9e381b3e72027c5f272c
|
refs/heads/master
| 2023-01-10T18:37:46.504180
| 2022-12-23T09:21:48
| 2022-12-23T09:21:48
| 213,423,555
| 205
| 59
| null | 2021-02-12T16:15:01
| 2019-10-07T15:44:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,267
|
py
|
view_control.py
|
#Embedded file name: /Users/versonator/Jenkins/live/output/Live/mac_64_static/Release/python-bundle/MIDI Remote Scripts/KeyLab_mkII/view_control.py
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.base import find_if, listens, liveobj_valid
from ableton.v2.control_surface.control import ButtonControl
from KeyLab_Essential.view_control import ViewControlComponent as ViewControlComponentBase
MAIN_VIEWS = (u'Session', u'Arranger')
class ViewControlComponent(ViewControlComponentBase):
document_view_toggle_button = ButtonControl()
def __init__(self, *a, **k):
super(ViewControlComponent, self).__init__(*a, **k)
self.__on_focused_document_view_changed.subject = self.application.view
self.__on_focused_document_view_changed()
@document_view_toggle_button.pressed
def document_view_toggle_button(self, _):
is_session_visible = self.application.view.is_view_visible(u'Session', main_window_only=True)
self.show_view(u'Arranger' if is_session_visible else u'Session')
@listens(u'focused_document_view')
def __on_focused_document_view_changed(self):
self.document_view_toggle_button.color = u'View.{}'.format(self.application.view.focused_document_view)
|
0e436897b3d34f3bce2f4abc512d6c0b400711de
|
584f7b51d7cd529448e2fc0147557e26931ab17e
|
/sphinx-sources/Examples/tests/LensTimingTest.py
|
30068a0409b61cba012c7c8bd77c58bafc8b32f0
|
[
"BSD-3-Clause"
] |
permissive
|
opticspy/lightpipes
|
8ca0d2221a1b893de5e51fec9061e90b9145f5f8
|
f4ffdedb3ab2f9b5ae5a9a8e37985d2a7f8bb2ef
|
refs/heads/master
| 2023-09-04T19:07:11.376631
| 2023-09-04T15:24:55
| 2023-09-04T15:24:55
| 80,127,706
| 191
| 55
|
BSD-3-Clause
| 2023-08-23T00:45:33
| 2017-01-26T15:39:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,650
|
py
|
LensTimingTest.py
|
# -*- coding: utf-8 -*-
"""
Benchmarking script to make sure the port of the code to Python
produces correct results, and to compare execution times.
Created on Mon Apr 6 22:38:34 2020
@author: Lenny
"""
import numpy as np
import matplotlib.pyplot as plt
from LightPipes import tictoc
from LightPipes.units import * #m, mm, ...
import LightPipes as lp
"""reference LightPipes (Cpp) renamed and installed with "setup.py develop" as
oldLightPipes"""
import oldLightPipes as olp
#******** Simulation parameters *************
wavelength=1*um
size=20.0*mm
N = 1000
f=10*cm
f1=10*m
f2=f1*f/(f1-f)
frac=f/f1
newsize=frac*size
w=5*mm
#********* Run for new python LP *******
F = lp.Begin(size, wavelength, N)
F = lp.RectAperture(w,w,0,0,0,F)
#1) Using Lens and Fresnel:
with tictoc.printtimer('Lens and Fresnel (N={})'.format(N)):
F1 = lp.Lens(f,0,0,F)
F1 = lp.Fresnel(f,F1)
I1 = lp.Intensity(0, F1)
phi1 = lp.Phase(F1)
# phi1 = lp.PhaseUnwrap(phi1)
#2) Using Lens + LensFresnel and Convert:
with tictoc.printtimer('With LensF+Convert (N={})'.format(N)):
F2 = lp.Lens(f1,0,0,F)
F2 = lp.LensFresnel(f2,f,F2)
F2 = lp.Convert(F2)
I2 = lp.Intensity(0,F2)
phi2 = lp.Phase(F2)
# phi2 = lp.PhaseUnwrap(phi2)
x_mm = F.xvalues/mm
#****** Run for reference cpp OLP *******
F = olp.Begin(size, wavelength, N)
F = olp.RectAperture(w,w,0,0,0,F)
#1) Using Lens and Fresnel:
with tictoc.printtimer('Lens and Fresnel OLP (N={})'.format(N)):
F1 = olp.Lens(f,0,0,F)
F1 = olp.Fresnel(f,F1)
I1olp = olp.Intensity(0, F1)
phi1 = olp.Phase(F1)
phi1 = olp.PhaseUnwrap(phi1)
#2) Using Lens + LensFresnel and Convert:
with tictoc.printtimer('With LensF+Convert (N={})'.format(N)):
F2 = olp.Lens(f1,0,0,F)
F2 = olp.LensFresnel(f2,f,F2)
F2 = olp.Convert(F2)
I2olp = olp.Intensity(0,F2)
phi2olp = olp.Phase(F2)
phi2olp = olp.PhaseUnwrap(phi2olp)
I1olp = np.asarray(I1olp)
I2olp = np.asarray(I2olp)
x_mm=[]
for i in range(N):
x_mm.append((-size/2+i*size/N)/mm)
x_mm = np.asarray(x_mm)
#*********** Plot results *******************
I1diff = I1 - I1olp
I2diff = I2 - I2olp
fig=plt.figure(figsize=(15,6))
ax1 = fig.add_subplot(131)
ax1.set_title('I1 - I1olp')
ax2 = fig.add_subplot(132)
ax2.set_title('I2 - I2olp')
ax3 = fig.add_subplot(133)
ax3.set_title('lineout in middle')
p1 = ax1.imshow(I1diff,cmap='rainbow'); ax1.axis('off')
fig.colorbar(p1, ax=ax1)
p2 = ax2.imshow(I2diff,cmap='rainbow'); ax2.axis('off')
fig.colorbar(p2, ax=ax2)
ax3.plot(x_mm, I1[int(N/2),:]); ax3.set_xlabel('x [mm]'); ax3.set_ylabel('Intensity [a.u.]')
ax3.plot(x_mm, I1olp[int(N/2),:]); ax3.legend(['LP','OLP'])
ax3.grid('on')
plt.show()
|
16dd4646aae5daf97cfd3dcaee140acee4c496b1
|
baa2c6f22ff563d417e34692bf3345077eb8fa5f
|
/IPython/core/magic_arguments.py
|
24dd5418767244c39c3e7d0f8aff6d0071180a01
|
[
"BSD-3-Clause"
] |
permissive
|
ipython/ipython
|
c42ea223b6e391bb7dd39888cb959d4d5d6b21a1
|
e5103f971233fd66b558585cce7a4f52a716cd56
|
refs/heads/main
| 2023-08-30T18:27:18.436521
| 2023-08-29T12:16:00
| 2023-08-29T12:16:00
| 658,518
| 13,673
| 4,729
|
BSD-3-Clause
| 2023-09-12T20:22:09
| 2010-05-10T04:46:06
|
Python
|
UTF-8
|
Python
| false
| false
| 9,734
|
py
|
magic_arguments.py
|
''' A decorator-based method of constructing IPython magics with `argparse`
option handling.
New magic functions can be defined like so::
from IPython.core.magic_arguments import (argument, magic_arguments,
parse_argstring)
@magic_arguments()
@argument('-o', '--option', help='An optional argument.')
@argument('arg', type=int, help='An integer positional argument.')
def magic_cool(self, arg):
""" A really cool magic command.
"""
args = parse_argstring(magic_cool, arg)
...
The `@magic_arguments` decorator marks the function as having argparse arguments.
The `@argument` decorator adds an argument using the same syntax as argparse's
`add_argument()` method. More sophisticated uses may also require the
`@argument_group` or `@kwds` decorator to customize the formatting and the
parsing.
Help text for the magic is automatically generated from the docstring and the
arguments::
In[1]: %cool?
%cool [-o OPTION] arg
A really cool magic command.
positional arguments:
arg An integer positional argument.
optional arguments:
-o OPTION, --option OPTION
An optional argument.
Here is an elaborated example that uses default parameters in `argument` and calls the `args` in the cell magic::
from IPython.core.magic import register_cell_magic
from IPython.core.magic_arguments import (argument, magic_arguments,
parse_argstring)
@magic_arguments()
@argument(
"--option",
"-o",
help=("Add an option here"),
)
@argument(
"--style",
"-s",
default="foo",
help=("Add some style arguments"),
)
@register_cell_magic
def my_cell_magic(line, cell):
args = parse_argstring(my_cell_magic, line)
print(f"{args.option=}")
print(f"{args.style=}")
print(f"{cell=}")
In a jupyter notebook, this cell magic can be executed like this::
%%my_cell_magic -o Hello
print("bar")
i = 42
Inheritance diagram:
.. inheritance-diagram:: IPython.core.magic_arguments
:parts: 3
'''
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011, IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import argparse
import re
# Our own imports
from IPython.core.error import UsageError
from IPython.utils.decorators import undoc
from IPython.utils.process import arg_split
from IPython.utils.text import dedent
NAME_RE = re.compile(r"[a-zA-Z][a-zA-Z0-9_-]*$")
@undoc
class MagicHelpFormatter(argparse.RawDescriptionHelpFormatter):
"""A HelpFormatter with a couple of changes to meet our needs.
"""
# Modified to dedent text.
def _fill_text(self, text, width, indent):
return argparse.RawDescriptionHelpFormatter._fill_text(self, dedent(text), width, indent)
# Modified to wrap argument placeholders in <> where necessary.
def _format_action_invocation(self, action):
if not action.option_strings:
metavar, = self._metavar_formatter(action, action.dest)(1)
return metavar
else:
parts = []
# if the Optional doesn't take a value, format is:
# -s, --long
if action.nargs == 0:
parts.extend(action.option_strings)
# if the Optional takes a value, format is:
# -s ARGS, --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
# IPYTHON MODIFICATION: If args_string is not a plain name, wrap
# it in <> so it's valid RST.
if not NAME_RE.match(args_string):
args_string = "<%s>" % args_string
for option_string in action.option_strings:
parts.append('%s %s' % (option_string, args_string))
return ', '.join(parts)
# Override the default prefix ('usage') to our % magic escape,
# in a code block.
def add_usage(self, usage, actions, groups, prefix="::\n\n %"):
super(MagicHelpFormatter, self).add_usage(usage, actions, groups, prefix)
class MagicArgumentParser(argparse.ArgumentParser):
""" An ArgumentParser tweaked for use by IPython magics.
"""
def __init__(self,
prog=None,
usage=None,
description=None,
epilog=None,
parents=None,
formatter_class=MagicHelpFormatter,
prefix_chars='-',
argument_default=None,
conflict_handler='error',
add_help=False):
if parents is None:
parents = []
super(MagicArgumentParser, self).__init__(prog=prog, usage=usage,
description=description, epilog=epilog,
parents=parents, formatter_class=formatter_class,
prefix_chars=prefix_chars, argument_default=argument_default,
conflict_handler=conflict_handler, add_help=add_help)
def error(self, message):
""" Raise a catchable error instead of exiting.
"""
raise UsageError(message)
def parse_argstring(self, argstring):
""" Split a string into an argument list and parse that argument list.
"""
argv = arg_split(argstring)
return self.parse_args(argv)
def construct_parser(magic_func):
""" Construct an argument parser using the function decorations.
"""
kwds = getattr(magic_func, 'argcmd_kwds', {})
if 'description' not in kwds:
kwds['description'] = getattr(magic_func, '__doc__', None)
arg_name = real_name(magic_func)
parser = MagicArgumentParser(arg_name, **kwds)
# Reverse the list of decorators in order to apply them in the
# order in which they appear in the source.
group = None
for deco in magic_func.decorators[::-1]:
result = deco.add_to_parser(parser, group)
if result is not None:
group = result
# Replace the magic function's docstring with the full help text.
magic_func.__doc__ = parser.format_help()
return parser
def parse_argstring(magic_func, argstring):
""" Parse the string of arguments for the given magic function.
"""
return magic_func.parser.parse_argstring(argstring)
def real_name(magic_func):
""" Find the real name of the magic.
"""
magic_name = magic_func.__name__
if magic_name.startswith('magic_'):
magic_name = magic_name[len('magic_'):]
return getattr(magic_func, 'argcmd_name', magic_name)
class ArgDecorator(object):
""" Base class for decorators to add ArgumentParser information to a method.
"""
def __call__(self, func):
if not getattr(func, 'has_arguments', False):
func.has_arguments = True
func.decorators = []
func.decorators.append(self)
return func
def add_to_parser(self, parser, group):
""" Add this object's information to the parser, if necessary.
"""
pass
class magic_arguments(ArgDecorator):
""" Mark the magic as having argparse arguments and possibly adjust the
name.
"""
def __init__(self, name=None):
self.name = name
def __call__(self, func):
if not getattr(func, 'has_arguments', False):
func.has_arguments = True
func.decorators = []
if self.name is not None:
func.argcmd_name = self.name
# This should be the first decorator in the list of decorators, thus the
# last to execute. Build the parser.
func.parser = construct_parser(func)
return func
class ArgMethodWrapper(ArgDecorator):
"""
Base class to define a wrapper for ArgumentParser method.
Child class must define either `_method_name` or `add_to_parser`.
"""
_method_name: str
def __init__(self, *args, **kwds):
self.args = args
self.kwds = kwds
def add_to_parser(self, parser, group):
""" Add this object's information to the parser.
"""
if group is not None:
parser = group
getattr(parser, self._method_name)(*self.args, **self.kwds)
return None
class argument(ArgMethodWrapper):
""" Store arguments and keywords to pass to add_argument().
Instances also serve to decorate command methods.
"""
_method_name = 'add_argument'
class defaults(ArgMethodWrapper):
""" Store arguments and keywords to pass to set_defaults().
Instances also serve to decorate command methods.
"""
_method_name = 'set_defaults'
class argument_group(ArgMethodWrapper):
""" Store arguments and keywords to pass to add_argument_group().
Instances also serve to decorate command methods.
"""
def add_to_parser(self, parser, group):
""" Add this object's information to the parser.
"""
return parser.add_argument_group(*self.args, **self.kwds)
class kwds(ArgDecorator):
""" Provide other keywords to the sub-parser constructor.
"""
def __init__(self, **kwds):
self.kwds = kwds
def __call__(self, func):
func = super(kwds, self).__call__(func)
func.argcmd_kwds = self.kwds
return func
__all__ = ['magic_arguments', 'argument', 'argument_group', 'kwds',
'parse_argstring']
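# --- Usage sketch (editor addition, not part of IPython) ---
# parse_argstring needs only the parser attached by @magic_arguments, so the
# machinery can be exercised outside a live IPython session. 'magic_demo' is a
# hypothetical magic defined here purely for illustration.
if __name__ == '__main__':
    @magic_arguments()
    @argument('-o', '--option', help='An optional argument.')
    @argument('arg', type=int, help='An integer positional argument.')
    def magic_demo(self, line):
        return parse_argstring(magic_demo, line)

    args = magic_demo(None, '--option foo 42')
    print(args.option, args.arg)  # prints: foo 42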
|
1369c7632a4fb574d23dd43d768c105c8d84a0a2
|
725ac5a0bf72829be627bf8dc82fdc51ba0f94ae
|
/NER/BiLSTM_CRF_Ner/train.py
|
92e85436e6fa715851fbca8e5678457cb13e937d
|
[] |
no_license
|
shawroad/NLP_pytorch_project
|
fa14b6e4a156229765e1d552901d0492d8e1def3
|
1272fed2dc8fef78a9ded0f1ae1644d613a3b57b
|
refs/heads/master
| 2023-06-25T02:37:35.503251
| 2023-06-12T10:57:11
| 2023-06-12T10:57:11
| 229,694,655
| 530
| 104
| null | 2020-12-08T09:21:47
| 2019-12-23T06:54:29
|
Python
|
UTF-8
|
Python
| false
| false
| 5,087
|
py
|
train.py
|
# -*- coding: utf-8 -*-
# @Time : 2020/6/29 19:35
# @Author : xiaolu
# @FileName: train.py
# @Software: PyCharm
import pickle
import pdb
import numpy as np
from sklearn.metrics import precision_score, recall_score, f1_score, classification_report
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.optim as optim
from model import NERLSTM_CRF
from utils import get_tags, format_result
from config import Config
from rlog import _log_normal, _log_warning, _log_info, _log_error, _log_toomuch, _log_bg_blue, _log_bg_pp, _log_fg_yl, \
_log_fg_cy, _log_black, rainbow
import time
class NERDataset(Dataset):
def __init__(self, X, Y):
self.data = [{'x': X[i], 'y': Y[i]} for i in range(X.shape[0])]
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
def train():
criterion = nn.CrossEntropyLoss(ignore_index=0)
optimizer = optim.Adam(model.parameters(), lr=Config.lr, weight_decay=Config.weight_decay)
for epoch in range(Config.max_epoch):
model.train()
model.to(Config.device)
for index, batch in enumerate(train_dataloader):
optimizer.zero_grad()
X = batch['x'].long().to(Config.device) # torch.Size([4, 60]) (batch_size, max_len)
y = batch['y'].long().to(Config.device) # torch.Size([4, 60]) (batch_size, max_len)
# CRF
loss = model.log_likelihood(X, y)
loss.backward()
torch.nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=10)
optimizer.step()
now_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
o_str = 'time: {}, epoch: {}, step: {}, loss: {:6f}'.format(now_time, epoch, index, loss.item())
rainbow(o_str)
aver_loss = 0
preds, labels = [], []
for index, batch in enumerate(valid_dataloader):
model.eval()
val_x, val_y = batch['x'].long().to(Config.device), batch['y'].long().to(Config.device)
predict = model(val_x)
# CRF
loss = model.log_likelihood(val_x, val_y)
aver_loss += loss.item()
# Count the non-zero entries, i.e. the lengths of the real label sequences
leng = []
for i in val_y.cpu():
tmp = []
for j in i:
if j.item() > 0:
tmp.append(j.item())
leng.append(tmp)
for index, i in enumerate(predict):
preds += i[:len(leng[index])]
for index, i in enumerate(val_y.tolist()):
labels += i[:len(leng[index])]
aver_loss /= (len(valid_dataloader) * 64)
precision = precision_score(labels, preds, average='macro')
recall = recall_score(labels, preds, average='macro')
f1 = f1_score(labels, preds, average='macro')
report = classification_report(labels, preds)
print(report)
torch.save(model.state_dict(), './save_model/bilstm_ner.bin')
def predict(tag, input_str=""):
model.load_state_dict(torch.load('./save_model/bilstm_ner.bin'))
if not input_str:
input_str = input("请输入文本: ")
input_vec = [word2id.get(i, 0) for i in input_str]
# convert to tensor
sentences = torch.tensor(input_vec).view(1, -1)
paths = model(sentences)
entities = []
tags = get_tags(paths[0], tag, tag2id)
entities += format_result(tags, input_str, tag)
print(entities)
if __name__ == '__main__':
# 1. Load the dataset
with open(Config.pickle_path, 'rb') as inp:
word2id = pickle.load(inp)
id2word = pickle.load(inp)
tag2id = pickle.load(inp)
id2tag = pickle.load(inp)
x_train = pickle.load(inp)
y_train = pickle.load(inp)
x_test = pickle.load(inp)
y_test = pickle.load(inp)
x_valid = pickle.load(inp)
y_valid = pickle.load(inp)
print("train len:", len(x_train)) # train len: 24271
print("test len:", len(x_test)) # test len: 7585
print("valid len", len(x_valid)) # valid len 6068
train_dataset = NERDataset(x_train, y_train)
valid_dataset = NERDataset(x_valid, y_valid)
test_dataset = NERDataset(x_test, y_test)
train_dataloader = DataLoader(train_dataset, batch_size=Config.batch_size, shuffle=True, num_workers=Config.num_workers)
valid_dataloader = DataLoader(valid_dataset, batch_size=Config.batch_size, shuffle=True, num_workers=Config.num_workers)
test_dataloader = DataLoader(test_dataset, batch_size=Config.batch_size, shuffle=True, num_workers=Config.num_workers)
model = NERLSTM_CRF(Config.embedding_dim, Config.hidden_dim, Config.dropout, word2id, tag2id)
# train()  # run training
predict(tag='ns', input_str='我从西安来,你爷爷的大名叫路路, 来给我预测预测。')
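# --- Usage sketch (editor addition, not part of the original script) ---
# NERDataset only needs two aligned arrays of shape (n_samples, max_len), so
# the batching can be smoke-tested without the pickled corpus; the sizes below
# are made up for illustration.
#
#   import numpy as np
#   X = np.random.randint(1, 100, size=(8, 60))  # fake token ids
#   Y = np.random.randint(1, 10, size=(8, 60))   # fake tag ids
#   loader = DataLoader(NERDataset(X, Y), batch_size=4, shuffle=True)
#   batch = next(iter(loader))
#   print(batch['x'].shape, batch['y'].shape)    # torch.Size([4, 60]) twice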
|
d804bbcdd4fa41f6b51f91060f38347c2ceac22c
|
3982e6daf88e453c726f6b39a081fc37ce15a08a
|
/discovery-provider/src/models/users/usdc_purchase.py
|
e559a5d7994521b5730da9086deb9c07961b06a1
|
[
"Apache-2.0"
] |
permissive
|
AudiusProject/audius-protocol
|
45808e11082608ad5b76a425d287cb6d94a6dab0
|
7cf1d8e378520460d24a7cc8c29e9927c0944cb3
|
refs/heads/main
| 2023-08-09T10:34:28.850436
| 2023-08-09T04:28:17
| 2023-08-09T04:28:17
| 201,821,771
| 531
| 108
|
NOASSERTION
| 2023-09-14T21:27:52
| 2019-08-11T22:31:43
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 1,035
|
py
|
usdc_purchase.py
|
import enum
from sqlalchemy import BigInteger, Column, DateTime, Enum, Integer, String, text
from src.models.base import Base
from src.models.model_utils import RepresentableMixin
class PurchaseType(str, enum.Enum):
track = "track"
playlist = "playlist"
album = "album"
class USDCPurchase(Base, RepresentableMixin):
__tablename__ = "usdc_purchases"
slot = Column(Integer, primary_key=True, nullable=False, index=True)
signature = Column(String, primary_key=True, nullable=False)
seller_user_id = Column(Integer, nullable=False, index=True)
buyer_user_id = Column(Integer, nullable=False, index=True)
amount = Column(BigInteger, nullable=False)
content_type = Column(Enum(PurchaseType), nullable=False, index=True)
content_id = Column(Integer, nullable=False)
created_at = Column(
DateTime, nullable=False, index=True, server_default=text("CURRENT_TIMESTAMP")
)
updated_at = Column(
DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP")
)
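# --- Usage sketch (editor addition, not part of the Audius codebase) ---
# A hypothetical one-row write against an in-memory SQLite engine; it assumes
# the discovery-provider package is importable so that Base above resolves.
if __name__ == '__main__':
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine('sqlite:///:memory:')
    USDCPurchase.__table__.create(engine)
    session = sessionmaker(bind=engine)()
    session.add(USDCPurchase(slot=1, signature='sig', seller_user_id=1,
                             buyer_user_id=2, amount=100,
                             content_type=PurchaseType.track, content_id=7))
    session.commit()
    print(session.query(USDCPurchase).count())  # 1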
|
f5f24a1bac8cf14449675e3a6354462807646fcb
|
f3806d9fb54773908cd9704121a543b114470aca
|
/angr/engines/soot/values/local.py
|
e252c35c9db7c6848e385922c24783e20b6cea54
|
[
"BSD-2-Clause"
] |
permissive
|
angr/angr
|
8ae95fceca51b0a001de56477d984dd01193ac1d
|
37e8ca1c3308ec601ad1d7c6bc8081ff38a7cffd
|
refs/heads/master
| 2023-08-17T03:15:21.007865
| 2023-08-15T18:44:57
| 2023-08-15T18:44:57
| 40,328,394
| 7,184
| 1,306
|
BSD-2-Clause
| 2023-09-14T20:14:23
| 2015-08-06T21:46:55
|
Python
|
UTF-8
|
Python
| false
| false
| 385
|
py
|
local.py
|
from .base import SimSootValue
class SimSootValue_Local(SimSootValue):
__slots__ = ["id", "type"]
def __init__(self, name, type_):
super().__init__()
self.id = name
self.type = type_
def __repr__(self):
return self.id
@classmethod
def from_sootvalue(cls, soot_value, state):
return cls(soot_value.name, soot_value.type)
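# --- Usage sketch (editor addition, not part of angr) ---
# SimSootValue_Local is just a named, typed handle, so it can be constructed
# directly when building Soot-level expressions by hand:
#
#   local = SimSootValue_Local('r0', 'int')
#   print(local, local.type)  # r0 int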
|
dc40452da4cde57bd1cce2f6054503cb43d061ed
|
86f3973554eb61b12528835851cbdc96aba9ccc0
|
/io_scene_xray/formats/dm/create.py
|
faabba09ad5f56f2fd8658e3f4179e9a4025e9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
PavelBlend/blender-xray
|
02d68e424ae9088221bafc1d0d9019690323d9da
|
a3abb9eb805182eec8ed8de4058dd744aee0e291
|
refs/heads/develop
| 2023-09-03T15:10:56.022070
| 2023-08-22T17:50:23
| 2023-08-22T17:50:23
| 20,459,902
| 150
| 40
|
BSD-2-Clause
| 2023-08-10T15:01:24
| 2014-06-03T21:12:53
|
Python
|
UTF-8
|
Python
| false
| false
| 8,788
|
py
|
create.py
|
# standard modules
import os
# blender modules
import bpy
import bmesh
# addon modules
from ... import text
from ... import log
from ... import utils
from ... import rw
def create_object(object_name):
bpy_mesh = bpy.data.meshes.new(object_name)
bpy_object = bpy.data.objects.new(object_name, bpy_mesh)
bpy_object.xray.is_details = True
utils.version.link_object(bpy_object)
utils.stats.created_obj()
utils.stats.created_msh()
return bpy_object, bpy_mesh
def check_estimated_material(material, det_model):
if not material.name.startswith(det_model.texture):
return False
if material.xray.eshader != det_model.shader:
return False
return True
def check_estimated_material_texture(material, det_model):
texture_filepart = det_model.texture.replace('\\', os.path.sep)
texture_found = False
if utils.version.IS_28:
texture_nodes = []
for node in material.node_tree.nodes:
if node.type in utils.version.IMAGE_NODES:
texture_nodes.append(node)
if len(texture_nodes) == 1:
texture_node = texture_nodes[0]
if texture_node.image:
if texture_filepart in texture_node.image.filepath:
texture_found = True
else:
for texture_slot in material.texture_slots:
if not texture_slot:
continue
if not hasattr(texture_slot.texture, 'image'):
continue
if not texture_slot.texture.image:
continue
if texture_filepart not in texture_slot.texture.image.filepath:
continue
texture_found = True
break
return texture_found
def find_bpy_texture(det_model, abs_image_path, alternative_image_path):
bpy_texture = bpy.data.textures.get(det_model.texture)
if bpy_texture:
if not hasattr(bpy_texture, 'image'):
bpy_texture = None
elif not bpy_texture.image:
bpy_texture = None
elif bpy_texture.image.filepath != abs_image_path:
if bpy_texture.image.filepath != alternative_image_path:
bpy_texture = None
return bpy_texture
def create_bpy_texture(det_model, bpy_material, context):
bpy_texture = bpy.data.textures.new(det_model.texture, type='IMAGE')
bpy_texture.use_preview_alpha = True
bpy_texture_slot = bpy_material.texture_slots.add()
bpy_texture_slot.texture = bpy_texture
bpy_texture_slot.texture_coords = 'UV'
bpy_texture_slot.uv_layer = det_model.mesh.uv_map_name
bpy_texture_slot.use_map_color_diffuse = True
bpy_texture_slot.use_map_alpha = True
bpy_image = context.image(det_model.texture)
bpy_texture.image = bpy_image
utils.stats.created_tex()
def create_material(det_model, abs_image_path, context):
bpy_material = bpy.data.materials.new(det_model.texture)
bpy_material.xray.eshader = det_model.shader
bpy_material.xray.version = context.version
utils.stats.created_mat()
if utils.version.IS_28:
bpy_material.use_nodes = True
bpy_material.blend_method = 'CLIP'
node_tree = bpy_material.node_tree
# remove material nodes
node_tree.nodes.clear()
princ_shader = utils.material.create_mat_nodes(bpy_material)
bpy_image = context.image(det_model.texture)
# texture node
texture_node = node_tree.nodes.new('ShaderNodeTexImage')
texture_node.name = det_model.texture
texture_node.label = det_model.texture
texture_node.image = bpy_image
texture_node.select = False
texture_node.location.x = princ_shader.location.x - 500.0
# link nodes
node_tree.links.new(
texture_node.outputs['Color'],
princ_shader.inputs['Base Color']
)
node_tree.links.new(
texture_node.outputs['Alpha'],
princ_shader.inputs['Alpha']
)
else:
bpy_material.use_shadeless = True
bpy_material.use_transparency = True
bpy_material.alpha = 0.0
alternative_image_path = os.path.join(
os.path.dirname(det_model.file_path),
det_model.texture + '.dds'
)
bpy_texture = utils.tex.search_texture_by_tex_path(det_model.texture, abs_image_path)
if bpy_texture is None:
create_bpy_texture(det_model, bpy_material, context)
else:
bpy_texture_slot = bpy_material.texture_slots.add()
bpy_texture_slot.texture = bpy_texture
return bpy_material
def search_material(context, det_model, file_path=None):
abs_image_path = os.path.abspath(os.path.join(
context.tex_folder,
det_model.texture + '.dds'
))
bpy_material = None
det_model.file_path = file_path
det_model.context = context
for material in bpy.data.materials:
if not check_estimated_material(material, det_model):
continue
if not check_estimated_material_texture(material, det_model):
continue
bpy_material = material
break
if not bpy_material:
bpy_material = create_material(det_model, abs_image_path, context)
return bpy_material
def reconstruct_mesh(vertices, uvs, triangles):
# remove duplicate vertices
loaded_vertices = {}
remap_vertices = []
remap_indices = {}
remap_index = 0
for vertex_index, vertex_coord in enumerate(vertices):
if vertex_coord in loaded_vertices:
remap_indices[vertex_index] = loaded_vertices[vertex_coord]
else:
loaded_vertices[vertex_coord] = remap_index
remap_indices[vertex_index] = remap_index
remap_vertices.append(vertex_coord)
remap_index += 1
# generate new triangles indices and uvs
remap_triangles = []
remap_uvs = []
for vert_1, vert_2, vert_3 in triangles:
remap_triangles.append((
remap_indices[vert_1],
remap_indices[vert_2],
remap_indices[vert_3]
))
remap_uvs.extend((
uvs[vert_1],
uvs[vert_2],
uvs[vert_3]
))
return remap_vertices, remap_uvs, remap_triangles
def read_mesh_data(packed_reader, det_model):
# read vertices coordinates and uvs
S_FFFFF = rw.read.PackedReader.prep('5f')
vertices = []
uvs = []
for _ in range(det_model.mesh.vertices_count):
co_x, co_y, co_z, co_u, co_v = packed_reader.getp(S_FFFFF)
vertices.append((co_x, co_z, co_y))
uvs.append((co_u, 1.0 - co_v))
# read triangles indices
S_HHH = rw.read.PackedReader.prep('3H')
triangles = []
for _ in range(det_model.mesh.indices_count // 3):
vert_1, vert_2, vert_3 = packed_reader.getp(S_HHH)
triangles.append((vert_1, vert_3, vert_2))
return vertices, uvs, triangles
def create_geometry(b_mesh, vertices, triangles):
# create vertices
for vertex_coord in vertices:
b_mesh.verts.new(vertex_coord)
b_mesh.verts.ensure_lookup_table()
# create triangles
bmesh_faces = []
for vert_1, vert_2, vert_3 in triangles:
try:
bmesh_face = b_mesh.faces.new((
b_mesh.verts[vert_1],
b_mesh.verts[vert_2],
b_mesh.verts[vert_3]
))
bmesh_face.smooth = True
bmesh_faces.append(bmesh_face)
except ValueError:
bmesh_faces.append(None)
b_mesh.faces.ensure_lookup_table()
return bmesh_faces
def create_uv(b_mesh, det_model, bmesh_faces, uvs):
uv_layer = b_mesh.loops.layers.uv.new(det_model.mesh.uv_map_name)
uv_index = 0
for face in bmesh_faces:
if face:
for loop in face.loops:
loop[uv_layer].uv = uvs[uv_index]
uv_index += 1
else:
uv_index += 3  # skip the 3 loops of the missing face
def create_mesh(packed_reader, det_model):
if det_model.mesh.indices_count % 3:
raise log.AppError(text.error.dm_bad_indices)
b_mesh = bmesh.new()
vertices, uvs, triangles = read_mesh_data(packed_reader, det_model)
vertices, uvs, triangles = reconstruct_mesh(vertices, uvs, triangles)
bmesh_faces = create_geometry(b_mesh, vertices, triangles)
create_uv(b_mesh, det_model, bmesh_faces, uvs)
# assign images
if not utils.version.IS_28:
texture_layer = b_mesh.faces.layers.tex.new(
det_model.mesh.uv_map_name
)
bpy_image = det_model.mesh.bpy_material.texture_slots[0].texture.image
for face in b_mesh.faces:
face[texture_layer].image = bpy_image
b_mesh.normal_update()
b_mesh.to_mesh(det_model.mesh.bpy_mesh)
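# --- Usage sketch (editor addition, not part of the addon) ---
# reconstruct_mesh() is pure Python, so its vertex deduplication can be
# checked with toy data (importing this module still requires Blender's
# Python because of the bpy/bmesh imports at the top):
#
#   verts = [(0, 0, 0), (1, 0, 0), (0, 0, 0)]  # vertex 2 duplicates vertex 0
#   uvs = [(0.0, 0.0), (1.0, 0.0), (0.0, 1.0)]
#   tris = [(0, 1, 2)]
#   v, u, t = reconstruct_mesh(verts, uvs, tris)
#   # v == [(0, 0, 0), (1, 0, 0)] and t == [(0, 1, 0)] after deduplication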
|
06581edc525e538557608dc2f4f679a98fe5d771
|
3ec38f732b21b0a00e822dac730bdc1748902144
|
/scar/providers/aws/resourcegroups.py
|
c0be463c1f3e0b8a41407f1de5cce2d4787823c3
|
[
"Apache-2.0"
] |
permissive
|
grycap/scar
|
e5594c1eb79a0730409c97d48bc511757a05dcbd
|
e6c8b06a43b310d2c1e58d7826239e259dd826d7
|
refs/heads/master
| 2023-08-22T00:39:28.004454
| 2023-05-22T11:01:10
| 2023-05-22T11:01:10
| 91,441,209
| 613
| 59
|
Apache-2.0
| 2022-11-29T06:36:46
| 2017-05-16T09:35:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,835
|
py
|
resourcegroups.py
|
# Copyright (C) GRyCAP - I3M - UPV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module with methods and classes to manage AWS Resource Groups"""
from typing import List
from botocore.exceptions import ClientError
from scar.providers.aws import GenericClient
import scar.logger as logger
class ResourceGroups(GenericClient):
"""Class to manage AWS Resource Groups"""
def __init__(self, resources_info) -> None:
super().__init__(resources_info.get('lambda'))
def get_resource_arn_list(self, iam_user_id: str, resource_type: str = 'lambda') -> List:
"""Returns a list of ARNs filtered by the resource_type
passed and the tags created by scar."""
try:
# Build a resource filter from the tags that scar sets on its functions
tag_filters = [{'Key': 'owner', 'Values': [iam_user_id]},
{'Key': 'createdby', 'Values': ['scar']}]
resource_type_filters = [resource_type]
tagged_resources = self.client.get_tagged_resources(tag_filters, resource_type_filters)
return [function_info['ResourceARN'] for function_info in tagged_resources]
except ClientError as cerr:
logger.error("Error getting function_info arn by tag",
f"Error getting function_info arn by tag: {cerr}")
raise cerr
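# --- Usage sketch (editor addition, not part of scar) ---
# The call made through GenericClient corresponds to the plain boto3 Resource
# Groups Tagging API; a roughly equivalent stand-alone query (ignoring any
# pagination scar's wrapper may perform) looks like:
#
#   import boto3
#
#   client = boto3.client('resourcegroupstaggingapi')
#   page = client.get_resources(
#       TagFilters=[{'Key': 'owner', 'Values': ['<iam-user-id>']},
#                   {'Key': 'createdby', 'Values': ['scar']}],
#       ResourceTypeFilters=['lambda'])
#   arns = [r['ResourceARN'] for r in page['ResourceTagMappingList']]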
|
446795c8d9a051705ffd73b755eca9cf5cf0992a
|
bb1ca2c507c0975e3c20c042c35237feec7f7554
|
/PyGitUp/tests/test_rebase_arguments.py
|
9d41cc818a54e96916c37da14b3c2a3ff524ed7a
|
[
"MIT"
] |
permissive
|
msiemens/PyGitUp
|
655a9880642cff99b67eb008e994b986184d1166
|
0aa437834c08b63f439c53bb8a959f28d33e8559
|
refs/heads/master
| 2023-08-15T03:35:47.793548
| 2023-07-24T18:30:34
| 2023-07-24T18:30:34
| 7,652,428
| 452
| 44
|
MIT
| 2023-09-06T18:39:33
| 2013-01-16T19:39:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,524
|
py
|
test_rebase_arguments.py
|
# System imports
import os
from os.path import join
import pytest
from git import *
from PyGitUp.git_wrapper import RebaseError
from PyGitUp.tests import basepath, write_file, init_master, update_file, testfile_name
test_name = 'rebase-arguments'
repo_path = join(basepath, test_name + os.sep)
def _read_file(path):
with open(path) as f:
return f.read()
def setup():
master_path, master = init_master(test_name)
# Prepare master repo
master.git.checkout(b=test_name)
# Clone to test repo
path = join(basepath, test_name)
master.clone(path, b=test_name)
repo = Repo(path, odbt=GitCmdObjectDB)
assert repo.working_dir == path
# Modify file in master
master_file = update_file(master, test_name)
# Modify file in our repo
contents = _read_file(master_file)
contents = contents.replace('line 1', 'line x')
repo_file = join(path, testfile_name)
write_file(repo_file, contents)
repo.index.add([repo_file])
repo.index.commit(test_name)
# Set git-up.rebase.arguments to '--abort', what results in an
# invalid cmd and thus git returning an error, that we look for.
repo.git.config('git-up.rebase.arguments', '--abort')
def test_rebase_arguments():
""" Run 'git up' with rebasing.arguments """
os.chdir(repo_path)
from PyGitUp.gitup import GitUp
gitup = GitUp(testing=True)
with pytest.raises(RebaseError):
gitup.run()
assert len(gitup.states) == 1
assert gitup.states[0] == 'rebasing'
|
bc82013612d475f32188127e90f869fd89d95f7a
|
f6a2bcf29af64a1b89dfde9bdbe9496c3f8115dc
|
/assets/eip-6475/optional.py
|
2a1778aff063d0ff9388496730b2eed8be5b4ec2
|
[
"CC0-1.0"
] |
permissive
|
ethereum/EIPs
|
cb4e60d893c19fad08a9ed72060d410eecdc4c8f
|
068867b920b357a18184fcf77813ffe8d32b7bdd
|
refs/heads/master
| 2023-08-31T11:16:51.351624
| 2023-08-30T23:34:51
| 2023-08-30T23:34:51
| 44,971,752
| 12,994
| 5,168
|
CC0-1.0
| 2023-09-14T16:57:20
| 2015-10-26T13:57:23
|
Solidity
|
UTF-8
|
Python
| false
| false
| 6,817
|
py
|
optional.py
|
from typing import Any, BinaryIO, Optional as PyOptional, TypeVar, Type, cast
from remerkleable.basic import uint256
from remerkleable.complex import MonoSubtreeView
from remerkleable.core import BasicView, View, ViewHook
from remerkleable.tree import Gindex, Node, PairNode, get_depth, subtree_fill_to_contents, to_gindex, zero_node
from remerkleable.tree import LEFT_GINDEX, RIGHT_GINDEX
T = TypeVar('T', bound="Optional")
class Optional(MonoSubtreeView):
__slots__ = ()
def __new__(cls, value: PyOptional[Type[T]] = None, backing: PyOptional[Node] = None, hook: PyOptional[ViewHook] = None, **kwargs):
if backing is not None:
if value is not None:
raise Exception("cannot have both a backing and a value to init Optional")
return super().__new__(cls, backing=backing, hook=hook, **kwargs)
elem_cls = cls.element_cls()
assert cls.limit() == 1
input_views = []
if value is not None:
if isinstance(value, View):
input_views.append(value)
else:
input_views.append(elem_cls.coerce_view(value))
input_nodes = cls.views_into_chunks(input_views)
contents = subtree_fill_to_contents(input_nodes, cls.contents_depth())
backing = PairNode(contents, uint256(len(input_views)).get_backing())
return super().__new__(cls, backing=backing, hook=hook, **kwargs)
def __class_getitem__(cls, element_type) -> Type["Optional"]:
limit = 1
contents_depth = get_depth(limit)
packed = isinstance(element_type, BasicView)
class SpecialOptionView(Optional):
@classmethod
def is_packed(cls) -> bool:
return packed
@classmethod
def contents_depth(cls) -> int:
return contents_depth
@classmethod
def element_cls(cls) -> Type[View]:
return element_type
@classmethod
def limit(cls) -> int:
return limit
SpecialOptionView.__name__ = SpecialOptionView.type_repr()
return SpecialOptionView
def length(self) -> int:
ll_node = super().get_backing().get_right()
ll = cast(uint256, uint256.view_from_backing(node=ll_node, hook=None))
return int(ll)
def value_byte_length(self) -> int:
if self.length() == 0:
return 0
else:
elem_cls = self.__class__.element_cls()
if elem_cls.is_fixed_byte_length():
return elem_cls.type_byte_length()
else:
return cast(View, self.get()).value_byte_length()
def get(self) -> PyOptional[View]:
if self.length() == 0:
return None
else:
return super().get(0)
def set(self, v: PyOptional[View]) -> None:
if v is None:
if self.length() == 0:
return
i = 0
target = to_gindex(i, self.__class__.tree_depth())
set_last = self.get_backing().setter(target)
next_backing = set_last(zero_node(0))
can_summarize = (target & 1) == 0
if can_summarize:
while (target & 1) == 0 and target != 0b10:
target >>= 1
summary_fn = next_backing.summarize_into(target)
next_backing = summary_fn()
set_length = next_backing.rebind_right
new_length = uint256(i).get_backing()
next_backing = set_length(new_length)
self.set_backing(next_backing)
else:
if self.length() == 1:
super().set(0, v)
return
i = 0
elem_type: Type[View] = self.__class__.element_cls()
if not isinstance(v, elem_type):
v = elem_type.coerce_view(v)
target = to_gindex(i, self.__class__.tree_depth())
set_last = self.get_backing().setter(target, expand=True)
next_backing = set_last(v.get_backing())
set_length = next_backing.rebind_right
new_length = uint256(i + 1).get_backing()
next_backing = set_length(new_length)
self.set_backing(next_backing)
def __repr__(self):
value = self.get()
if value is None:
return f"{self.type_repr()}(None)"
else:
return f"{self.type_repr()}(Some({repr(value)}))"
@classmethod
def type_repr(cls) -> str:
return f"Optional[{cls.element_cls().__name__}]"
@classmethod
def is_packed(cls) -> bool:
raise NotImplementedError
@classmethod
def contents_depth(cls) -> int:
raise NotImplementedError
@classmethod
def tree_depth(cls) -> int:
return cls.contents_depth() + 1 # 1 extra for mix-in
@classmethod
def limit(cls) -> int:
raise NotImplementedError
@classmethod
def deserialize(cls: Type[T], stream: BinaryIO, scope: int) -> T:
if scope == 0:
return cls()
else:
is_some = stream.read(1)
if is_some != bytes([0x01]):
raise ValueError(f"Unexpected is_some {is_some} (expected: 1)")
return cls(cls.element_cls().deserialize(stream, scope - 1))
def serialize(self, stream: BinaryIO) -> int:
v = self.get()
if v is None:
return 0
else:
stream.write(bytes([0x01]))
return 1 + v.serialize(stream)
@classmethod
def navigate_type(cls, key: Any) -> Type[View]:
if key in ('__selector__', '__is_some__'):
return uint256
if not isinstance(key, int):
raise TypeError(f"expected integer key, got {key}")
if not (0 <= int(key) <= 1):
raise KeyError(f"key {key} is not a valid selector for optional {repr(cls)}")
return cls.element_cls()
@classmethod
def key_to_static_gindex(cls, key: Any) -> Gindex:
if key in ('__selector__', '__is_some__'):
return RIGHT_GINDEX
if not isinstance(key, int):
raise TypeError(f"expected integer key, got {key}")
if not (0 <= int(key) <= 1):
raise KeyError(f"key {key} is not a valid selector for optional {repr(cls)}")
return LEFT_GINDEX
@classmethod
def default_node(cls) -> Node:
return PairNode(zero_node(cls.contents_depth()), zero_node(0)) # mix-in 0
@classmethod
def is_fixed_byte_length(cls) -> bool:
return False
@classmethod
def min_byte_length(cls) -> int:
return 0
@classmethod
def max_byte_length(cls) -> int:
elem_cls = cls.element_cls()
bytes_per_elem = elem_cls.max_byte_length()
return 1 + bytes_per_elem
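# --- Usage sketch (editor addition, not part of the EIP asset) ---
# Assuming the remerkleable package is installed, the intended round trip is:
#
#   import io
#   from remerkleable.basic import uint64
#
#   Opt = Optional[uint64]
#   x = Opt(uint64(7))
#   assert x.get() == uint64(7)
#   stream = io.BytesIO()
#   x.serialize(stream)  # writes b'\x01' followed by 8-byte little-endian 7
#   x.set(None)
#   assert x.get() is None and x.serialize(io.BytesIO()) == 0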
|
b1229513d39e47714d1050859574b4847eb92753
|
ecaba173879f92f24e3c951866fda23c0a4fc426
|
/perfkitbenchmarker/linux_benchmarks/hbase_ycsb_benchmark.py
|
1395107f17c3c306c9cdc37a477a87445c24cdb9
|
[
"Classpath-exception-2.0",
"BSD-3-Clause",
"AGPL-3.0-only",
"MIT",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
GoogleCloudPlatform/PerfKitBenchmarker
|
2f4917fd796db4eb90822c557d8fa08a497fbd48
|
d0699f32998898757b036704fba39e5471641f01
|
refs/heads/master
| 2023-09-02T08:14:54.110308
| 2023-09-01T20:28:01
| 2023-09-01T20:28:38
| 21,950,910
| 1,923
| 567
|
Apache-2.0
| 2023-09-13T22:37:42
| 2014-07-17T17:23:26
|
Python
|
UTF-8
|
Python
| false
| false
| 9,098
|
py
|
hbase_ycsb_benchmark.py
|
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs YCSB against HBase.
HBase is a scalable NoSQL database built on Hadoop.
https://hbase.apache.org/
A running installation consists of:
* An HDFS NameNode.
* HDFS DataNodes.
* An HBase master node.
* HBase regionservers.
* A zookeeper cluster (https://zookeeper.apache.org/).
See: http://hbase.apache.org/book.html#_distributed.
This benchmark provisions:
* A single node functioning as HDFS NameNode, HBase master, and zookeeper
quorum member.
* '--num_vms - 1' nodes serving as both HDFS DataNodes and HBase region
servers (so region servers and data are co-located).
By default only the master node runs Zookeeper. Some regionservers may be added
to the zookeeper quorum with the --hbase_zookeeper_nodes flag.
HBase web UI on 15030.
HDFS web UI on 50070.
"""
import functools
import logging
import os
import posixpath
from absl import flags
from perfkitbenchmarker import background_tasks
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import hadoop
from perfkitbenchmarker.linux_packages import hbase
from perfkitbenchmarker.linux_packages import ycsb
FLAGS = flags.FLAGS
flags.DEFINE_string(
'hbase_binding', 'hbase12', 'HBase binding to use. After '
'YCSB 0.17.0, "hbase1x" is renamed to "hbase1".')
flags.DEFINE_integer('hbase_zookeeper_nodes', 1, 'Number of Zookeeper nodes.')
flags.DEFINE_boolean('hbase_use_snappy', True,
'Whether to use snappy compression.')
BENCHMARK_NAME = 'hbase_ycsb'
BENCHMARK_CONFIG = """
hbase_ycsb:
description: >
Run YCSB against HBase. Specify the HBase
cluster size with --num_vms. Specify the number of YCSB VMs
with --ycsb_client_vms.
vm_groups:
clients:
vm_spec: *default_single_core
master:
vm_spec: *default_single_core
disk_spec: *default_500_gb
workers:
vm_spec: *default_single_core
disk_spec: *default_500_gb
"""
HBASE_SITE = 'hbase-site.xml'
CREATE_TABLE_SCRIPT = 'hbase/create-ycsb-table.hbaseshell.j2'
TABLE_NAME = 'usertable'
COLUMN_FAMILY = 'cf'
TABLE_SPLIT_COUNT = 200
def GetConfig(user_config):
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
num_vms = max(FLAGS.num_vms, 2)
if FLAGS['num_vms'].present and FLAGS.num_vms < 2:
raise ValueError('hbase_ycsb requires at least 2 HBase VMs.')
if FLAGS['ycsb_client_vms'].present:
config['vm_groups']['clients']['vm_count'] = FLAGS.ycsb_client_vms
if FLAGS['num_vms'].present:
config['vm_groups']['workers']['vm_count'] = num_vms - 1
return config
def CheckPrerequisites(benchmark_config):
"""Verifies that the required resources are present.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
hbase.CheckPrerequisites()
hadoop.CheckPrerequisites()
ycsb.CheckPrerequisites()
def CreateYCSBTable(vm, table_name=TABLE_NAME, family=COLUMN_FAMILY,
n_splits=TABLE_SPLIT_COUNT, limit_filesize=True,
use_snappy=True):
"""Create a table for use with YCSB.
Args:
vm: Virtual machine from which to create the table.
table_name: Name for the table.
family: Column family name.
limit_filesize: boolean. Should the filesize be limited to 4GB?
n_splits: Initial number of regions for the table. Default follows
HBASE-4163.
"""
# See: https://issues.apache.org/jira/browse/HBASE-4163
template_path = data.ResourcePath(CREATE_TABLE_SCRIPT)
remote = posixpath.join(hbase.HBASE_DIR,
os.path.basename(os.path.splitext(template_path)[0]))
vm.RenderTemplate(template_path, remote,
context={'table_name': table_name,
'family': family,
'limit_filesize': limit_filesize,
'n_splits': n_splits,
'use_snappy': use_snappy})
# TODO(user): on HBase update, add '-n' flag.
command = "{0}/hbase shell {1}".format(hbase.HBASE_BIN, remote)
vm.RemoteCommand(command)
def _GetVMsByRole(vm_groups):
"""Partition "vms" by role in the benchmark.
* The first VM is the master.
* The first FLAGS.hbase_zookeeper_nodes form the Zookeeper quorum.
* The last FLAGS.ycsb_client_vms are loader nodes.
* The nodes which are neither the master nor loaders are HBase region servers.
Args:
vm_groups: The benchmark_spec's vm_groups dict.
Returns:
A dictionary with keys 'vms', 'hbase_vms', 'master', 'zk_quorum', 'workers',
and 'clients'.
"""
hbase_vms = vm_groups['master'] + vm_groups['workers']
vms = hbase_vms + vm_groups['clients']
return {'vms': vms,
'hbase_vms': hbase_vms,
'master': vm_groups['master'][0],
'zk_quorum': hbase_vms[:FLAGS.hbase_zookeeper_nodes],
'workers': vm_groups['workers'],
'clients': vm_groups['clients']}
def Prepare(benchmark_spec):
"""Prepare the virtual machines to run hadoop.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
by_role = _GetVMsByRole(benchmark_spec.vm_groups)
loaders = by_role['clients']
assert loaders, 'No loader VMs: {0}'.format(by_role)
# HBase cluster
hbase_vms = by_role['hbase_vms']
assert hbase_vms, 'No HBase VMs: {0}'.format(by_role)
master = by_role['master']
zk_quorum = by_role['zk_quorum']
assert zk_quorum, 'No zookeeper quorum: {0}'.format(by_role)
workers = by_role['workers']
assert workers, 'No workers: {0}'.format(by_role)
hbase_install_fns = [functools.partial(vm.Install, 'hbase')
for vm in hbase_vms]
ycsb_install_fns = [functools.partial(vm.Install, 'ycsb')
for vm in loaders]
background_tasks.RunThreaded(
lambda f: f(), hbase_install_fns + ycsb_install_fns
)
hadoop.ConfigureAndStart(master, workers, start_yarn=False)
hbase.ConfigureAndStart(master, workers, zk_quorum)
CreateYCSBTable(master, use_snappy=FLAGS.hbase_use_snappy)
# Populate hbase-site.xml on the loaders.
master.PullFile(
vm_util.GetTempDir(),
posixpath.join(hbase.HBASE_CONF_DIR, HBASE_SITE))
def PushHBaseSite(vm):
conf_dir = posixpath.join(ycsb.YCSB_DIR, FLAGS.hbase_binding + '-binding',
'conf')
vm.RemoteCommand('mkdir -p {}'.format(conf_dir))
vm.PushFile(
os.path.join(vm_util.GetTempDir(), HBASE_SITE),
posixpath.join(conf_dir, HBASE_SITE))
background_tasks.RunThreaded(PushHBaseSite, loaders)
benchmark_spec.executor = ycsb.YCSBExecutor(FLAGS.hbase_binding)
def Run(benchmark_spec):
"""Spawn YCSB and gather the results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
by_role = _GetVMsByRole(benchmark_spec.vm_groups)
loaders = by_role['clients']
logging.info('Loaders: %s', loaders)
metadata = {
'ycsb_client_vms': len(loaders),
'hbase_cluster_size': len(by_role['hbase_vms']),
'hbase_zookeeper_nodes': FLAGS.hbase_zookeeper_nodes,
'hbase_version': hbase.GetHBaseVersion(by_role['hbase_vms'][0]),
}
# By default YCSB uses a BufferedMutator for Puts / Deletes.
# This leads to incorrect update latencies, since the call returns
# before the request is acked by the server.
# Disable this behavior during the benchmark run.
run_kwargs = {'columnfamily': COLUMN_FAMILY,
'clientbuffering': 'false'}
load_kwargs = run_kwargs.copy()
# During the load stage, use a buffered mutator with a single thread.
# The BufferedMutator will handle multiplexing RPCs.
load_kwargs['clientbuffering'] = 'true'
if not FLAGS['ycsb_preload_threads'].present:
load_kwargs['threads'] = 1
samples = list(benchmark_spec.executor.LoadAndRun(
loaders, load_kwargs=load_kwargs, run_kwargs=run_kwargs))
for sample in samples:
sample.metadata.update(metadata)
return samples
def Cleanup(benchmark_spec):
"""Cleanup.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
by_role = _GetVMsByRole(benchmark_spec.vm_groups)
hbase.Stop(by_role['master'])
hadoop.StopHDFS(by_role['master'])
background_tasks.RunThreaded(hadoop.CleanDatanode, by_role['workers'])
|
e214493ddecaa4bb8da7e824b076c69bf13784be
|
3cb75376d2104ffbbc1cfc08a9b7a1425931705f
|
/app/plugins/poem/config.py
|
32f85a9ad0741cb622bb9c75f9e62f3760b90c38
|
[
"Apache-2.0"
] |
permissive
|
xiaoyunjie/openvpn-cms-flask
|
db6435432c2fd1a42ee9587ce5812b073f3584ee
|
1a39a984e6a8990c35bb79a103a794a3dc7fdd59
|
refs/heads/master
| 2023-05-25T07:19:03.130505
| 2023-05-23T08:19:52
| 2023-05-23T08:19:52
| 242,948,519
| 200
| 76
|
Apache-2.0
| 2022-05-06T06:08:10
| 2020-02-25T08:29:38
|
Python
|
UTF-8
|
Python
| false
| false
| 11
|
py
|
config.py
|
limit = 20
|
5087eadf45cd8fb84802279f30a18f9e356da7a5
|
674081023a48bba16349b49bb3a593d1231614ff
|
/src/PythonUnitTests/LargeArrayTests.py
|
30ca944f5e8d76ccb65d1dda458f8cf4c9285eea
|
[
"BSD-3-Clause"
] |
permissive
|
Quansight-Labs/numpy.net
|
139fd8044412da2084c9498b53abc6996376ca13
|
c0d64d4bc56ee80cf4e0c0bc148c241b1c6c32b9
|
refs/heads/master
| 2023-06-28T14:15:08.080709
| 2023-06-16T13:48:48
| 2023-06-16T13:48:48
| 163,912,341
| 117
| 13
|
BSD-3-Clause
| 2023-04-02T00:26:29
| 2019-01-03T02:17:42
|
C#
|
UTF-8
|
Python
| false
| false
| 9,640
|
py
|
LargeArrayTests.py
|
import unittest
import numpy as np
from nptest import nptest
class LargeArrayTests(unittest.TestCase):
def test_largearray_matmul_INT64_1(self):
width = 1024
height = 1024
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.matmul(x_range.reshape(width,1), y_range.reshape(1, height))
z = np.sum(x_mat)
print(z)
def test_largearray_matmul_INT64_2(self):
width = 1024
height = 1024
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.matmul(x_range.reshape(width,1), y_range.reshape(1, height))
z = np.sum(x_mat, axis=0)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=1)
z1 = np.sum(z)
print(z1)
def test_largearray_add_INT64_1(self):
width = 1024
height = 1024
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.add(x_range.reshape(width,1), y_range.reshape(1, height))
z = np.sum(x_mat, axis=0)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=1)
z1 = np.sum(z)
print(z1)
def test_largearray_add_INT64_2(self):
width = 1024
height = 1024
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.add(x_range.reshape(width,1), y_range.reshape(1, height))
x_mat = np.expand_dims(x_mat, 0)
z = np.sum(x_mat, axis=0)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=1)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=2)
z1 = np.sum(z)
print(z1)
def test_largearray_multiply_INT64_1(self):
width = 2048
height = 2048
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.multiply(x_range.reshape(width,1), y_range.reshape(1, height))
z = np.sum(x_mat, axis=0)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=1)
z1 = np.sum(z)
print(z1)
def test_largearray_multiply_INT64_2(self):
width = 4096
height = 4096
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.multiply(x_range.reshape(1, width), y_range.reshape(height, 1))
x_mat = np.expand_dims(x_mat, 0)
z = np.sum(x_mat, axis=0)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=1)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=2)
z1 = np.sum(z)
print(z1)
def test_largearray_copy_int64_1(self):
length = 268435435 # (Int32.MaxValue) / sizeof(double) - 20;
x = np.arange(0, length, 1, dtype = np.int64);
z = np.sum(x);
print(z)
y = x.copy()
z = np.sum(y)
print(z)
def test_largearray_copy_int64_2(self):
length = 268435434 # (Int32.MaxValue) / sizeof(double) - 21;
x = np.arange(0, length, 1, dtype = np.int64).reshape(2,-1);
z = np.sum(x, axis=0);
z = np.sum(z)
print(z)
y = x.copy()
z = np.sum(y, axis=1)
z = np.sum(z)
print(z)
def test_largearray_meshgrid_int64_2(self):
length = 100 * 100
x = np.arange(0,length, 1, dtype = np.int64)
x1, x2 = np.meshgrid(x,x)
print(x1.shape)
print(x2.shape)
z = np.sum(x1)
print(z)
z = np.sum(x2)
print(z)
def test_largearray_checkerboard_1(self):
x = np.zeros((2048,2048),dtype=int)
x[1::2,::2] = 1
x[::2,1::2] = 1
print(np.sum(x))
def test_largearray_byteswap_int64_2(self):
length = 1024 * 1024* 32 # (Int32.MaxValue) / sizeof(double) - 21;
x = np.arange(0, length, 1, dtype = np.int64).reshape(2,-1);
y = x.byteswap();
z = np.sum(y, axis=0);
z = np.sum(z)
print(z)
z = np.sum(y, axis=1)
z = np.sum(z)
print(z)
def test_largearray_unique_INT32(self):
matrix = np.arange(16000000, dtype=np.int32).reshape((40, -1));
matrix = matrix[1:40:2, 1:-2:1]
uvalues, indexes, inverse, counts = np.unique(matrix, return_counts = True, return_index=True, return_inverse=True);
print(np.sum(uvalues))
print(np.sum(indexes))
print(np.sum(inverse))
print(np.sum(counts))
def test_largearray_where_INT32(self):
matrix = np.arange(16000000, dtype=np.int32).reshape((40, -1));
print(np.sum(matrix))
indices = np.where(matrix % 2 == 0);
m1 = matrix[indices]
print(np.sum(m1))
def test_largearray_insert_INT64(self):
matrix = np.arange(16000000, dtype=np.int64).reshape((40, -1));
print(np.sum(matrix))
m1 = np.insert(matrix, 0, [999,100,101])
print(np.sum(m1))
def test_largearray_append_INT64(self):
matrix = np.arange(16000000, dtype=np.int64).reshape((40, -1));
print(np.sum(matrix))
m1 = np.append(matrix, [999,100,101])
print(np.sum(m1))
def test_largearray_concatenate_INT64(self):
a = np.arange(16000000, dtype=np.int64).reshape((40, -1));
b = np.arange(1, 16000001, dtype=np.int64).reshape((40, -1));
c = np.concatenate((a, b), axis=0)
print(np.sum(c))
#d = np.concatenate((a.T, b), axis=1)
#print(np.sum(d))
e = np.concatenate((a, b), axis=None)
print(np.sum(e))
def test_largearray_min_INT64(self):
a = np.arange(16000000, dtype=np.int64).reshape((40, -1));
b = np.amin(a)
print(np.sum(b))
b = np.amin(a, axis=0)
print(np.sum(b))
b = np.amin(a, axis=1)
print(np.sum(b))
def test_largearray_max_INT64(self):
a = np.arange(16000000, dtype=np.int64).reshape((40, -1));
b = np.amax(a)
print(np.sum(b))
b = np.amax(a, axis=0)
print(np.sum(b))
b = np.amax(a, axis=1)
print(np.sum(b))
def test_largearray_setdiff1d_INT64(self):
a = np.arange(16000000, dtype=np.int64);
b = np.array([3, 4, 5, 6])
c = np.setdiff1d(a, b)
print(np.sum(a))
print(np.sum(b))
print(np.sum(c))
def test_largearray_copyto_INT64(self):
a = np.arange(16000000, dtype=np.int64).reshape(-1, 5);
print(np.sum(a))
b = np.array([1, 2, 3, 4, 5])
np.copyto(a, b)
print(np.sum(a))
a = np.arange(16000000, dtype=np.int64).reshape(-1, 5);
b = np.array([1, 2, 3, 4, 5])
np.copyto(a, b, where = b % 2 == 0)
print(np.sum(a))
def test_largearray_sin_DOUBLE(self):
a = np.ones(16000000, dtype=np.float64).reshape(-1, 5);
b = np.sin(a)
print(np.sum(b))
def test_largearray_diff_INT64(self):
a = np.arange(0, 16000000 * 3, 3, dtype=np.int64).reshape(-1, 5);
b = np.diff(a)
print(np.sum(b))
def test_largearray_ediff1d_INT64(self):
a = np.arange(0, 16000000 * 3, 3, dtype=np.int64).reshape(-1, 5);
b = np.ediff1d(a)
print(np.sum(b))
def test_largearray_gradient_INT64(self):
a = np.arange(0, 16000000 * 3, 3, dtype=np.int64).reshape(-1, 5);
b = np.gradient(a)
print(np.sum(b[0]))
print(np.sum(b[1]))
def test_largearray_cross_INT64(self):
a = np.arange(16000000, dtype=np.int64).reshape((-1, 2));
b = np.arange(1, 16000001, dtype=np.int64).reshape((-1, 2));
c = np.cross(a, b)
print(np.sum(c))
def test_largearray_convolve_INT64(self):
a = np.arange(160000, dtype=np.int64);
b = np.arange(1, 160001, dtype=np.int64);
c = np.convolve(a, b)
print(np.sum(c))
def test_largearray_clip_INT64(self):
a = np.arange(16000000, dtype=np.int64).reshape((-1, 2));
c = np.clip(a, 1, 1000);
print(np.sum(c))
def test_largearray_take_INT64(self):
a = np.arange(16000000, dtype=np.int64).reshape((-1, 2));
indices = np.arange(0,a.size, 2, np.intp)
c = np.take(a, indices);
print(np.sum(c))
def test_largearray_choose_INT64(self):
choice1 = np.arange(16000000, dtype=np.int64);
choice2 = np.arange(16000000, dtype=np.int64);
choice3 = np.arange(16000000, dtype=np.int64);
choice4 = np.arange(16000000, dtype=np.int64);
        selection = np.repeat([0,1,2,3], choice1.size // 4)  # integer repeat count; float division would raise a TypeError on recent NumPy
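        # np.choose picks, element-wise, from the four choice arrays using the
        # selection indices 0-3; with identical choices the result matches any one of them.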
c = np.choose(selection, [choice1, choice2, choice3, choice4])
print(np.sum(c))
def test_largearray_dot_Float64(self):
a = np.arange(0, 2000 * 100, dtype=np.float64).reshape((2000, -1));
b = np.arange(0, 2000 * 100, dtype=np.float64).reshape((-1, 2000));
for x in range(0,10, 1):
c = np.dot(a, b);
sum = np.sum(c)
print(sum)
def test_largearray_inner_Float64(self):
a = np.arange(0, 2000 * 100, dtype=np.float64).reshape((2000, -1));
b = np.arange(0, 2000 * 100, dtype=np.float64).reshape((2000, -1));
for x in range(0,10, 1):
c = np.inner(a, b);
sum = np.sum(c)
print(sum)
if __name__ == '__main__':
unittest.main()
|
f0439bffcceec9b70754abf0e207e47f1c90f526
|
620323fc090cebaf7aca456ff3f7fbbe1e210394
|
/ReadURLsFromFile/ReadURLsFromFile.py
|
4dbc56e317a474159f99d3c53d2f444daad8fb35
|
[
"CC-BY-4.0"
] |
permissive
|
gil9red/SimplePyScripts
|
bd2733372728bf9b9f00570e90316fa12116516b
|
773c2c9724edd8827a1dbd91694d780e03fcb05a
|
refs/heads/master
| 2023-08-31T04:26:09.120173
| 2023-08-30T17:22:59
| 2023-08-30T17:22:59
| 22,650,442
| 157
| 46
| null | 2023-09-08T17:51:33
| 2014-08-05T16:19:52
|
Python
|
UTF-8
|
Python
| false
| false
| 376
|
py
|
ReadURLsFromFile.py
|
# coding=utf-8
__author__ = "ipetrash"
import argparse
def create_parser():
parser = argparse.ArgumentParser(description="Read URLs from file with URL.")
return parser
if __name__ == "__main__":
create_parser().parse_args()
file_path = input("Input file path: ")
with open(file_path) as f:
        for line in f:
            # print() adds its own newline; strip the one read from the file
            print(line.rstrip('\n'))
|
67d7ac653f3243a0d4d06a776cb2d35219f5f26b
|
d2621d10d6d0aa4fcecbb11c281e3dd680b985fc
|
/ts/torch_handler/object_detector.py
|
1539c0b9e8c68d1ab8c6fe7e4d80a03129d67bc3
|
[
"Apache-2.0"
] |
permissive
|
pytorch/serve
|
7b562a4d6372e77ce28fc71a5b8d5455c6f02290
|
242895c6b4596c4119ec09d6139e627c5dd696b6
|
refs/heads/master
| 2023-08-31T05:24:10.950144
| 2023-08-31T02:49:22
| 2023-08-31T02:49:22
| 212,488,700
| 3,689
| 895
|
Apache-2.0
| 2023-09-13T22:34:31
| 2019-10-03T03:17:43
|
Java
|
UTF-8
|
Python
| false
| false
| 1,967
|
py
|
object_detector.py
|
"""
Module for object detection default handler
"""
import torch
from torchvision import transforms
from torchvision import __version__ as torchvision_version
from packaging import version
from .vision_handler import VisionHandler
from ..utils.util import map_class_to_label
class ObjectDetector(VisionHandler):
"""
ObjectDetector handler class. This handler takes an image
and returns list of detected classes and bounding boxes respectively
"""
image_processing = transforms.Compose([transforms.ToTensor()])
threshold = 0.5
def initialize(self, context):
super().initialize(context)
properties = context.system_properties
# Torchvision breaks with object detector models before 0.6.0
        if version.parse(torchvision_version) < version.parse("0.6.0"):
            self.initialized = False
            # Fail loudly here: without an early exit, execution falls through and
            # `self.initialized = True` below silently overwrites the flag.
            raise RuntimeError("Object detection requires torchvision version >= 0.6.0")
self.device = torch.device(
"cuda"
if torch.cuda.is_available() and properties.get("gpu_id") is not None
else "cpu"
)
self.model.to(self.device)
self.model.eval()
self.initialized = True
def postprocess(self, data):
result = []
box_filters = [row["scores"] >= self.threshold for row in data]
filtered_boxes, filtered_classes, filtered_scores = [
[
row[key][box_filter].tolist()
for row, box_filter in zip(data, box_filters)
]
for key in ["boxes", "labels", "scores"]
]
for classes, boxes, scores in zip(
filtered_classes, filtered_boxes, filtered_scores
):
retval = []
for _class, _box, _score in zip(classes, boxes, scores):
_retval = map_class_to_label([[_box]], self.mapping, [[_class]])[0]
_retval["score"] = _score
retval.append(_retval)
result.append(retval)
return result
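# A minimal illustrative sketch (not part of the upstream handler) of how the
# score-threshold filtering in postprocess() behaves; the tensor values below
# are invented for demonstration only.
def _demo_threshold_filter(threshold=0.5):
    data = [{
        "boxes": torch.tensor([[0.0, 0.0, 10.0, 10.0], [1.0, 1.0, 2.0, 2.0]]),
        "labels": torch.tensor([1, 2]),
        "scores": torch.tensor([0.9, 0.2]),
    }]
    box_filters = [row["scores"] >= threshold for row in data]
    # Only the first detection survives the 0.5 threshold
    return [row["boxes"][f].tolist() for row, f in zip(data, box_filters)]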
|
3dccc375fd80786d7e01504681ec66a3fcc64c23
|
2d3a0b9caaff0ec56de90ca9285648848395c18b
|
/tests/test_grab.py
|
89be5dbb0b5d68529e954aa0a61380f192a86ce1
|
[
"BSD-2-Clause"
] |
permissive
|
imageio/imageio
|
f96a03fd0c0c704f76ec4ed2f3f5137b5d1d3d2b
|
a0091371dd42442ca3fae0fc0e8a4f0925757ac7
|
refs/heads/master
| 2023-09-04T11:09:46.646163
| 2023-09-04T02:23:41
| 2023-09-04T02:23:41
| 9,861,437
| 1,332
| 346
|
BSD-2-Clause
| 2023-09-04T13:54:07
| 2013-05-04T22:56:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,603
|
py
|
test_grab.py
|
import sys
import numpy as np
from pytest import raises
import imageio.v2 as iio
import imageio.plugins.grab
def test_grab_plugin_load():
imageio.plugins.grab.BaseGrabFormat._ImageGrab = FakeImageGrab
imageio.plugins.grab.BaseGrabFormat._pillow_imported = True
_plat = sys.platform
sys.platform = "win32"
try:
reader = iio.get_reader("<screen>")
assert reader.format.name == "SCREENGRAB"
reader = iio.get_reader("<clipboard>")
assert reader.format.name == "CLIPBOARDGRAB"
with raises(ValueError):
iio.get_writer("<clipboard>")
with raises(ValueError):
iio.get_writer("<screen>")
finally:
sys.platform = _plat
imageio.plugins.grab.BaseGrabFormat._ImageGrab = None
imageio.plugins.grab.BaseGrabFormat._pillow_imported = False
class FakeImageGrab:
has_clipboard = True
@classmethod
def grab(cls):
return np.zeros((8, 8, 3), np.uint8)
@classmethod
def grabclipboard(cls):
if cls.has_clipboard:
return np.zeros((9, 9, 3), np.uint8)
else:
return None
def test_grab_simulated():
    # Hard to test for real, if only because it's only fully supported on
    # Windows, but we can monkey-patch so we can test all the imageio bits.
imageio.plugins.grab.BaseGrabFormat._ImageGrab = FakeImageGrab
imageio.plugins.grab.BaseGrabFormat._pillow_imported = True
_plat = sys.platform
sys.platform = "win32"
try:
im = iio.imread("<screen>")
assert im.shape == (8, 8, 3)
reader = iio.get_reader("<screen>")
im1 = reader.get_data(0)
im2 = reader.get_data(0)
im3 = reader.get_data(1)
assert im1.shape == (8, 8, 3)
assert im2.shape == (8, 8, 3)
assert im3.shape == (8, 8, 3)
im = iio.imread("<clipboard>")
assert im.shape == (9, 9, 3)
reader = iio.get_reader("<clipboard>")
im1 = reader.get_data(0)
im2 = reader.get_data(0)
im3 = reader.get_data(1)
assert im1.shape == (9, 9, 3)
assert im2.shape == (9, 9, 3)
assert im3.shape == (9, 9, 3)
# Grabbing from clipboard can fail if there is no image data to grab
FakeImageGrab.has_clipboard = False
with raises(RuntimeError):
im = iio.imread("<clipboard>")
finally:
sys.platform = _plat
imageio.plugins.grab.BaseGrabFormat._ImageGrab = None
imageio.plugins.grab.BaseGrabFormat._pillow_imported = False
FakeImageGrab.has_clipboard = True
|
2330036781bf8f2412c6dcd78410e613afa7f10b
|
c46754b9600a12df4f9d7a6320dfc19aa96b1e1d
|
/tests/models/mvp/test_modeling_mvp.py
|
8e6143529a804588d03cf91f15a84f965308a8aa
|
[
"Apache-2.0"
] |
permissive
|
huggingface/transformers
|
ccd52a0d7c59e5f13205f32fd96f55743ebc8814
|
4fa0aff21ee083d0197a898cdf17ff476fae2ac3
|
refs/heads/main
| 2023-09-05T19:47:38.981127
| 2023-09-05T19:21:33
| 2023-09-05T19:21:33
| 155,220,641
| 102,193
| 22,284
|
Apache-2.0
| 2023-09-14T20:44:49
| 2018-10-29T13:56:00
|
Python
|
UTF-8
|
Python
| false
| false
| 33,290
|
py
|
test_modeling_mvp.py
|
# coding=utf-8
# Copyright 2021, The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch MVP model. """
import copy
import tempfile
import unittest
import timeout_decorator # noqa
from transformers import MvpConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpTokenizer,
)
from transformers.models.mvp.modeling_mvp import MvpDecoder, MvpEncoder, shift_tokens_right
def prepare_mvp_inputs_dict(
config,
input_ids,
decoder_input_ids=None,
attention_mask=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
):
if attention_mask is None:
attention_mask = input_ids.ne(config.pad_token_id)
if decoder_attention_mask is None:
decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
if head_mask is None:
head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
if decoder_head_mask is None:
decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
if cross_attn_head_mask is None:
cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class MvpModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=20,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3)
input_ids[:, -1] = self.eos_token_id # Eos Token
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.get_config()
inputs_dict = prepare_mvp_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def get_config(self):
return MvpConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
)
def get_pipeline_config(self):
config = self.get_config()
config.max_position_embeddings = 100
config.vocab_size = 300
return config
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = MvpModel(config=config).get_decoder().to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
head_mask = inputs_dict["head_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def check_encoder_decoder_model_standalone(self, config, inputs_dict):
model = MvpModel(config=config).to(torch_device).eval()
outputs = model(**inputs_dict)
encoder_last_hidden_state = outputs.encoder_last_hidden_state
last_hidden_state = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
encoder = model.get_encoder()
encoder.save_pretrained(tmpdirname)
encoder = MvpEncoder.from_pretrained(tmpdirname).to(torch_device)
encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
0
]
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
with tempfile.TemporaryDirectory() as tmpdirname:
decoder = model.get_decoder()
decoder.save_pretrained(tmpdirname)
decoder = MvpDecoder.from_pretrained(tmpdirname).to(torch_device)
last_hidden_state_2 = decoder(
input_ids=inputs_dict["decoder_input_ids"],
attention_mask=inputs_dict["decoder_attention_mask"],
encoder_hidden_states=encoder_last_hidden_state,
encoder_attention_mask=inputs_dict["attention_mask"],
)[0]
self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MvpHeadTests(unittest.TestCase):
vocab_size = 99
def _get_config_and_data(self):
input_ids = torch.tensor(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
],
dtype=torch.long,
device=torch_device,
)
batch_size = input_ids.shape[0]
config = MvpConfig(
vocab_size=self.vocab_size,
d_model=24,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=32,
decoder_ffn_dim=32,
max_position_embeddings=48,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
)
return config, input_ids, batch_size
def test_sequence_classification_forward(self):
config, input_ids, batch_size = self._get_config_and_data()
labels = _long_tensor([2] * batch_size).to(torch_device)
config.num_labels = 3
model = MvpForSequenceClassification(config)
model.to(torch_device)
outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=labels)
expected_shape = torch.Size((batch_size, config.num_labels))
self.assertEqual(outputs["logits"].shape, expected_shape)
self.assertIsInstance(outputs["loss"].item(), float)
def test_question_answering_forward(self):
config, input_ids, batch_size = self._get_config_and_data()
sequence_labels = ids_tensor([batch_size], 2).to(torch_device)
model = MvpForQuestionAnswering(config)
model.to(torch_device)
outputs = model(
input_ids=input_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.assertEqual(outputs["start_logits"].shape, input_ids.shape)
self.assertEqual(outputs["end_logits"].shape, input_ids.shape)
self.assertIsInstance(outputs["loss"].item(), float)
@timeout_decorator.timeout(1)
def test_lm_forward(self):
config, input_ids, batch_size = self._get_config_and_data()
lm_labels = ids_tensor([batch_size, input_ids.shape[1]], self.vocab_size).to(torch_device)
lm_model = MvpForConditionalGeneration(config)
lm_model.to(torch_device)
outputs = lm_model(input_ids=input_ids, labels=lm_labels)
expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape, expected_shape)
self.assertIsInstance(outputs["loss"].item(), float)
def test_lm_uneven_forward(self):
config = MvpConfig(
vocab_size=self.vocab_size,
d_model=14,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=8,
decoder_ffn_dim=8,
max_position_embeddings=48,
)
lm_model = MvpForConditionalGeneration(config).to(torch_device)
context = torch.tensor(
[[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], device=torch_device, dtype=torch.long
)
summary = torch.tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], device=torch_device, dtype=torch.long)
outputs = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary)
expected_shape = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape, expected_shape)
def test_generate_beam_search(self):
input_ids = torch.tensor([[71, 82, 2], [68, 34, 2]], device=torch_device, dtype=torch.long)
config = MvpConfig(
vocab_size=self.vocab_size,
d_model=24,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=32,
decoder_ffn_dim=32,
max_position_embeddings=48,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
)
lm_model = MvpForConditionalGeneration(config).to(torch_device)
lm_model.eval()
max_length = 5
generated_ids = lm_model.generate(
input_ids.clone(),
do_sample=True,
num_return_sequences=1,
num_beams=2,
no_repeat_ngram_size=3,
max_length=max_length,
)
self.assertEqual(generated_ids.shape, (input_ids.shape[0], max_length))
def test_shift_tokens_right(self):
input_ids = torch.tensor([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=torch.long)
shifted = shift_tokens_right(input_ids, 1, 2)
n_pad_before = input_ids.eq(1).float().sum()
n_pad_after = shifted.eq(1).float().sum()
self.assertEqual(shifted.shape, input_ids.shape)
self.assertEqual(n_pad_after, n_pad_before - 1)
self.assertTrue(torch.eq(shifted[:, 0], 2).all())
@slow
def test_tokenization(self):
tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp")
examples = [" Hello world", " DomDramg"] # need leading spaces for equality
fairseq_results = [
torch.tensor([0, 20920, 232, 2]),
torch.tensor([0, 11349, 495, 4040, 571, 2]),
]
for ex, desired_result in zip(examples, fairseq_results):
mvp_toks = tokenizer.encode(ex, return_tensors="pt").squeeze()
assert_tensors_close(desired_result.long(), mvp_toks, prefix=ex)
def test_generate_fp16(self):
config, input_ids, batch_size = self._get_config_and_data()
attention_mask = input_ids.ne(1).to(torch_device)
model = MvpForConditionalGeneration(config).eval().to(torch_device)
if torch_device == "cuda":
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def test_dummy_inputs(self):
config, *_ = self._get_config_and_data()
model = MvpForConditionalGeneration(config).eval().to(torch_device)
model(**model.dummy_inputs)
def test_resize_tokens_embeddings_more(self):
config, input_ids, _ = self._get_config_and_data()
def _get_embs(m):
return (m.get_input_embeddings().weight.data.clone(), m.get_output_embeddings().weight.data.clone())
model = MvpForConditionalGeneration(config).eval().to(torch_device)
input, output = _get_embs(model)
self.assertTrue(torch.eq(input, output).all())
new_vocab_size = 45
model.resize_token_embeddings(new_vocab_size)
input_new, output_new = _get_embs(model)
self.assertEqual(input_new.shape, (new_vocab_size, config.d_model))
self.assertEqual(output_new.shape, (new_vocab_size, config.d_model))
self.assertTrue(torch.eq(input_new, output_new).all())
@require_torch
class MvpModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(MvpModel, MvpForConditionalGeneration, MvpForSequenceClassification, MvpForQuestionAnswering)
if is_torch_available()
else ()
)
all_generative_model_classes = (MvpForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = (
{
"conversational": MvpForConditionalGeneration,
"feature-extraction": MvpModel,
"fill-mask": MvpForConditionalGeneration,
"question-answering": MvpForQuestionAnswering,
"summarization": MvpForConditionalGeneration,
"text-classification": MvpForSequenceClassification,
"text-generation": MvpForCausalLM,
"text2text-generation": MvpForConditionalGeneration,
"translation": MvpForConditionalGeneration,
"zero-shot": MvpForSequenceClassification,
}
if is_torch_available()
else {}
)
is_encoder_decoder = True
fx_compatible = False
test_pruning = False
test_missing_keys = False
# TODO: Fix the failed tests
def is_pipeline_test_to_skip(
self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def setUp(self):
self.model_tester = MvpModelTester(self)
self.config_tester = ConfigTester(self, config_class=MvpConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_encoder_decoder_model_standalone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
# MvpForSequenceClassification does not support inputs_embeds
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MvpModel, MvpForConditionalGeneration, MvpForQuestionAnswering):
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = wte(input_ids)
else:
inputs["inputs_embeds"] = wte(encoder_input_ids)
inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
with torch.no_grad():
model(**inputs)[0]
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = MvpForConditionalGeneration(config).eval().to(torch_device)
if torch_device == "cuda":
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
        raise  # no active exception: deliberately falls through to the except branch below
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:
            msg = f"tensor values are {pct_different:.1%} different."
else:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
def _long_tensor(tok_lst):
return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
@require_torch
@require_sentencepiece
@require_tokenizers
class MvpModelIntegrationTests(unittest.TestCase):
@cached_property
def default_tokenizer(self):
return MvpTokenizer.from_pretrained("RUCAIBox/mvp")
@slow
def test_inference_no_head(self):
model = MvpModel.from_pretrained("RUCAIBox/mvp").to(torch_device)
input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
attention_mask = input_ids.ne(model.config.pad_token_id)
with torch.no_grad():
output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
expected_shape = torch.Size((1, 11, 1024))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[0.3461, 0.3624, 0.2689], [0.3461, 0.3624, 0.2689], [-0.1562, 1.1637, -0.3784]], device=torch_device
)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-3))
@slow
def test_summarization_inference(self):
model = MvpForConditionalGeneration.from_pretrained("RUCAIBox/mvp").to(torch_device)
tok = self.default_tokenizer
# fmt: off
PGE_ARTICLE = """ Listen to local radio broadcasts for advertisements that reference casinos in your area.\nIf none are in your area, listen to national radio broadcasts for advertisements of casinos in other areas.\nNote the location that is mentioned in each advertisement that involves a casino.\nIf no locations are mentioned, note any additional contact information, such as a website or phone number. Use that information to find out where the casinos are.;\n,\n\nIf you learn about more than 1 casino on the radio, use the Internet to search the distance between your location and each casino. Sites such as maps.google.com or mapquest.com will help you in this search.'"""
# fmt: on
EXPECTED_SUMMARY = "Listen to the radio.\nUse the Internet."
dct = tok.batch_encode_plus(
[PGE_ARTICLE],
return_tensors="pt",
).to(torch_device)
hypotheses_batch = model.generate(**dct)
decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True)
self.assertEqual(EXPECTED_SUMMARY, decoded[0])
class MvpStandaloneDecoderModelTester:
def __init__(
self,
parent,
vocab_size=99,
batch_size=13,
d_model=16,
decoder_seq_length=7,
is_training=True,
is_decoder=True,
use_attention_mask=True,
use_cache=False,
use_labels=True,
decoder_start_token_id=2,
decoder_ffn_dim=32,
decoder_layers=2,
encoder_attention_heads=4,
decoder_attention_heads=4,
max_position_embeddings=30,
is_encoder_decoder=False,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.d_model = d_model
self.hidden_size = d_model
self.num_hidden_layers = decoder_layers
self.decoder_layers = decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_attention_heads = encoder_attention_heads
self.decoder_attention_heads = decoder_attention_heads
self.num_attention_heads = decoder_attention_heads
self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.use_cache = use_cache
self.max_position_embeddings = max_position_embeddings
self.is_encoder_decoder = is_encoder_decoder
self.scope = None
self.decoder_key_length = decoder_seq_length
self.base_model_out_len = 2
self.decoder_attention_idx = 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
config = MvpConfig(
vocab_size=self.vocab_size,
d_model=self.d_model,
encoder_layers=self.decoder_layers,
decoder_layers=self.decoder_layers,
decoder_ffn_dim=self.decoder_ffn_dim,
encoder_attention_heads=self.encoder_attention_heads,
decoder_attention_heads=self.decoder_attention_heads,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
use_cache=self.use_cache,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
max_position_embeddings=self.max_position_embeddings,
is_encoder_decoder=self.is_encoder_decoder,
)
return (
config,
input_ids,
attention_mask,
lm_labels,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
attention_mask,
lm_labels,
) = self.prepare_config_and_inputs()
encoder_hidden_states = floats_tensor([self.batch_size, self.decoder_seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
lm_labels,
)
def create_and_check_decoder_model_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
config.use_cache = True
model = MvpDecoder(config=config).to(torch_device).eval()
# first forward pass
outputs = model(input_ids, use_cache=True)
outputs_use_cache_conf = model(input_ids)
outputs_no_past = model(input_ids, use_cache=False)
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
past_key_values = outputs["past_key_values"]
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def create_and_check_decoder_model_attention_mask_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
model = MvpDecoder(config=config).to(torch_device).eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = input_ids.shape[-1] // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"]
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=attn_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
attention_mask,
lm_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class MvpStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (MvpDecoder, MvpForCausalLM) if is_torch_available() else ()
all_generative_model_classes = (MvpForCausalLM,) if is_torch_available() else ()
    fx_compatible = True
test_pruning = False
is_encoder_decoder = False
def setUp(
self,
):
self.model_tester = MvpStandaloneDecoderModelTester(self, is_training=False)
self.config_tester = ConfigTester(self, config_class=MvpConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
def test_decoder_model_attn_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)
def test_retain_grad_hidden_states_attentions(self):
# decoder cannot keep gradients
return
@unittest.skip("The model doesn't support left padding") # and it's not used enough to be worth fixing :)
def test_left_padding_compatibility(self):
pass
|
57bce6112be89ed19296579f00ffdab200578eef
|
596697330ac6f9cc04f11f327b0342b2c531ba37
|
/Week02/FaceRecognition/data_utils.py
|
875b16d551a409cd72c6593e6ba9c49f8fe1d6cd
|
[] |
no_license
|
snrazavi/Deep_Learning_in_Python_2018
|
302cd0cd415b67705081c91d4503311142047fba
|
4ce7b7abd154b8ef8982190b528809a39d8d167e
|
refs/heads/master
| 2023-02-07T20:38:13.706550
| 2023-02-06T13:43:01
| 2023-02-06T13:43:01
| 153,283,405
| 136
| 71
| null | 2021-09-20T06:32:10
| 2018-10-16T12:41:38
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,541
|
py
|
data_utils.py
|
import os
import glob
import shutil
import random
import numpy as np  # needed by pseudo_label (np.argmax, np.arange)
from tqdm import tqdm
def create_validation_data(trn_dir, val_dir, split=0.1, ext='png'):
if not os.path.exists(val_dir):
os.mkdir(val_dir)
train_ds = glob.glob(trn_dir + f'/*/*.{ext}')
print(len(train_ds))
    valid_sz = int(split * len(train_ds)) if split < 1.0 else int(split)  # fraction of the data, or an absolute count
valid_ds = random.sample(train_ds, valid_sz)
print(len(valid_ds))
for fname in tqdm(valid_ds):
basename = os.path.basename(fname)
        label = fname.split(os.sep)[-2]  # class label is the parent folder; os.sep keeps this portable beyond Windows
src_folder = os.path.join(trn_dir, label)
tgt_folder = os.path.join(val_dir, label)
if not os.path.exists(tgt_folder):
os.mkdir(tgt_folder)
shutil.move(os.path.join(src_folder, basename), os.path.join(tgt_folder, basename))
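# Usage sketch (paths are hypothetical): move 10% of the training images into a
# sibling validation folder, preserving the per-class sub-directories.
# create_validation_data('data/train', 'data/valid', split=0.1, ext='jpg')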
def pseudo_label(probs, tst_dir, test_dl, class_names, threshold=0.99999):
num_data = len(test_dl.dataset)
preds = np.argmax(probs, axis=1)
candidate_idxs = np.arange(num_data)[probs.max(axis=1) >= threshold]
    fnames = [os.path.basename(f[0]) for f in test_dl.dataset.imgs]  # portable alternative to splitting on '\\'
imgs = [fnames[i] for i in candidate_idxs]
labels = [class_names[preds[i]] for i in candidate_idxs]
    dest_folder = os.path.join(DATA_DIR, 'pseudo', 'train')  # NOTE: DATA_DIR is not defined in this module and must be supplied by the caller's namespace
for _, (img, label) in tqdm(enumerate(zip(imgs, labels))):
src = os.path.join(tst_dir, 'unk', img)
dst = os.path.join(dest_folder, label, img)
if not os.path.exists(dst):
shutil.copyfile(src, dst)
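# Usage sketch (names are hypothetical): copy test images the model classifies
# with near-certainty into the training tree as pseudo-labelled data.
# pseudo_label(probs, 'data/test', test_dl, class_names, threshold=0.99999)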
|
4f720ec3640d90a6a941b76c9474ed1b5a517f90
|
9ed3b16b3da72e4c47a04f2f2e3ef395e9fd9f20
|
/main/python-pyparsing/template.py
|
a105233640db415d4e1b7fa8cd670e2f1a3e8d24
|
[
"BSD-2-Clause"
] |
permissive
|
chimera-linux/cports
|
fdae59dc25856942be3041e10e3533dbf8f883c3
|
714680161cd719dd047452c95fbb9b447bc23a86
|
refs/heads/master
| 2023-09-03T19:30:40.720670
| 2023-09-03T15:07:40
| 2023-09-03T15:07:40
| 374,000,317
| 118
| 37
|
BSD-2-Clause
| 2023-09-14T20:31:08
| 2021-06-05T02:07:34
|
Python
|
UTF-8
|
Python
| false
| false
| 639
|
py
|
template.py
|
pkgname = "python-pyparsing"
pkgver = "3.0.9"
pkgrel = 0
build_style = "python_pep517"
hostmakedepends = [
"python-setuptools",
"python-flit_core",
"python-installer",
]
depends = ["python"]
pkgdesc = "Python parsing module"
maintainer = "q66 <q66@chimera-linux.org>"
license = "MIT"
url = "https://github.com/pyparsing/pyparsing"
source = f"{url}/archive/pyparsing_{pkgver}.tar.gz"
sha256 = "7e8ce1684c517f57f945698fd3bbf86b36a2e60cd223f74886d3af04deb06306"
# calls urlopen
options = ["!check"]
def do_build(self):
self.do("python", "-m", "flit_core.wheel")
def post_install(self):
self.install_license("LICENSE")
|
98805b1a8bcf90bbc116c5506408c0ed979d9a06
|
9da4adae4c389e84097a0da9bfce40f9132eef96
|
/pygame_menu/font.py
|
2fe87dff157a94c7698adc7601d0e1a2bc7ff765
|
[
"MIT"
] |
permissive
|
ppizarror/pygame-menu
|
f8fd2ff3acefad25b07e19499a2dfebd50507403
|
bcfaccbb11d4a6ecba588eec2851932dc46c2337
|
refs/heads/master
| 2023-07-07T10:38:09.651797
| 2023-06-28T18:00:25
| 2023-06-28T18:00:25
| 89,940,842
| 570
| 207
|
NOASSERTION
| 2023-08-19T19:17:59
| 2017-05-01T16:26:50
|
Python
|
UTF-8
|
Python
| false
| false
| 5,536
|
py
|
font.py
|
"""
pygame-menu
https://github.com/ppizarror/pygame-menu
FONTS
Menu fonts.
"""
__all__ = [
# Fonts path included in resources
'FONT_8BIT',
'FONT_BEBAS',
'FONT_COMIC_NEUE',
'FONT_DIGITAL',
'FONT_FRANCHISE',
'FONT_FIRACODE',
'FONT_FIRACODE_BOLD',
'FONT_FIRACODE_BOLD_ITALIC',
'FONT_FIRACODE_ITALIC',
'FONT_HELVETICA',
'FONT_MUNRO',
'FONT_NEVIS',
'FONT_OPEN_SANS',
'FONT_OPEN_SANS_BOLD',
'FONT_OPEN_SANS_ITALIC',
'FONT_OPEN_SANS_LIGHT',
'FONT_PT_SERIF',
'FONT_EXAMPLES',
# Typing
'FontType',
'FontInstance',
# Utils
'assert_font',
'get_font'
]
from pathlib import Path
from typing import Union, Optional, Any, Dict, Tuple
import os.path as path
import pygame.font as __font
# Available fonts path
__fonts_path__ = path.join(path.dirname(path.abspath(__file__)), 'resources', 'fonts', '{0}')
FONT_8BIT = __fonts_path__.format('8bit.ttf')
FONT_BEBAS = __fonts_path__.format('bebas.ttf')
FONT_COMIC_NEUE = __fonts_path__.format('comic_neue.ttf')
FONT_DIGITAL = __fonts_path__.format('digital.ttf')
FONT_FIRACODE = __fonts_path__.format('FiraCode-Regular.ttf')
FONT_FIRACODE_BOLD = __fonts_path__.format('FiraCode-Bold.ttf')
FONT_FIRACODE_BOLD_ITALIC = __fonts_path__.format('FiraMono-BoldItalic.ttf')
FONT_FIRACODE_ITALIC = __fonts_path__.format('FiraMono-Italic.ttf')
FONT_FRANCHISE = __fonts_path__.format('franchise.ttf')
FONT_HELVETICA = __fonts_path__.format('helvetica.ttf')
FONT_MUNRO = __fonts_path__.format('munro.ttf')
FONT_NEVIS = __fonts_path__.format('nevis.ttf')
FONT_OPEN_SANS = __fonts_path__.format('opensans_regular.ttf')
FONT_OPEN_SANS_BOLD = __fonts_path__.format('opensans_bold.ttf')
FONT_OPEN_SANS_ITALIC = __fonts_path__.format('opensans_italic.ttf')
FONT_OPEN_SANS_LIGHT = __fonts_path__.format('opensans_light.ttf')
FONT_PT_SERIF = __fonts_path__.format('ptserif_regular.ttf')
FONT_EXAMPLES = (FONT_8BIT, FONT_BEBAS, FONT_COMIC_NEUE, FONT_DIGITAL, FONT_FRANCHISE,
FONT_HELVETICA, FONT_MUNRO, FONT_NEVIS, FONT_OPEN_SANS,
FONT_OPEN_SANS_BOLD, FONT_OPEN_SANS_ITALIC, FONT_OPEN_SANS_LIGHT,
FONT_PT_SERIF, FONT_FIRACODE, FONT_FIRACODE_BOLD, FONT_FIRACODE_ITALIC,
FONT_FIRACODE_BOLD_ITALIC)
FontType = Union[str, __font.Font, Path]
FontInstance = (str, __font.Font, Path)
# Stores font cache
_cache: Dict[Tuple[FontType, int], '__font.Font'] = {}
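# Lookups are keyed by (font name or path, size), so repeated get_font() calls
# with the same arguments reuse a single pygame Font object.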
def assert_font(font: Any) -> None:
"""
Asserts if the given object is a font type.
:param font: Font object
"""
assert isinstance(font, FontInstance), \
'value must be a font type (str, Path, pygame.Font)'
def get_font(name: FontType, size: int) -> '__font.Font':
"""
Return a :py:class:`pygame.font.Font` object from a name or file.
:param name: Font name or path
:param size: Font size in px
:return: Font object
"""
assert_font(name)
assert isinstance(size, int)
font: Optional['__font.Font']
if isinstance(name, __font.Font):
font = name
return font
else:
name = str(name)
if name == '':
raise ValueError('font name cannot be empty')
        if size <= 0:
            raise ValueError('font size cannot be lower than or equal to zero')
        # If the font is not a file, fall back to a system font lookup
if not path.isfile(name):
font_name = name
name = __font.match_font(font_name)
if name is None: # Show system available fonts
from difflib import SequenceMatcher
from random import randrange
system_fonts = __font.get_fonts()
# Get the most similar example
most_similar = 0
most_similar_index = 0
for i in range(len(system_fonts)):
# noinspection PyArgumentEqualDefault
sim = SequenceMatcher(None, system_fonts[i], font_name).ratio()
if sim > most_similar:
most_similar = sim
most_similar_index = i
sys_font_sim = system_fonts[most_similar_index]
sys_suggestion = f'system font "{font_name}" unknown, use "{sys_font_sim}" instead'
sys_message = 'check system fonts with pygame.font.get_fonts() function'
# Get examples
examples_number = 3
examples = []
j = 0
for i in range(len(system_fonts)):
font_random = system_fonts[randrange(0, len(system_fonts))]
if font_random not in examples:
examples.append(font_random)
j += 1
if j >= examples_number:
break
examples.sort()
fonts_random = ', '.join(examples)
sys_message_2 = f'some examples: {fonts_random}'
# Raise the exception
raise ValueError(f'{sys_suggestion}\n{sys_message}\n{sys_message_2}')
# Try to load the font
font = None
if (name, size) in _cache:
return _cache[(name, size)]
try:
font = __font.Font(name, size)
except IOError:
pass
    # Raise an exception if the font could not be loaded
    if font is None:
        raise IOError(f'font file "{name}" cannot be loaded')  # "font" is always None here, so report the requested name instead
_cache[(name, size)] = font
return font
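# Usage sketch (pygame's font module must be initialized first):
# import pygame
# pygame.init()
# title_font = get_font(FONT_OPEN_SANS, 24)  # bundled resource font
# mono_font = get_font('courier', 16)        # falls back to a system font lookup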
|
243b6dcde18f941b70e0dae29ebdb6c7355207ef
|
7a19dfc7c8741e3c2e5e0c2f1b0260959c303ab4
|
/scripts/artifacts/viber.py
|
60f980506d5fa90219f1d2c3f5a131d3fe8d384a
|
[
"MIT"
] |
permissive
|
abrignoni/iLEAPP
|
89735f04b96a697c16e0d2edc44d95e529c01584
|
d9c43007aca17554148dbd2b0ffaad44fe3869f1
|
refs/heads/main
| 2023-08-08T13:36:48.234249
| 2023-08-01T20:29:17
| 2023-08-01T20:29:17
| 229,842,283
| 528
| 104
|
MIT
| 2023-09-12T21:45:41
| 2019-12-24T00:28:42
|
Python
|
UTF-8
|
Python
| false
| false
| 27,193
|
py
|
viber.py
|
# Get Viber settings, contacts, recent calls and messages information
# Author : Evangelos Dragonas (@theAtropos4n6)
# website : atropos4n6.com
# Date : 2022-03-15
# Version : 0.0.2
#
# The script queries the Settings.data and Contacts.data Viber dbs and creates a report of findings, including KML geolocation data
# Settings.data holds the user's personal data and configuration
# Contacts.data holds contacts, calls, messages and more...
#
# The code is divided in 4 queries-artifacts blocks.
#
# The 1st parses the settings db and reports on the user's available information regarding Viber configuration
#
# The 2nd parses contacts db, extracts and reports on user's contacts.
# Be advised that a contact may not participate in a chat (therefore a contact is not a chat 'member') and vice versa. A chat 'member' may not be registered as a Viber contact.
# Hope it makes sense.
#
# The 3rd parses the contacts db and reports on the user's recent calls that have no corresponding message (ZVIBERMESSAGE) entry. This indicates that the messages have been deleted, and the
# calls along with them. Unfortunately, no remote-partner information can be retrieved at this moment.
#
# The 4th parses the contacts db and reports on the user's chats. Two extra columns with each chat's grouped participants and phone numbers are also available.
#
# Also, be aware that there is more information stored within the above databases. This artifact assists in parsing most of it (but not all).
#
# Should you face a bug or want a specific field extracted, DM me.
#
import glob
import os
import pathlib
import sqlite3
import json
import scripts.artifacts.artGlobals
from packaging import version
from html import escape
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, logdevinfo, timeline, kmlgen, tsv, is_platform_windows, open_sqlite_db_readonly, media_to_html
def get_viber(files_found, report_folder, seeker, wrap_text):
viber_settings = {}
for file_found in files_found:
file_found = str(file_found)
iOSversion = scripts.artifacts.artGlobals.versionf
if version.parse(iOSversion) < version.parse("14"):
            logfunc("Viber parsing has not been tested on iOS " + iOSversion + ". Please contact @theAtropos4n6 to resolve this issue.")
if version.parse(iOSversion) >= version.parse("14"):
if file_found.endswith('Settings.data'):
db = open_sqlite_db_readonly(file_found)
cursor = db.cursor()
cursor.execute('''
SELECT
Data.key,value
FROM Data
WHERE Data.key IN
(
'_myUserName',
'_currentEmail',
'_myPhoneNumber',
'_myCanonizedPhoneNumber',
'_myFormattedPhoneNumber',
'_myCountryPhoneCode',
'_myCountryCode',
'_myLanguageCode',
'_wasabiLastKnownUserLocation',
'_uid',
'_appVersion',
'_savedDeviceId',
'_attemptsToDownloadBackupForRestore',
'_backupAttemptsCount',
'_hiddenChatsPINData',
'_myPhotoLocalID'
)
UNION
SELECT
Data.key,
CASE
WHEN value LIKE '-%' THEN value
ELSE datetime(value,'unixepoch')
END
FROM Data
WHERE Data.key IN
('_registrationDate',
'_autoBackupLastRunTime',
'_lastBackupStartDate')
UNION
SELECT
Data.key,
datetime(value,'unixepoch') -- this value is stored in the user localtime
FROM Data
WHERE Data.key IS '_birthdate'
ORDER BY value
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
data_list =[]
for row in all_rows:
viber_settings[row[0]] = row[1]
temp_list = list(row)
if temp_list[0] == '_appVersion':
temp_list[0] = 'Application Version'
elif temp_list[0] == '_lastBackupStartDate':
temp_list[0] = 'Last Backup Start Date - UTC'
elif temp_list[0] == '_myUserName':
temp_list[0] = 'User Name'
elif temp_list[0] == '_currentEmail':
temp_list[0] = 'Current Email'
elif temp_list[0] == '_birthdate':
temp_list[0] = "Birth Date - UTC (apply user's localtime offset)"
elif temp_list[0] == '_registrationDate':
temp_list[0] = 'Registration Date - UTC'
elif temp_list[0] == '_uid':
temp_list[0] = 'User ID'
elif temp_list[0] == '_myPhoneNumber':
temp_list[0] = 'Phone Number'
elif temp_list[0] == '_myCanonizedPhoneNumber':
temp_list[0] = 'Canonized Phone Number'
elif temp_list[0] == '_myFormattedPhoneNumber':
temp_list[0] = 'Formatted Phone Number'
elif temp_list[0] == '_myCountryPhoneCode':
temp_list[0] = 'Country Phone Code'
elif temp_list[0] == '_myCountryCode':
temp_list[0] = 'Country Code'
elif temp_list[0] == '_myLanguageCode':
temp_list[0] = 'Language Code'
elif temp_list[0] == '_wasabiLastKnownUserLocation':
temp_list[0] = 'Last Known User Location'
elif temp_list[0] == '_savedDeviceId':
temp_list[0] = 'Device ID'
elif temp_list[0] == '_myPhotoLocalID':
temp_list[0] = 'Profile Picture'
try:
if temp_list[1] is not None:
thumb = media_to_html(temp_list[1], files_found, report_folder)
temp_list[1] = thumb
else:
thumb = ''
except:
pass
elif temp_list[0] == '_attemptsToDownloadBackupForRestore':
temp_list[0] = 'Attempts To Download Backup For Restore'
try:
int.from_bytes(temp_list[1], byteorder='big') #needs further validation about the byteorder
except Exception as err:
logfunc(f'Viber - Settings "_attemptsToDownloadBackupForRestore" could not be extracted. The error was: {err}' )
elif temp_list[0] == '_backupAttemptsCount':
temp_list[0] = 'Backup Attempts Count'
try:
int.from_bytes(temp_list[1], byteorder='big') #needs further validation about the byteorder
except Exception as err:
logfunc(f'Viber - Settings "_backupAttemptsCount" could not be extracted. The error was: {err}' )
elif temp_list[0] == '_autoBackupLastRunTime':
temp_list[0] = 'Auto Backup Last Run Time - UTC'
x = str(temp_list[1])
if x.startswith("-"):
temp_list[1] = 'Not Applied'
                        elif temp_list[0] == '_lastBackupStartDate':  # NOTE: unreachable, since the earlier branch already renamed this key
                            x = str(temp_list[1])
                            if x.startswith("-"):
                                temp_list[1] = 'Not Applied'
elif temp_list[0] == '_hiddenChatsPINData':
temp_list[0] = 'Hidden Chats PIN Data'
row = tuple(temp_list)
data_list.append((row[0], row[1]))
if usageentries > 0:
report = ArtifactHtmlReport('Viber - Settings')
report.start_artifact_report(report_folder, 'Viber - Settings')
report.add_script()
data_headers = ('Setting','Value')
report.write_artifact_data_table(data_headers, data_list, file_found, html_escape=False)
report.end_artifact_report()
tsvname = 'Viber - Settings'
tsv(report_folder, data_headers, data_list, tsvname)
db.close()
elif file_found.endswith('Contacts.data'):
db = open_sqlite_db_readonly(file_found)
cursor = db.cursor()
cursor.execute('''
SELECT
ZABCONTACT.ZMAINNAME AS 'Main Name',
ZABCONTACT.ZPREFIXNAME AS 'Prefix Name',
ZABCONTACT.ZSUFFIXNAME AS 'Suffix Name',
ZABCONTACTNUMBER.ZPHONE AS 'Phone Number',
ZABCONTACTNUMBER.ZCANONIZEDPHONENUM AS 'Canonized Phone Number',
ZABCONTACT.ZCONTACTID AS 'Contact ID'
FROM ZABCONTACT
LEFT JOIN ZABCONTACTNUMBER ON ZABCONTACT.Z_PK = ZABCONTACTNUMBER.ZCONTACT
UNION
SELECT
ZABCONTACT.ZMAINNAME AS 'Main Name',
ZABCONTACT.ZPREFIXNAME AS 'Prefix Name',
ZABCONTACT.ZSUFFIXNAME AS 'Suffix Name',
ZABCONTACTNUMBER.ZPHONE AS 'Phone Number',
ZABCONTACTNUMBER.ZCANONIZEDPHONENUM AS 'Canonized Phone Number',
ZABCONTACT.ZCONTACTID AS 'Contact ID'
FROM ZABCONTACTNUMBER
LEFT JOIN ZABCONTACT ON ZABCONTACT.Z_PK = ZABCONTACTNUMBER.ZCONTACT
ORDER BY ZMAINNAME
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
data_list =[]
for row in all_rows:
data_list.append((row[0], row[1], row[2], row[3], row[4], row[5]))
if usageentries > 0:
report = ArtifactHtmlReport('Viber - Contacts')
report.start_artifact_report(report_folder, 'Viber - Contacts')
report.add_script()
data_headers = ('Main Name','Prefix Name','Suffix Name','Phone Number','Canonized Phone Number','Contact ID')
report.write_artifact_data_table(data_headers, data_list, file_found, html_escape=False)
report.end_artifact_report()
tsvname = 'Viber - Contacts'
tsv(report_folder, data_headers, data_list, tsvname)
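            # Core Data timestamps count seconds from 2001-01-01 (the Apple
            # "Cocoa" epoch); adding 978307200 converts them to the Unix epoch
            # so SQLite's datetime(...,'unixepoch') renders them correctly.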
cursor.execute('''
SELECT
datetime(ZRECENT.ZDATE+ 978307200,'unixepoch') AS 'Timestamp - UTC',
ZRECENT.ZRECENTSLINE AS 'EMPTY DUMMY COLUMN',
CASE
WHEN ZRECENT.ZCALLTYPE = 'missed' THEN 'Missed Audio Call'
WHEN ZRECENT.ZCALLTYPE = 'missed_with_video' THEN 'Missed Video Call'
WHEN ZRECENT.ZCALLTYPE = 'outgoing_viber' THEN 'Outgoing Audio Call'
WHEN ZRECENT.ZCALLTYPE = 'outgoing_viber_with_video' THEN 'Outgoing Video Call'
WHEN ZRECENT.ZCALLTYPE = 'incoming_with_video' THEN 'Incoming Video Call'
WHEN ZRECENT.ZCALLTYPE = 'incoming' THEN 'Incoming Audio Call'
ELSE ZRECENT.ZCALLTYPE
end AS 'Call Type',
ZRECENT.ZDURATION AS 'Duration'
FROM ZRECENT
WHERE ZRECENT.ZCALLLOGMESSAGE IS NULL AND ZRECENT.ZRECENTSLINE IS NULL
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
data_list =[]
for row in all_rows:
temp_list = list(row)
try:
if 'Outgoing' in temp_list[2]:
temp_list[1] = str(viber_settings['_myUserName']) + ',' + str(viber_settings['_myPhoneNumber'])
except TypeError:
pass
row = tuple(temp_list)
data_list.append((row[0], row[1], row[2], row[3]))
if usageentries > 0:
report = ArtifactHtmlReport('Viber - Call Remnants')
report.start_artifact_report(report_folder, 'Viber - Call Remnants')
report.add_script()
data_headers = ('Timestamp - UTC','Caller','Call Type','Duration')
report.write_artifact_data_table(data_headers, data_list, file_found, html_escape=False)
report.end_artifact_report()
cursor.execute('''
SELECT
CHAT_MEMBER.ZDISPLAYFULLNAME AS 'Sender (Display Full Name)',
CHAT_MEMBER.ZDISPLAYSHORTNAME AS 'Sender (Display Short Name)',
CHAT_MEMBER.ZPHONE AS 'Sender (Phone)',
CHATS.Chat_Name AS 'Chat Name',
CHATS.CHAT_MEMBERS AS 'Chat Participant(s)',
CHATS.CHAT_PHONES 'Chat Phone(s)',
datetime(ZVIBERMESSAGE.ZSTATEDATE+ 978307200,'unixepoch') AS 'Message Creation Date - UTC',
datetime(ZVIBERMESSAGE.ZDATE+ 978307200,'unixepoch') AS 'Message Change State Date - UTC',
datetime(RECENT.ZRECENTDATE+ 978307200,'unixepoch') AS 'Call Date - UTC',
CASE
WHEN ZCALLTYPE = 'missed' THEN 'Missed Audio Call'
WHEN ZCALLTYPE = 'missed_with_video' THEN 'Missed Video Call'
WHEN ZCALLTYPE = 'outgoing_viber' THEN 'Outgoing Audio Call'
WHEN ZCALLTYPE = 'outgoing_viber_with_video' THEN 'Outgoing Video Call'
WHEN ZCALLTYPE = 'incoming_with_video' THEN 'Incoming Video Call'
WHEN ZCALLTYPE = 'incoming' THEN 'Incoming Audio Call'
ELSE ZCALLTYPE
end AS 'Call Type',
CASE
WHEN ZVIBERMESSAGE.ZSTATE IN ('send','delivered') THEN 'Outgoing'
WHEN ZVIBERMESSAGE.ZSTATE = 'received' THEN 'Incoming'
ELSE ZVIBERMESSAGE.ZSTATE
END AS 'State',
RECENT.ZDURATION AS 'Duration',
ZVIBERMESSAGE.ZSYSTEMTYPE 'System Type Description',
ZVIBERMESSAGE.ZMETADATA AS 'Message Metadata',
ZVIBERMESSAGE.ZTEXT AS 'Message Content',
ZATTACHMENT.ZNAME AS 'Attachment Name',
ZATTACHMENT.ZTYPE AS 'Attachment Type',
ZATTACHMENT.ZFILESIZE AS 'Attachment Size',
ZVIBERLOCATION.ZLATITUDE AS 'Latitude',
ZVIBERLOCATION.ZLONGITUDE AS 'Longitude',
CASE
WHEN CHATS.Chat_Deleted = 1 THEN 'True'
WHEN CHATS.Chat_Deleted = 0 THEN 'False'
ELSE CHATS.Chat_Deleted
END AS 'Conversation Deleted',
CASE
WHEN ZVIBERMESSAGE.ZBEINGDELETED = 1 THEN 'True'
WHEN ZVIBERMESSAGE.ZBEINGDELETED = 0 THEN 'False'
ELSE ZVIBERMESSAGE.ZBEINGDELETED
END AS 'Message Deleted',
CHATS.ZTIMEBOMBDURATION AS 'Conversation Time Bomb Duration',
ZVIBERMESSAGE.ZTIMEBOMBDURATION AS 'Message Time Bomb Duration',
datetime(ZVIBERMESSAGE.ZTIMEBOMBTIMESTAMP+ 978307200,'unixepoch') AS 'Message Time Bomb Timestamp',
CASE
WHEN CHATS.Chat_Favorite= 1 THEN 'True'
WHEN CHATS.Chat_Favorite = 0 THEN 'False'
ELSE CHATS.Chat_Favorite
END AS 'Conversation Marked Favorite',
ZVIBERMESSAGE.ZLIKESCOUNT AS 'Likes Count'
FROM
ZVIBERMESSAGE
LEFT JOIN
(SELECT
ZVIBERMESSAGE.ZCONVERSATION,
ZCONVERSATION.ZNAME AS 'Chat_Name',
ZCONVERSATION.ZBEINGDELETED AS 'Chat_Deleted',
ZCONVERSATION.ZFAVORITE AS 'Chat_Favorite',
ZCONVERSATION.ZTIMEBOMBDURATION,
coalesce(ZVIBERMESSAGE.ZPHONENUMINDEX,ZCONVERSATION.ZINTERLOCUTOR) AS 'MEMBER_ID',
MEMBER.ZDISPLAYFULLNAME,
MEMBER.ZDISPLAYSHORTNAME,
MEMBER.ZNAME AS 'Participant_Name',
MEMBER.ZCANONIZEDPHONENUM,
MEMBER.ZPHONE,
group_concat(DISTINCT(MEMBER.ZDISPLAYFULLNAME)) AS 'CHAT_MEMBERS',
group_concat(DISTINCT(MEMBER.ZPHONE)) AS 'CHAT_PHONES',
group_concat(DISTINCT(MEMBER.ZCANONIZEDPHONENUM)) AS 'CHAT_CANONIZED_PHONES'
FROM ZVIBERMESSAGE,ZCONVERSATION
LEFT JOIN
(SELECT
ZMEMBER.ZDISPLAYFULLNAME,
ZMEMBER.ZDISPLAYSHORTNAME,
ZMEMBER.ZNAME,
ZPHONENUMBER.ZCANONIZEDPHONENUM,
ZPHONENUMBER.ZPHONE,
ZMEMBER.Z_PK
FROM ZMEMBER
LEFT JOIN ZPHONENUMBER ON ZMEMBER.Z_PK = ZPHONENUMBER.ZMEMBER
UNION
SELECT
ZMEMBER.ZDISPLAYFULLNAME,
ZMEMBER.ZDISPLAYSHORTNAME,
ZMEMBER.ZNAME,
ZPHONENUMBER.ZCANONIZEDPHONENUM,
ZPHONENUMBER.ZPHONE,
ZMEMBER.Z_PK
FROM ZPHONENUMBER
LEFT JOIN ZMEMBER ON ZPHONENUMBER.ZMEMBER = ZMEMBER.Z_PK
) AS MEMBER ON MEMBER.Z_PK = MEMBER_ID
LEFT JOIN ZPHONENUMBER ON MEMBER_ID = ZPHONENUMBER.ZMEMBER
WHERE ZVIBERMESSAGE.ZCONVERSATION = ZCONVERSATION.Z_PK
GROUP BY ZVIBERMESSAGE.ZCONVERSATION
) CHATS ON ZVIBERMESSAGE.ZCONVERSATION = CHATS.ZCONVERSATION
LEFT JOIN
(SELECT
ZMEMBER.ZDISPLAYFULLNAME,
ZMEMBER.ZDISPLAYSHORTNAME,
ZMEMBER.ZNAME,
ZPHONENUMBER.ZCANONIZEDPHONENUM,
ZPHONENUMBER.ZPHONE,
ZMEMBER.Z_PK
FROM ZMEMBER
LEFT JOIN ZPHONENUMBER ON ZMEMBER.Z_PK = ZPHONENUMBER.ZMEMBER
UNION
SELECT
ZMEMBER.ZDISPLAYFULLNAME,
ZMEMBER.ZDISPLAYSHORTNAME,
ZMEMBER.ZNAME,
ZPHONENUMBER.ZCANONIZEDPHONENUM,
ZPHONENUMBER.ZPHONE,
ZMEMBER.Z_PK
FROM ZPHONENUMBER
LEFT JOIN ZMEMBER ON ZPHONENUMBER.ZMEMBER = ZMEMBER.Z_PK
) AS CHAT_MEMBER ON ZVIBERMESSAGE.ZPHONENUMINDEX = CHAT_MEMBER.Z_PK
LEFT JOIN
(SELECT
ZRECENT.ZDURATION,
ZRECENT.ZCALLLOGMESSAGE,
ZRECENT.ZDATE AS 'ZRECENTDATE',
ZRECENTSLINE.ZDATE AS 'ZRECENTSLINEDATE',
ZRECENT.ZCALLTYPE,
ZRECENTSLINE.ZCALLTYPE AS 'ZRECENTSLINECALLTYPE',
ZRECENTSLINE.ZPHONENUMBER AS 'PHONE NUMBER'
FROM ZRECENT
LEFT JOIN ZRECENTSLINE ON ZRECENT.ZRECENTSLINE = ZRECENTSLINE.Z_PK
) AS RECENT ON ZVIBERMESSAGE.Z_PK = RECENT.ZCALLLOGMESSAGE
LEFT JOIN ZVIBERLOCATION ON ZVIBERMESSAGE.ZLOCATION = ZVIBERLOCATION.Z_PK
LEFT JOIN ZATTACHMENT ON ZVIBERMESSAGE.ZATTACHMENT = ZATTACHMENT.Z_PK
ORDER BY ZVIBERMESSAGE.Z_PK
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
data_list =[]
for row in all_rows:
temp_list = list(row)
temp_chats_names = str(temp_list[4])
temp_list[4] = temp_chats_names + ',' + str(viber_settings['_myUserName'])
temp_chats_phones = str(temp_list[5])
temp_list[5] = temp_chats_phones + ',' + str(viber_settings['_myPhoneNumber'])
if temp_list[13]:
                y = json.loads(temp_list[13], strict=False) # geolocation data lives under ['pa_message_data']['rich_media']['Buttons'][2]['Map']
                #If the 'Map' key is found, latitude/longitude are assigned to the corresponding columns; otherwise execution continues (any key/index errors are passed over)
temp_list[13] = ''
try:
temp_list[18] = y['pa_message_data']['rich_media']['Buttons'][2]['Map']['Latitude']
temp_list[19] = y['pa_message_data']['rich_media']['Buttons'][2]['Map']['Longitude']
except (KeyError,IndexError) as e:
pass
                #This long chain of dict lookups extracts only the fields identified as important from the metadata dictionary.
                #Only specific fields are extracted so the report stays readable; for the complete picture, inspect the full
                #dictionary in the .db itself. That is why this column is named 'Message Metadata Fragments'.
#general values
if "Text" in y:
try:
temp_list[13] += "Text: "+ str(y['Text'])+","
except KeyError:
pass
if "Title" in y:
try:
temp_list[13] += "Title: "+ str(y['Title'])+","
except KeyError:
pass
if "URL" in y:
try:
temp_list[13] += "URL: "+ str(y['URL'])+","
except KeyError:
pass
if "ThumbnailURL" in y:
try:
temp_list[13] += "ThumbnailURL: "+ str(y['ThumbnailURL'])+","
except KeyError:
pass
if "Type" in y:
try:
temp_list[13] += "Type: "+ str(y['Type'])+","
except KeyError:
pass
if "generalFwdInfo" in y:
try:
temp_list[13] += "Original Chat ID: "+ str(y['generalFwdInfo']['orig_chat_id'])+","
except KeyError:
pass
if "audio_ptt" in y:
try:
temp_list[13] += "Audio Duration: "+ str(y['audio_ptt']['duration'])+","
except KeyError:
pass
#fileInfo values
if "fileInfo" in y:
try:
temp_list[13] += "File Info - Content Type: "+ str(y['fileInfo']['ContentType'])+","
except KeyError:
pass
try:
temp_list[13] += "File Info - Type: "+ str(y['fileInfo']['Type'])+","
except KeyError:
pass
try:
temp_list[13] += "File Info - Hash: "+ str(y['fileInfo']['FileHash'])+","
except KeyError:
pass
try:
temp_list[13] += "File Info - Name: "+ str(y['fileInfo']['FileName'])+","
except KeyError:
pass
try:
temp_list[13] += "File Info - Extension: "+ str(y['fileInfo']['FileExt'])+","
except KeyError:
pass
try:
temp_list[13] += "File Info - Duration: "+ str(y['fileInfo']['Duration'])+","
except KeyError:
pass
try:
temp_list[13] += "File Info - Size: "+ str(y['fileInfo']['FileSize'])+","
except KeyError:
pass
try:
temp_list[13] += "File Info - Original Size: "+ str(y['fileInfo']['OrigSize'])+","
except KeyError:
pass
try:
temp_list[13] += "File|Media Info - iOS Origin: "+ str(y['fileInfo']['mediaInfo']['ios_origin'])+","
except KeyError:
pass
try:
temp_list[13] += "File|Media Info - Width: "+ str(y['fileInfo']['mediaInfo']['Width'])+","
except KeyError:
pass
try:
temp_list[13] += "File|Media Info - Height: "+ str(y['fileInfo']['mediaInfo']['Height'])+","
except KeyError:
pass
try:
temp_list[13] += "File|Media Info - Media Type: "+ str(y['fileInfo']['mediaInfo']['MediaType'])+","
except KeyError:
pass
#custom_sticker_info values
if "custom_sticker_info" in y:
try:
temp_list[13] += "Custom Sticker Info - Package ID: "+ str(y['custom_sticker_info']['package_id'])+","
except KeyError:
pass
try:
temp_list[13] += "Custom Sticker Info - Sticker ID: "+ str(y['custom_sticker_info']['sticker_id'])+","
except KeyError:
pass
#groupReferralInfo values
if "groupReferralInfo" in y:
try:
temp_list[13] += "Group ID: "+ str(y['groupReferralInfo']['groupID'])+","
except KeyError:
pass
try:
temp_list[13] += "Group Name: "+ str(y['groupReferralInfo']['groupName'])+","
except KeyError:
pass
try:
temp_list[13] += "Invite Link: "+ str(y['groupReferralInfo']['inviteLink'])+","
except KeyError:
pass
#pa_message_data values
if "pa_message_data" in y:
try:
temp_list[13] += "Message Data - Text: "+ str(y['pa_message_data']['text'])+","
except KeyError:
pass
try:
temp_list[13] += "Message Data - Sender Name: "+ str(y['pa_message_data']['sender']['name'])+","
except KeyError:
pass
try:
temp_list[13] += "Message Data - Alt Text: "+ str(y['pa_message_data']['alt_text'])+","
except KeyError:
pass
try:
temp_list[13] += "Message Data - Favorites Metadata - URL: "+ str(y['pa_message_data']['rich_media']['FavoritesMetadata']['url'])+","
except KeyError:
pass
#pin values
if "pin" in y:
try:
temp_list[13] += "Pin - Action: "+ str(y['pin']['action'])+","
except KeyError:
pass
try:
temp_list[13] += "Pin - Text: "+ str(y['pin']['text'])+","
except KeyError:
pass
try:
temp_list[13] += "Pin - Description: "+ str(y['pin']['extended']['descr'])+","
except KeyError:
pass
#poll values
if "poll" in y:
try:
temp_list[13] += "Poll - Group ID: "+ str(y['poll']['groupID'])+","
except KeyError:
pass
try:
temp_list[13] += "Poll - Type: "+ str(y['poll']['type'])+","
except KeyError:
pass
try:
temp_list[13] += "Poll - Sender ID: "+ str(y['poll']['senderID'])+","
except KeyError:
pass
try:
temp_list[13] += "Poll - Multiple: "+ str(y['poll']['multiple'])+","
except KeyError:
pass
try:
temp_list[13] += "Poll - Quiz Text: "+ str(y['poll']['quiz_text'])+","
except KeyError:
pass
try:
temp_list[13] += "Poll - Description: "+ str(y['poll']['extended']['descr'])+","
except KeyError:
pass
try:
if y['poll']['options']:
z = ''
for x in y['poll']['options']:
try:
z = x['count']
temp_list[13] += "Poll - Options - Count: "+ str(z)+","
except (KeyError,IndexError) as e:
pass
try:
z = x['name']
temp_list[13] += "Poll - Options - Name: "+ str(z)+","
except (KeyError,IndexError) as e:
pass
try:
z = x['isLiked']
temp_list[13] += "Poll - Options - Is Liked: "+ str(z)+","
except (KeyError,IndexError) as e:
pass
except (KeyError,IndexError) as e:
pass
#quote values
if "quote" in y:
try:
temp_list[13] += "Quote - Text: "+ str(y['quote']['text'])+","
except KeyError:
pass
try:
temp_list[13] += "Quote - Name: "+ str(y['quote']['name'])+","
except KeyError:
pass
try:
temp_list[13] += "Quote - Attachment Name: "+ str(y['quote']['attachmentName'])+","
except KeyError:
pass
try:
temp_list[13] += "Quote - Attachment UID: "+ str(y['quote']['attachmentUID'])+","
except KeyError:
pass
try:
temp_list[13] += "Quote - Attachment Preview Path: "+ str(y['quote']['attachmentPreviewPath'])+","
except KeyError:
pass
try:
temp_list[13] += "Quote - Text Meta Info - Data: "+ y['quote']['textMetaInfo_v2'][0]['data']+","
except (KeyError,IndexError) as e:
pass
if temp_list[10] == 'Outgoing':
temp_list[0] = viber_settings['_myUserName']
temp_list[1] = ''
temp_list[2] = viber_settings['_myPhoneNumber']
if row[15] is not None:
thumb = media_to_html(row[15], files_found, report_folder)
else:
thumb = ''
row = tuple(temp_list)
data_list.append((row[6], row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[14], row[15], thumb, row[8], row[9], row[10], row[11], row[12], row[16], row[17], row[18],row[19], row[20], row[21], row[22], row[23], row[24], row[25], row[26], row[13]))
if usageentries > 0:
report = ArtifactHtmlReport('Viber - Chats')
report.start_artifact_report(report_folder, 'Viber - Chats')
report.add_script()
data_headers = ('Timestamp', 'Sender (Display Full Name)','Sender (Display Short Name)','Sender (Phone)','Chat Name','Chat Participant(s)','Chat Phone(s)', 'Message Creation Date - UTC','Message Change State Date - UTC','Message Content','Attachment Name', 'Attachment','Call Date - UTC','Call Type','State','Duration (Seconds)','System Type Description','Attachment Type','Attachment Size','Latitude','Longitude','Conversation Deleted','Message Deleted', 'Conversation Time Bomb Duration','Message Time Bomb Duration','Message Time Bomb Timestamp - UTC','Conversation Marked Favorite','Likes Count','Message Metadata Fragments')
report.write_artifact_data_table(data_headers, data_list, file_found, html_no_escape=['Attachment']) #html_escape=False
report.end_artifact_report()
kmlactivity = 'Viber - Chats'
kmlgen(report_folder, kmlactivity, data_list, data_headers)
tsvname = 'Viber - Chats'
tsv(report_folder, data_headers, data_list, tsvname)
db.close()
else:
logfunc('No Viber data found.')
__artifacts__ = {
"viber": (
"Viber",
('**/com.viber/settings/Settings.data','**/com.viber/database/Contacts.data','**/Containers/Data/Application/*/Documents/Attachments/*.*','**/com.viber/ViberIcons/*.*'),
get_viber)
}
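# A possible refactor of the long try/except chains above (a sketch, not
# part of the original artifact; the helper name and the field path shown in
# the usage comment are illustrative):
def _pick_fragment(meta, label, *keys):
    """Return 'label: value,' for a nested key path, or '' if it is absent."""
    value = meta
    try:
        for key in keys:
            value = value[key]
    except (KeyError, IndexError, TypeError):
        return ''
    return '{}: {},'.format(label, value)
# Each block in the row loop would then collapse to a single call, e.g.:
#   temp_list[13] += _pick_fragment(y, 'File Info - Name', 'fileInfo', 'FileName')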
|
7b3322dfead3888a3d22f2e1834b910421a2d6f2
|
a29b8d6ae6642ef80d04ae99d721b703de06db69
|
/maro/simulator/scenarios/vm_scheduling/cluster.py
|
d598353bebcdd288a50fd75628ef19e2531e5a81
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
microsoft/maro
|
6aab1a4e86fddabf7f242f0d1020d985a5f7a5f3
|
b3c6a589ad9036b03221e776a6929b2bc1eb4680
|
refs/heads/master
| 2023-08-24T16:52:38.250279
| 2023-05-15T04:31:58
| 2023-05-15T04:31:58
| 230,389,247
| 764
| 158
|
MIT
| 2023-07-25T20:59:06
| 2019-12-27T06:48:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,081
|
py
|
cluster.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import List
from maro.backends.frame import NodeAttribute, NodeBase, node
@node("clusters")
class Cluster(NodeBase):
"""Cluster node definition in frame."""
id = NodeAttribute("i2")
region_id = NodeAttribute("i2")
zone_id = NodeAttribute("i2")
data_center_id = NodeAttribute("i2")
# Total number of machines in the cluster.
total_machine_num = NodeAttribute("i")
    # The number of empty machines in this cluster. An empty machine is one whose allocated CPU cores are 0.
empty_machine_num = NodeAttribute("i")
def __init__(self):
self._id: int = 0
self._region_id: int = 0
self._zone_id: int = 0
self._data_center_id: int = 0
self._total_machine_num: int = 0
self._cluster_type: str = ""
self._rack_list: List[int] = []
def set_init_state(self, id: int, region_id: int, zone_id: int, data_center_id: int, total_machine_num: int):
"""Set initialize state, that will be used after frame reset.
Args:
id (int): Region id.
"""
self._id = id
self._region_id = region_id
self._zone_id = zone_id
self._data_center_id = data_center_id
self._total_machine_num = total_machine_num
self.reset()
def reset(self):
"""Reset to default value."""
self.id = self._id
self.region_id = self._region_id
self.zone_id = self._zone_id
self.data_center_id = self._data_center_id
self.total_machine_num = self._total_machine_num
self.empty_machine_num = self.total_machine_num
@property
def rack_list(self) -> List[int]:
return self._rack_list
@rack_list.setter
def rack_list(self, rack_list: List[int]):
self._rack_list = rack_list
@property
def cluster_type(self) -> str:
return self._cluster_type
@cluster_type.setter
def cluster_type(self, cluster_type: str):
self._cluster_type = cluster_type
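# Lifecycle sketch (illustrative; in MARO these calls are made by the
# scenario's business engine after the frame is built, not by user code):
#
#   cluster.set_init_state(id=0, region_id=0, zone_id=0,
#                          data_center_id=0, total_machine_num=16)
#   cluster.reset()               # restores cached values; empty_machine_num == 16
#   cluster.cluster_type = "GPU"  # plain Python attribute, not a frame attribute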
|
0b6067778adbc6a977875efb340d88f5abeaad1c
|
ed75170ffe743eea5f2fedd518c21b61de55f879
|
/tbats/error/__init__.py
|
9befdef3851603ca020185dd2bdd14db44cdd3c5
|
[
"MIT"
] |
permissive
|
intive-DataScience/tbats
|
d294fe6d3dcb4ec0b2fc0db5e6aaaae08ade14b5
|
184bd635e1aea6bd1dd0ac7fa2339257b9ca6bdb
|
refs/heads/master
| 2023-04-30T02:25:00.961248
| 2023-04-17T10:20:31
| 2023-04-17T10:20:31
| 162,722,338
| 162
| 21
|
MIT
| 2022-12-09T07:30:53
| 2018-12-21T14:19:21
|
Python
|
UTF-8
|
Python
| false
| false
| 384
|
py
|
__init__.py
|
from .BatsException import BatsException
from .BatsWarning import BatsWarning
from .InputArgsException import InputArgsException
from .InputArgsWarning import InputArgsWarning
from .ModelWarning import ModelWarning
from .ExceptionHandler import ExceptionHandler
__all__ = ['BatsException', 'BatsWarning', 'InputArgsException', 'InputArgsWarning', 'ModelWarning', 'ExceptionHandler']
|
c78155c42db07f087be3d0a1cc20ed4590d147b7
|
8672b779b8f506ab7709319023307a44eba92218
|
/sample/sample_insert_dict.py
|
88ce0dfdc9afaac8142df406280294ab19b0834c
|
[
"MIT"
] |
permissive
|
thombashi/SimpleSQLite
|
6975de1f873793783e8998ff30f18f4ed7270272
|
26dd2ab6bc32f766e67e338301a43f6213f61bba
|
refs/heads/master
| 2023-08-28T09:18:08.480668
| 2023-06-24T16:55:38
| 2023-06-24T16:55:38
| 52,209,201
| 140
| 20
|
MIT
| 2023-09-10T05:53:21
| 2016-02-21T14:01:23
|
Python
|
UTF-8
|
Python
| false
| false
| 804
|
py
|
sample_insert_dict.py
|
#!/usr/bin/env python3
from simplesqlite import SimpleSQLite
def main():
table_name = "sample_table"
con = SimpleSQLite("sample.sqlite", "w")
con.create_table_from_data_matrix(
table_name, ["attr_a", "attr_b", "attr_c", "attr_d", "attr_e"], [[1, 1.1, "aaa", 1, 1]]
)
con.insert(
table_name,
record={"attr_a": 4, "attr_b": 4.4, "attr_c": "ddd", "attr_d": 4.44, "attr_e": "hoge"},
)
con.insert_many(
table_name,
records=[
{"attr_a": 5, "attr_b": 5.5, "attr_c": "eee", "attr_d": 5.55, "attr_e": "foo"},
{"attr_a": 6, "attr_c": "fff"},
],
)
result = con.select(select="*", table_name=table_name)
for record in result.fetchall():
print(record)
if __name__ == "__main__":
main()
|
f41d5e249307284a6a1308aeb7c44aa06731d6e8
|
bb71b5b3ef0e6eb5cfd27e943e206f40cd0aeb90
|
/tests_e2e/orchestrator/scripts/check-agent-log.py
|
8807f8046abcb2dc2428d84d9c4ee808c3d08a74
|
[
"Apache-2.0"
] |
permissive
|
Azure/WALinuxAgent
|
c35af1df7b52e3e9621757fe7992f3fa4c7c8c49
|
28345a55f9b21dae89472111635fd6e41809d958
|
refs/heads/master
| 2023-08-24T22:08:56.646723
| 2023-07-27T00:44:46
| 2023-07-27T00:44:46
| 4,576,639
| 473
| 436
|
Apache-2.0
| 2023-09-14T20:11:34
| 2012-06-06T18:55:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,460
|
py
|
check-agent-log.py
|
#!/usr/bin/env pypy3
# Microsoft Azure Linux Agent
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import json
import sys
from pathlib import Path
from tests_e2e.tests.lib.agent_log import AgentLog
try:
parser = argparse.ArgumentParser()
parser.add_argument('path', nargs='?', help='Path of the log file', default='/var/log/waagent.log')
parser.add_argument('-j', '--json', action='store_true', help='Produce a JSON report')
parser.set_defaults(json=False)
args = parser.parse_args()
error_list = AgentLog(Path(args.path)).get_errors()
if args.json:
print(json.dumps(error_list, default=lambda o: o.__dict__))
else:
if len(error_list) == 0:
print("No errors were found.")
else:
for e in error_list:
print(e.text)
except Exception as e:
print(f"{e}", file=sys.stderr)
sys.exit(1)
sys.exit(0)
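# Example invocations (a sketch; the log paths are illustrative):
#
#   ./check-agent-log.py                          # checks /var/log/waagent.log
#   ./check-agent-log.py /tmp/waagent.log --json  # JSON report for a copied log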
|
20583ae40330ebb316a3fa539f5277dbdc3c08cf
|
3551f4a2e2835ee0e83c307b2365eab7a3340641
|
/django_unused_media/remove.py
|
5c83e381d6302e799ad3e9602c9bd4675e8dc10b
|
[
"MIT"
] |
permissive
|
akolpakov/django-unused-media
|
b6500ea6498271aa2ef42ed38c0a09c0a9db4a2b
|
6be3275bebd0b12871b0d94b0e941c68d6fee046
|
refs/heads/master
| 2021-11-24T20:06:23.983218
| 2021-11-23T21:53:11
| 2021-11-23T21:53:11
| 22,923,082
| 125
| 37
|
MIT
| 2021-11-23T21:53:12
| 2014-08-13T16:40:50
|
Python
|
UTF-8
|
Python
| false
| false
| 686
|
py
|
remove.py
|
# -*- coding: utf-8 -*-
import os
from django.conf import settings
def remove_media(files):
"""
    Delete the given files from the media directory.
"""
for filename in files:
os.remove(os.path.join(settings.MEDIA_ROOT, filename))
def remove_empty_dirs(path=None):
"""
Recursively delete empty directories; return True if everything was deleted.
"""
if not path:
path = settings.MEDIA_ROOT
if not os.path.isdir(path):
return False
listdir = [os.path.join(path, filename) for filename in os.listdir(path)]
if all(list(map(remove_empty_dirs, listdir))):
os.rmdir(path)
return True
else:
return False
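# A minimal usage sketch (assumes Django settings are configured and
# MEDIA_ROOT is writable; the file name is illustrative):
#
#   from django_unused_media.remove import remove_media, remove_empty_dirs
#
#   remove_media(['uploads/old_avatar.png'])  # paths relative to MEDIA_ROOT
#   remove_empty_dirs()                       # then prune now-empty directories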
|
9e55a42d19c549f59d373dabec73e3ce6d235d7c
|
5e66707ccdea0c000e6e269fce6907ee3cfcdbde
|
/galaxy/main/migrations/0065_namespace_refactor.py
|
515d5e9edb84d5648941e30c997a075979e91cf9
|
[
"Apache-2.0"
] |
permissive
|
ansible/galaxy
|
f629046d579d7cd4e484cdf1e27ad68fe7b170a2
|
6a374cacdf0f04de94486913bba5285e24e178d3
|
refs/heads/devel
| 2023-09-04T09:21:43.542346
| 2023-08-25T16:58:09
| 2023-08-25T16:58:09
| 24,333,272
| 972
| 419
|
Apache-2.0
| 2023-08-25T17:38:20
| 2014-09-22T15:04:57
|
Python
|
UTF-8
|
Python
| false
| false
| 9,266
|
py
|
0065_namespace_refactor.py
|
from django.db import models, migrations
from django.conf import settings
import galaxy.main.mixins
import galaxy.main.fields
COPY_NAMESPACE_DATA = """
INSERT INTO main_providernamespace (
description, created, modified, active, name, display_name,
avatar_url, location, company, email, html_url, followers)
SELECT
description, created, modified, active, a.namespace, name,
avatar_url, location, company, email, html_url, followers
FROM main_namespace as a INNER JOIN
(SELECT namespace, min(id) as id
FROM main_namespace
GROUP BY namespace) as b ON a.namespace = b.namespace and a.id = b.id
"""
ADD_REPO_GITHUB_USERS = """
INSERT INTO main_providernamespace
(created, modified, active, name, description)
SELECT created, modified, true, b.github_user, b.github_user
FROM main_repository as a INNER JOIN
(SELECT github_user, min(id) as id
FROM main_repository
GROUP BY github_user) as b
ON a.github_user = b.github_user and a.id = b.id
WHERE a.github_user not in (
SELECT name FROM main_providernamespace WHERE name = a.github_user)
"""
ADD_ROLE_NAMESPACE = """
INSERT INTO main_providernamespace
(created, modified, active, name, description)
SELECT
created, modified, true, a.namespace, a.namespace
FROM main_content as a INNER JOIN
(SELECT namespace, min(id) as id
FROM main_content
GROUP BY namespace) as b ON a.namespace = b.namespace and a.id = b.id
WHERE a.namespace not in (
SELECT name FROM main_providernamespace WHERE name = a.namespace)
"""
NAMESPACE_FROM_PROVIDER_NAMESPACE = """
INSERT INTO main_namespace (
name, description, created, modified, active, original_name,
avatar_url, location, company, email, html_url
)
SELECT
a.name, description, created, modified, true, a.name,
avatar_url, location, company, email, html_url
FROM main_providernamespace as a INNER JOIN
(SELECT name, min(id) as id
FROM main_providernamespace
GROUP BY name) as b ON a.name = b.name and a.id = b.id
"""
SET_PROVIDER_NAMESPACE_FK = """
UPDATE main_providernamespace
SET
provider_id = (
SELECT id
FROM main_provider
WHERE name = 'GitHub'
),
namespace_id = (
SELECT id
FROM main_namespace
WHERE main_namespace.name = main_providernamespace.name
)
"""
ADD_NAMESPACE_OWNERS = """
INSERT INTO main_namespace_owners
( customuser_id, namespace_id)
SELECT u.id, n.id
FROM accounts_customuser as u, main_namespace as n
WHERE u.github_user = n.name
"""
ADD_GITHUB_PROVIDER = """
INSERT INTO main_provider
(created, modified, description, name, original_name, active)
VALUES
(CURRENT_DATE, CURRENT_DATE, 'Public GitHub', 'GitHub', 'GitHub', true)
"""
ADD_MISSING_OWNERS = """
INSERT INTO main_namespace_owners (
namespace_id,
customuser_id
)
SELECT DISTINCT
a.namespace_id,
a.customuser_id
FROM (
SELECT
c.id AS namespace_id,
a.customuser_id AS customuser_id
FROM
main_repository_owners a,
main_repository b,
main_namespace c
WHERE
a.repository_id = b.id
AND b.github_user = c.name
) AS a
LEFT JOIN main_namespace_owners AS b
ON a.namespace_id = b.namespace_id
AND a.customuser_id = b.customuser_id
WHERE b.namespace_id IS NULL
"""
class Migration(migrations.Migration):
dependencies = [
('main', '0064_rename_role'),
('accounts', '0003_auto_20151125_0840'),
]
operations = [
migrations.CreateModel(
name='Provider',
fields=[
('id', models.AutoField(
verbose_name='ID', serialize=False, auto_created=True,
primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('description', galaxy.main.fields.TruncatingCharField(
default='', max_length=255, blank=True)),
('active', models.BooleanField(default=True, db_index=True)),
('name', models.CharField(
unique=True, max_length=512, db_index=True)),
('original_name', models.CharField(max_length=512)),
],
options={
'ordering': ('name',),
},
bases=(models.Model, galaxy.main.mixins.DirtyMixin),
),
migrations.CreateModel(
name='ProviderNamespace',
fields=[
('id', models.AutoField(
verbose_name='ID', serialize=False, auto_created=True,
primary_key=True)),
('description', galaxy.main.fields.TruncatingCharField(
default='', max_length=255, blank=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('active', models.BooleanField(default=True, db_index=True)),
('name', models.CharField(
max_length=256, verbose_name='Name')),
('display_name', models.CharField(
verbose_name='Display Name', max_length=256,
null=True, editable=False, blank=True)),
('avatar_url', models.CharField(
verbose_name='Avatar URL', max_length=256,
null=True, editable=False, blank=True)),
('location', models.CharField(
verbose_name='Location', max_length=256,
null=True, editable=False, blank=True)),
('company', models.CharField(
verbose_name='Company Name', max_length=256,
null=True, editable=False, blank=True)),
('email', models.CharField(
verbose_name='Email Address', max_length=256,
null=True, editable=False, blank=True)),
('html_url', models.CharField(
verbose_name='Web Site URL', max_length=256,
null=True, editable=False, blank=True)),
('followers', models.IntegerField(
null=True, editable=False,
verbose_name="Followers")),
('namespace', models.ForeignKey(
related_name='namespaces', editable=False,
to='main.Namespace', null=True,
on_delete=models.CASCADE,
verbose_name='Namespace')),
('provider', models.ForeignKey(
related_name='provider', verbose_name='Provider',
on_delete=models.CASCADE,
to='main.Provider', null=True)),
],
options={
'ordering': ('provider', 'name',),
'unique_together': {('provider', 'name'),
('namespace', 'provider', 'name')},
},
bases=(models.Model, galaxy.main.mixins.DirtyMixin),
),
migrations.RunSQL(
sql=COPY_NAMESPACE_DATA,
reverse_sql=migrations.RunSQL.noop
),
migrations.DeleteModel(
name='Namespace',
),
migrations.CreateModel(
name='Namespace',
fields=[
('id', models.AutoField(
verbose_name='ID', serialize=False,
auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('description', galaxy.main.fields.TruncatingCharField(
default='', max_length=255, blank=True)),
('active', models.BooleanField(default=True, db_index=True)),
('name', models.CharField(
unique=True, max_length=512, db_index=True)),
('original_name', models.CharField(max_length=512)),
('avatar_url', models.CharField(
max_length=256, null=True,
verbose_name='Avatar URL', blank=True)),
('location', models.CharField(
max_length=256, null=True,
verbose_name='Location', blank=True)),
('company', models.CharField(
max_length=256, null=True,
verbose_name='Company Name', blank=True)),
('email', models.CharField(
max_length=256, null=True,
verbose_name='Email Address', blank=True)),
('html_url', models.CharField(
max_length=256, null=True,
verbose_name='Web Site URL', blank=True)),
('owners', models.ManyToManyField(
related_name='namespaces', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('name',),
},
bases=(models.Model, galaxy.main.mixins.DirtyMixin),
),
migrations.RunSQL(sql=(
ADD_GITHUB_PROVIDER,
ADD_REPO_GITHUB_USERS,
ADD_ROLE_NAMESPACE,
NAMESPACE_FROM_PROVIDER_NAMESPACE,
SET_PROVIDER_NAMESPACE_FK,
ADD_NAMESPACE_OWNERS,
ADD_MISSING_OWNERS,
)),
]
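# Reversibility note (a sketch, not part of the original migration): the
# first RunSQL passes reverse_sql=migrations.RunSQL.noop so it can be
# unapplied, while the final RunSQL omits reverse_sql and is therefore
# irreversible when migrating backwards. A hypothetical reversible form:
#
#   migrations.RunSQL(
#       sql=(ADD_GITHUB_PROVIDER, ADD_REPO_GITHUB_USERS, ADD_ROLE_NAMESPACE),
#       reverse_sql=migrations.RunSQL.noop,
#   )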
|
516cd2a9ee07c2a700486ae9d0a26733b92417cf
|
ed01f578313f385496bd816a1c2206d5cdc6b090
|
/tests/test_surface.py
|
75d0749ec2b7b4240125ecd404f7f67f827d6d9f
|
[
"BSD-3-Clause"
] |
permissive
|
oddt/oddt
|
5b4204d5abafa8d3947e0ab216bb29ebb98203ff
|
bfbf9ea99768b556684dc99c6ac87a9f16b16f80
|
refs/heads/master
| 2022-12-27T07:55:56.493227
| 2022-01-07T18:07:47
| 2022-02-03T17:55:07
| 23,150,687
| 363
| 118
|
BSD-3-Clause
| 2022-02-03T17:55:09
| 2014-08-20T14:01:11
|
Python
|
UTF-8
|
Python
| false
| false
| 5,818
|
py
|
test_surface.py
|
import os
from distutils.version import LooseVersion
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from skimage import __version__ as skimage_version
import oddt
from oddt.surface import (generate_surface_marching_cubes,
find_surface_residues)
test_data_dir = os.path.dirname(os.path.abspath(__file__))
protein = next(oddt.toolkit.readfile('pdb', os.path.join(
test_data_dir, 'data/dude/xiap/receptor_rdkit.pdb')))
protein.protein = True
protein.addh(only_polar=True)
def test_generate_surface_marching_cubes():
"""Tests generating surfaces"""
verts1, faces1 = generate_surface_marching_cubes(protein, scaling=1., probe_radius=1.4, remove_hoh=False)
verts2, faces2 = generate_surface_marching_cubes(protein, scaling=2., probe_radius=1.4, remove_hoh=False)
verts3, faces3 = generate_surface_marching_cubes(protein, scaling=1., probe_radius=1.4, remove_hoh=True)
verts4, faces4 = generate_surface_marching_cubes(protein, scaling=1., probe_radius=0, remove_hoh=True)
# Higher scaling should result in a higher number of vertices
assert len(verts2) > len(verts1), ('Higher scaling should result in '
'a higher number of vertices')
    # versions of skimage older than 0.13 use a slightly different version of the marching cubes algorithm,
    # producing slightly different results
if LooseVersion(skimage_version) >= LooseVersion('0.13'):
if (oddt.toolkit.backend == 'ob' or
oddt.toolkit.backend == 'rdk' and oddt.toolkits.rdk.__version__ >= '2019.09'):
ref_vert_shape_1 = (9040, 3)
ref_face_shape_1 = (18094, 3)
ref_vert_shape_2 = (35950, 3)
ref_face_shape_2 = (71926, 3)
ref_vert_shape_3 = (9040, 3)
ref_face_shape_3 = (18094, 3)
ref_vert_shape_4 = (14881, 3)
ref_face_shape_4 = (30468, 3)
else:
ref_vert_shape_1 = (9044, 3)
ref_face_shape_1 = (18102, 3)
ref_vert_shape_2 = (35788, 3)
ref_face_shape_2 = (71578, 3)
ref_vert_shape_3 = (9044, 3)
ref_face_shape_3 = (18102, 3)
ref_vert_shape_4 = (15035, 3)
ref_face_shape_4 = (30848, 3)
else:
if oddt.toolkit.backend == 'ob':
ref_vert_shape_1 = (5923, 3)
ref_face_shape_1 = (11862, 3)
ref_vert_shape_2 = (20819, 3)
ref_face_shape_2 = (41634, 3)
ref_vert_shape_3 = (5923, 3)
ref_face_shape_3 = (11862, 3)
ref_vert_shape_4 = (10263, 3)
ref_face_shape_4 = (21658, 3)
else:
ref_vert_shape_1 = (5916, 3)
ref_face_shape_1 = (11848, 3)
ref_vert_shape_2 = (20845, 3)
ref_face_shape_2 = (41686, 3)
ref_vert_shape_3 = (5916, 3)
ref_face_shape_3 = (11848, 3)
ref_vert_shape_4 = (10243, 3)
ref_face_shape_4 = (21686, 3)
assert ref_vert_shape_1 == verts1.shape
assert ref_face_shape_1 == faces1.shape
assert ref_vert_shape_2 == verts2.shape
assert ref_face_shape_2 == faces2.shape
assert ref_vert_shape_3 == verts3.shape
assert ref_face_shape_3 == faces3.shape
assert ref_vert_shape_4 == verts4.shape
assert ref_face_shape_4 == faces4.shape
with pytest.raises(TypeError):
generate_surface_marching_cubes(molecule=1)
with pytest.raises(ValueError):
generate_surface_marching_cubes(molecule=protein, probe_radius=-1)
with pytest.raises(ValueError):
generate_surface_marching_cubes(molecule=protein, scaling=0.1)
def test_find_surface_residues():
"""Tests finding residues on the surface"""
atom_dict_0 = find_surface_residues(protein, max_dist=0, scaling=1)
atom_dict_1 = find_surface_residues(protein, max_dist=2, scaling=1)
atom_dict_2 = find_surface_residues(protein, max_dist=3, scaling=1)
atom_dict_3 = find_surface_residues(protein, max_dist=None, scaling=1)
atom_dict_4 = find_surface_residues(protein, max_dist=None, scaling=2)
assert atom_dict_0.size == 0
assert len(atom_dict_1) > len(atom_dict_0), ('Increasing max_dist should '
'result in more/equal number '
'of atoms found')
assert len(atom_dict_2) >= len(atom_dict_1), ('Increasing max_dist should '
'result in more/equal number '
'of atoms found')
assert_array_equal(np.intersect1d(atom_dict_1['id'], atom_dict_2['id']), atom_dict_1['id'])
if oddt.toolkit.backend == 'ob':
ref_len_1 = 762
ref_len_2 = 968
ref_len_3 = 654
ref_len_4 = 379
elif oddt.toolkit.backend == 'rdk' and oddt.toolkits.rdk.__version__ >= '2019.09':
ref_len_1 = 762
ref_len_2 = 968
ref_len_3 = 664
ref_len_4 = 393
else:
ref_len_1 = 759
ref_len_2 = 966
ref_len_3 = 735
ref_len_4 = 489
assert len(atom_dict_1) == ref_len_1
assert len(atom_dict_2) == ref_len_2
assert len(atom_dict_3) == ref_len_3
assert len(atom_dict_4) == ref_len_4
# Adding hydrogen atoms should have no effect on the result
protein.addh()
atom_dict_addh = find_surface_residues(protein, max_dist=2, scaling=1)
assert_array_equal(atom_dict_addh['id'], atom_dict_1['id'])
with pytest.raises(TypeError):
find_surface_residues(molecule=1)
with pytest.raises(ValueError):
find_surface_residues(molecule=protein, max_dist='a')
with pytest.raises(ValueError):
find_surface_residues(molecule=protein, max_dist=[1, 1, 1])
|
47e447eef03cc6cf44e32727735032df1e34c375
|
863bfa36852b84e2e2834abb67171e37d48b9b81
|
/returns/interfaces/altable.py
|
8f6ee31cf25d16df247a1f080f6f66f465cc5fd6
|
[
"BSD-2-Clause"
] |
permissive
|
dry-python/returns
|
33f763a34439cb6aa5419f16c6f45f27610d82d4
|
ae8d9ffaf20c459296337b78ba5ecb2f98870f61
|
refs/heads/master
| 2023-09-04T07:33:39.768675
| 2023-09-01T20:45:59
| 2023-09-01T20:45:59
| 167,689,891
| 2,967
| 130
|
BSD-2-Clause
| 2023-09-11T15:18:58
| 2019-01-26T13:08:38
|
Python
|
UTF-8
|
Python
| false
| false
| 2,359
|
py
|
altable.py
|
from abc import abstractmethod
from typing import (
Callable,
ClassVar,
Generic,
NoReturn,
Sequence,
TypeVar,
final,
)
from returns.functions import compose, identity
from returns.primitives.asserts import assert_equal
from returns.primitives.hkt import KindN
from returns.primitives.laws import (
Law,
Law1,
Law3,
Lawful,
LawSpecDef,
law_definition,
)
_FirstType = TypeVar('_FirstType')
_SecondType = TypeVar('_SecondType')
_ThirdType = TypeVar('_ThirdType')
_UpdatedType = TypeVar('_UpdatedType')
_AltableType = TypeVar('_AltableType', bound='AltableN')
# Used in laws:
_NewType1 = TypeVar('_NewType1')
_NewType2 = TypeVar('_NewType2')
@final
class _LawSpec(LawSpecDef):
"""
Mappable or functor laws.
https://en.wikibooks.org/wiki/Haskell/The_Functor_class#The_functor_laws
"""
__slots__ = ()
@law_definition
def identity_law(
altable: 'AltableN[_FirstType, _SecondType, _ThirdType]',
) -> None:
"""Mapping identity over a value must return the value unchanged."""
assert_equal(altable.alt(identity), altable)
@law_definition
def associative_law(
altable: 'AltableN[_FirstType, _SecondType, _ThirdType]',
first: Callable[[_SecondType], _NewType1],
second: Callable[[_NewType1], _NewType2],
) -> None:
"""Mapping twice or mapping a composition is the same thing."""
assert_equal(
altable.alt(first).alt(second),
altable.alt(compose(first, second)),
)
class AltableN(
Generic[_FirstType, _SecondType, _ThirdType],
Lawful['AltableN[_FirstType, _SecondType, _ThirdType]'],
):
"""Modifies the second type argument with a pure function."""
__slots__ = ()
_laws: ClassVar[Sequence[Law]] = (
Law1(_LawSpec.identity_law),
Law3(_LawSpec.associative_law),
)
@abstractmethod
def alt(
self: _AltableType,
function: Callable[[_SecondType], _UpdatedType],
) -> KindN[_AltableType, _FirstType, _UpdatedType, _ThirdType]:
"""Allows to run a pure function over a container."""
#: Type alias for kinds with two type arguments.
Altable2 = AltableN[_FirstType, _SecondType, NoReturn]
#: Type alias for kinds with three type arguments.
Altable3 = AltableN[_FirstType, _SecondType, _ThirdType]
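# A minimal usage sketch (assumes the returns package is installed; Result
# is one concrete AltableN, where alt maps over the failure value):
#
#   from returns.result import Failure, Success
#
#   assert Failure('boom').alt(str.upper) == Failure('BOOM')
#   assert Success(1).alt(str.upper) == Success(1)  # success is left untouched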
|
5858ba698418c11b327cd1f2afcca0e1b38e08e3
|
b71a6e7050b0a4368007350d91ee078288a7318c
|
/examples/other/testMen.py
|
e0117b4206bc80c454ff3be915e77067bc43e863
|
[
"Apache-2.0"
] |
permissive
|
jarvisteach/appJar
|
2dfd0da6cb85ea3535379ed000efd97fb42fe4f8
|
0b59ce041da2197dcff3410e20f298676f1f7266
|
refs/heads/appJar
| 2023-08-29T09:42:01.812005
| 2019-09-28T18:34:06
| 2019-09-28T18:34:06
| 39,996,518
| 696
| 103
|
NOASSERTION
| 2023-02-20T01:01:16
| 2015-07-31T08:59:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,269
|
py
|
testMen.py
|
from tkinter import *
import os
def ShowHelp(e=None):
print("help")
def showHelpDialog(e=None):
print("help")
class App(Frame):
def __init__(self, parent):
Frame.__init__(self, parent)
parent.title("Menu Tester")
# create menu bar object
menubar = Menu(parent)
        # create two menus
menu_file = Menu(menubar)
menu_edit = Menu(menubar)
# make them each cascade
menubar.add_cascade(menu=menu_file, label='File')
menubar.add_cascade(menu=menu_edit, label='Edit')
# add three options
menu_file.add_command(label='New', command=ShowHelp)
menu_file.add_command(label='Open...', command=ShowHelp)
menu_file.add_command(label='Close', command=ShowHelp)
        # add a separator
menu_file.add_separator()
        # add a checkbutton and two radio buttons
check = StringVar()
menu_file.add_checkbutton(label='Check', variable=check, onvalue=1, offvalue=0)
radio = StringVar()
menu_file.add_radiobutton(label='One', variable=radio, value=1)
menu_file.add_radiobutton(label='Two', variable=radio, value=2)
appmenu = Menu(menubar, name='apple')
menubar.add_cascade(menu=appmenu)
appmenu.add_command(label='About My Application')
appmenu.add_separator()
def showMyPreferencesDialog():
print("pref")
root.createcommand('tk::mac::ShowPreferences', showMyPreferencesDialog)
        # the macOS-specific Help menu
helpmenu = Menu(menubar, name='help')
menubar.add_cascade(menu=helpmenu, label='Help')
        helpmenu.add_command(label='This one', command=ShowHelp)
root.createcommand('tk::mac::ShowHelp', showHelpDialog)
windowmenu = Menu(menubar, name='window')
menubar.add_cascade(menu=windowmenu, label='Window')
        # attach the menu bar to the root window
root.config(menu=menubar)
os.system('''/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "python3" to true' ''')
root=Tk()
root.geometry("300x250+300+300")
app=App(root)
root.mainloop()
|
65f6cf261d65bd60ae4ff205974785710bcc7eb7
|
f791462fb1286607d16459c1602d133f8d8c8b59
|
/test/test_optimizers.py
|
84dab38815d7c39a3e83ea2f7bd4322bf3bc1e36
|
[
"Apache-2.0"
] |
permissive
|
pyro-ppl/numpyro
|
b071ed2bd93be41bafc3da8764c9f5617f996d92
|
ca96eca8e8e1531e71ba559ef7a8ad3b4b68cbc2
|
refs/heads/master
| 2023-09-03T15:56:13.252692
| 2023-08-28T14:32:25
| 2023-08-28T14:32:25
| 170,580,540
| 1,941
| 219
|
Apache-2.0
| 2023-09-04T11:26:11
| 2019-02-13T21:13:59
|
Python
|
UTF-8
|
Python
| false
| false
| 3,332
|
py
|
test_optimizers.py
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from functools import partial
import pytest
from jax import grad, jit
import jax.numpy as jnp
from numpyro import optim
try:
import optax
    # the optimizer test is parametrized over different optax optimizers, but we have
    # to define them here to ensure that `optax` is defined: pytest.mark.parametrize
    # decorators are evaluated even if tests are skipped at the top of the file.
optax_optimizers = [
(optax.adam, (1e-2,), {}),
# clipped adam
(optax.chain, (optax.clip(10.0), optax.adam(1e-2)), {}),
(optax.adagrad, (1e-1,), {}),
# SGD with momentum
(optax.sgd, (1e-2,), {"momentum": 0.9}),
(optax.rmsprop, (1e-2,), {"decay": 0.95}),
# RMSProp with momentum
(optax.rmsprop, (1e-4,), {"decay": 0.9, "momentum": 0.9}),
(optax.sgd, (1e-2,), {}),
]
except ImportError:
pytestmark = pytest.mark.skip(reason="optax is not installed")
optax_optimizers = []
def loss(params):
return jnp.sum(params["x"] ** 2 + params["y"] ** 2)
@partial(jit, static_argnums=(1,))
def step(opt_state, optim):
params = optim.get_params(opt_state)
g = grad(loss)(params)
return optim.update(g, opt_state)
@pytest.mark.parametrize(
"optim_class, args, kwargs",
[
(optim.Adam, (1e-2,), {}),
(optim.ClippedAdam, (1e-2,), {}),
(optim.Adagrad, (1e-1,), {}),
(optim.Momentum, (1e-2, 0.5), {}),
(optim.RMSProp, (1e-2, 0.95), {}),
(optim.RMSPropMomentum, (1e-4,), {}),
(optim.SGD, (1e-2,), {}),
]
+ optax_optimizers,
)
@pytest.mark.filterwarnings("ignore:.*tree_multimap:FutureWarning")
def test_optim_multi_params(optim_class, args, kwargs):
params = {"x": jnp.array([1.0, 1.0, 1.0]), "y": jnp.array([-1, -1.0, -1.0])}
opt = optim_class(*args, **kwargs)
if not isinstance(opt, optim._NumPyroOptim):
opt = optim.optax_to_numpyro(opt)
opt_state = opt.init(params)
for i in range(2000):
opt_state = step(opt_state, opt)
for _, param in opt.get_params(opt_state).items():
assert jnp.allclose(param, jnp.zeros(3))
# note: this is somewhat of a brute-force test; testing _NumPyroOptim
# directly would probably be better
@pytest.mark.parametrize(
"optim_class, args, kwargs",
[
(optim.Adam, (1e-2,), {}),
(optim.ClippedAdam, (1e-2,), {}),
(optim.Adagrad, (1e-1,), {}),
(optim.Momentum, (1e-2, 0.5), {}),
(optim.RMSProp, (1e-2, 0.95), {}),
(optim.RMSPropMomentum, (1e-4,), {}),
(optim.SGD, (1e-2,), {}),
]
+ optax_optimizers,
)
@pytest.mark.filterwarnings("ignore:.*tree_multimap:FutureWarning")
def test_numpyrooptim_no_double_jit(optim_class, args, kwargs):
opt = optim_class(*args, **kwargs)
if not isinstance(opt, optim._NumPyroOptim):
opt = optim.optax_to_numpyro(opt)
state = opt.init(jnp.zeros(10))
my_fn_calls = 0
@jit
def my_fn(state, g):
nonlocal my_fn_calls
my_fn_calls += 1
state = opt.update(g, state)
return state
state = my_fn(state, jnp.ones(10) * 1.0)
state = my_fn(state, jnp.ones(10) * 2.0)
state = my_fn(state, jnp.ones(10) * 3.0)
assert my_fn_calls == 1
|
3792578ea46a8c05b1547f7ba9c0cd0d1b047e1c
|
86bd1a9b92ffe3edb1982515be7d482584b990ba
|
/tcconfig/_network.py
|
bb01da8699258dcba1cdbde434d390c941283df1
|
[
"MIT"
] |
permissive
|
thombashi/tcconfig
|
e9202538382d371f34190947cbd3ac0b7332c4db
|
e14f9fbdb235e2d6b2748962735dcf3fa1f356f3
|
refs/heads/master
| 2023-08-19T22:51:56.590952
| 2022-09-26T13:42:13
| 2022-09-26T13:42:13
| 49,825,187
| 753
| 86
|
MIT
| 2021-12-12T03:54:10
| 2016-01-17T16:24:09
|
Python
|
UTF-8
|
Python
| false
| false
| 3,249
|
py
|
_network.py
|
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import humanreadable as hr
import typepy
from pyroute2 import IPRoute
from ._const import Network
from ._error import NetworkInterfaceNotFoundError
def get_anywhere_network(ip_version):
ip_version_n = typepy.Integer(ip_version).try_convert()
if ip_version_n == 4:
return Network.Ipv4.ANYWHERE
if ip_version_n == 6:
return Network.Ipv6.ANYWHERE
raise ValueError("unknown ip version: {}".format(ip_version))
def _get_iproute2_upper_limit_rate():
    """
    :return: Upper bandwidth rate limit of iproute2.
    :rtype: humanreadable.BitPerSecond
    """
    # the upper bandwidth rate limit of iproute2 was 34,359,738,360
    # bits per second in versions older than 3.14.0
    # http://git.kernel.org/cgit/linux/kernel/git/shemminger/iproute2.git/commit/?id=8334bb325d5178483a3063c5f06858b46d993dc7
    return hr.BitPerSecond("32Gbps")
def _read_iface_speed(tc_device):
with open("/sys/class/net/{:s}/speed".format(tc_device)) as f:
return int(f.read().strip())
def get_upper_limit_rate(tc_device):
if typepy.is_null_string(tc_device):
        return _get_iproute2_upper_limit_rate()
try:
speed_value = _read_iface_speed(tc_device)
except OSError:
        return _get_iproute2_upper_limit_rate()
    if speed_value < 0:
        # default to the iproute2 upper limit when the reported speed is -1,
        # as seen on paravirtualized network interfaces
        return _get_iproute2_upper_limit_rate()
    return min(hr.BitPerSecond("{}Mbps".format(speed_value)), _get_iproute2_upper_limit_rate())
def is_anywhere_network(network, ip_version):
try:
network = network.strip()
except AttributeError as e:
raise ValueError(e)
if ip_version == 4:
return network == get_anywhere_network(ip_version)
if ip_version == 6:
return network in (get_anywhere_network(ip_version), "0:0:0:0:0:0:0:0/0")
raise ValueError("invalid ip version: {}".format(ip_version))
def sanitize_network(network, ip_version):
"""
:return: Network string
:rtype: str
:raises ValueError: if the network string is invalid.
"""
import ipaddress
if typepy.is_null_string(network) or network.casefold() == "anywhere":
return get_anywhere_network(ip_version)
try:
if ip_version == 4:
ipaddress.IPv4Address(network)
return network + "/32"
if ip_version == 6:
return ipaddress.IPv6Address(network).compressed
except ipaddress.AddressValueError:
pass
# validate network str ---
if ip_version == 4:
return ipaddress.IPv4Network(str(network)).compressed
if ip_version == 6:
return ipaddress.IPv6Network(str(network)).compressed
raise ValueError("unexpected ip version: {}".format(ip_version))
def verify_network_interface(device, tc_command_output):
from ._common import is_execute_tc_command
if not is_execute_tc_command(tc_command_output):
return
with IPRoute() as ipr:
avail_interfaces = [link.get_attr("IFLA_IFNAME") for link in ipr.get_links()]
if device not in avail_interfaces:
raise NetworkInterfaceNotFoundError(target=device)
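# A minimal usage sketch (illustrative; assumes Network.Ipv4.ANYWHERE is
# the "0.0.0.0/0" constant defined in ._const):
#
#   sanitize_network("192.168.0.1", ip_version=4)   # -> "192.168.0.1/32"
#   sanitize_network("anywhere", ip_version=6)      # -> Network.Ipv6.ANYWHERE
#   is_anywhere_network("0.0.0.0/0", ip_version=4)  # -> True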
|
2d7730d80ec1ba72d05eabc61dfafe19af637caa
|
4daab5ba90185bae65169ebb8183c635385ab3f8
|
/autode/plotting.py
|
e443c493f48feebcf845156a6e52f1f885c41c4b
|
[
"MIT"
] |
permissive
|
duartegroup/autodE
|
bcf69440bd04411f97d39df0df0ae1f2bf6feb8c
|
4d6667592f083dfcf38de6b75c4222c0a0e7b60b
|
refs/heads/master
| 2023-09-01T15:08:16.028378
| 2023-07-25T08:09:05
| 2023-07-25T08:09:05
| 196,085,570
| 132
| 42
|
MIT
| 2023-09-12T15:20:54
| 2019-07-09T21:20:27
|
Python
|
UTF-8
|
Python
| false
| false
| 16,265
|
py
|
plotting.py
|
import os
import numpy as np
from typing import Sequence, Union, TYPE_CHECKING, List, Optional, Any, Tuple
from scipy import interpolate
from autode.values import Energy
from autode.exceptions import CouldNotPlotSmoothProfile
from scipy.optimize import minimize
from autode.config import Config
from autode.units import energy_unit_from_name
from autode.log import logger
if TYPE_CHECKING:
from autode.reactions.reaction import Reaction
from autode.units import Unit
from autode.opt.optimisers.base import OptimiserHistory
from matplotlib.figure import Figure
def save_plot(figure: "Figure", filename: str, **kwargs):
"""
Save a pyplot figure
Args:
figure (matplotlib.figure.Figure): The matplotlib figure object
filename (str): Name of the file to plot
**kwargs : Other keyword arguments for matplotlib which
are passed onto figure.savefig()
"""
import matplotlib.pyplot as plt
if os.path.exists(filename):
logger.warning("Plot already exists. Overriding..")
os.remove(filename)
dpi = 400 if Config.high_quality_plots else 100
figure.savefig(filename, dpi=dpi, **kwargs)
plt.close(figure)
return None
def plot_reaction_profile(
reactions: Sequence["Reaction"],
units: Union["Unit", str],
name: str,
free_energy: bool = False,
enthalpy: bool = False,
) -> None:
"""
For a set of reactions plot the reaction profile using matplotlib
---------------------------------------------------------------------------
Arguments:
reactions (list((autode.reaction.Reaction)):
units (autode.units.Units | str):
name (str):
free_energy (bool): Plot the free energy profile (G)
enthalpy (bool): Plot the enthalpic profile (H)
"""
import matplotlib.pyplot as plt
logger.info("Plotting reaction profile")
if free_energy and enthalpy:
raise AssertionError("Cannot plot a profile in both G and H")
if isinstance(units, str):
units = energy_unit_from_name(name=units)
fig, ax = plt.subplots()
# Get the energies for the reaction profile (y values) plotted against the
# reaction coordinate (zi_s)
energies = calculate_reaction_profile_energies(
reactions, units=units, free_energy=free_energy, enthalpy=enthalpy
)
zi_s = np.array(range(len(energies)))
try:
plot_smooth_profile(zi_s, energies, ax=ax)
except CouldNotPlotSmoothProfile:
plot_points(zi_s, energies, ax=ax)
ec = "E"
if free_energy:
ec = "G"
elif enthalpy:
ec = "H"
plt.ylabel(f"∆${ec}$ / {units.plot_name}", fontsize=12)
plt.xlabel("Reaction coordinate")
energy_values = [energy for energy in energies]
max_delta = max(energy_values) - min(energy_values)
plt.ylim(
min(energy_values) - 0.09 * max_delta,
max(energy_values) + 0.09 * max_delta,
)
plt.xticks([])
plt.subplots_adjust(top=0.95, right=0.95)
fig.text(
0.1,
0.05,
get_reaction_profile_warnings(reactions),
ha="left",
fontsize=6,
wrap=True,
)
prefix = "" if name == "reaction" else f"{name}_"
return save_plot(fig, filename=f"{prefix}reaction_profile.pdf")
def plot_smooth_profile(zi_s, energies, ax):
"""
Plot a smooth reaction profile by spline interpolation and finding the
stationary points. This will not afford the correct number of stationary
points for some energy arrays, so raise an exception if it fails
---------------------------------------------------------------------------
Arguments:
zi_s (np.ndarray): Estimate of reaction coordinate points
energies (list(autode.plotting.Energy)): len(energies) = len(zi_s)
ax (matplotlib.axes.Axes):
"""
# Minimise a set of spline points so the stationary points have y values
# given in the energies array
energies_arr = np.array([energy for energy in energies], dtype="f")
result = minimize(
error_on_stationary_points,
x0=energies_arr,
args=(energies_arr,),
method="BFGS",
tol=0.1,
)
# Use the optimised values to construct a spline function that will be
# plotted
optimised_spline = interpolate.CubicSpline(
zi_s, result.x, bc_type="clamped"
)
# Create more zi values from slightly before the minimum to slightly after
# the maximum
fine_zi_s = np.linspace(min(zi_s) - 0.2, max(zi_s) + 0.2, num=500)
# The new zi values are the stationary points of the optimised function
zi_s = get_stationary_points(fine_zi_s, optimised_spline.derivative())
if len(zi_s) != len(energies):
raise CouldNotPlotSmoothProfile
# Plot the function
ax.plot(fine_zi_s, optimised_spline(fine_zi_s), c="k")
ax.scatter(zi_s, optimised_spline(zi_s), c="b", zorder=10)
# Annotate the plot with the relative energies
max_delta = max(energies) - min(energies)
for i, energy in enumerate(optimised_spline(zi_s)):
if energies[i].is_estimated:
# Don't add estimated energies
continue
# Shift the minima labels (even points) below the point and the
# transition state labels above the point
shift = -0.07 * max_delta if i % 2 == 0 else 0.03 * max_delta
ax.annotate(
f"{energy:.1f}",
(zi_s[i], energy + shift),
fontsize=12,
ha="center",
)
return None
def plot_points(zi_s, energies, ax):
"""
Plot a reaction profile just adding the points to the graph
---------------------------------------------------------------------------
Arguments:
zi_s (np.ndarray): Estimate of reaction coordinate points
energies (list(autode.plotting.Energy)): len(energies) = len(zi_s)
ax (matplotlib.axes.Axes):
"""
energies_arr = np.array([energy for energy in energies])
ax.plot(zi_s, energies_arr, ls="--", c="k", marker="o")
# Annotate the plot with the relative energies
for i, energy in enumerate(energies):
if hasattr(energy, "estimated") and energy.is_estimated:
# Don't add estimated energies
continue
ax.annotate(
f"{np.round(energies_arr[i], 1)}",
(zi_s[i], energies_arr[i] + 0.7),
fontsize=12,
ha="center",
)
return None
def get_reaction_profile_warnings(reactions):
"""
Get a string of warnings for a reaction
---------------------------------------------------------------------------
Arguments:
reactions (list(autode.reaction.Reaction)):
Returns:
(str): List of warnings to annotate the plot with
"""
logger.info("Getting warnings for reaction profile")
warnings = ""
for reaction in reactions:
if reaction.delta("E") is None:
warnings += (
f"∆Er not calculated for {reaction.name}, "
f"∆Er = 0 assumed. "
)
de_ts = reaction.delta("E‡")
if de_ts is None or (de_ts is not None and de_ts.is_estimated):
warnings += (
f"∆E‡ not calculated for {reaction.name}, "
f"barrierless reaction assumed. "
)
if reaction.ts is not None:
if reaction.ts.has_imaginary_frequencies:
n_imag_freqs = len(reaction.ts.imaginary_frequencies)
if n_imag_freqs != 1:
warnings += (
f"TS for {reaction.name} has {n_imag_freqs} "
f"imaginary frequencies. "
)
warnings += reaction.ts.warnings
# If no strings were added then there are no warnings
if len(warnings) == 0:
warnings = "None"
return f"WARNINGS: {warnings}"
def calculate_reaction_profile_energies(
reactions, units, free_energy=False, enthalpy=False
):
"""
Calculate a list of energies comprising the reaction profile
---------------------------------------------------------------------------
Arguments:
reactions (list(autode.reaction.Reaction)):
units (autode.units.Units):
Keyword Arguments:
free_energy (bool): Calculate ∆Gs
enthalpy (bool): Calculate ∆Hs
Returns:
(np.ndarray(autode.plotting.Energy))
"""
# Populate a list of reaction relative energies
# [reactants -> TS -> products], all floats
reaction_energies = []
energy_type = "H" if enthalpy else ("G" if free_energy else "E")
for reaction in reactions:
de = reaction.delta(energy_type)
# If ∆Er cannot be calculated then assume isoenergetic and add a
# warning to the plot
if de is None:
de = Energy(0.0, estimated=True)
de_ts = reaction.delta(f"{energy_type}‡")
        # If ∆E‡ cannot be calculated then de_ts will be None. Use an effective
        # free-energy barrier of 4.35 kcal mol-1 (0.00694 Ha)
if de_ts is None:
de_ts = Energy(0.00694, units="Ha", estimated=True)
reaction_energies.append([Energy(0.0), de_ts, de])
# Construct the full list of energies, referenced to the first set of
# reactants
energies = reaction_energies[0]
for i in range(1, len(reaction_energies)):
        # Add the energies from the next TS and the next product;
        # reaction_energies[i][0] corresponds to energies[-1]
energies += [
reaction_energies[i][1] + energies[-1],
reaction_energies[i][2] + energies[-1],
]
return [energy * units.conversion for energy in energies]
def get_stationary_points(xs, dydx):
"""
Compute the productive of the derivative at points x(i-1) and x(i) which
is negative if there is a point x(k)
between x(i-1) and x(i) that has dy/dx|x(k) = 0
---------------------------------------------------------------------------
Arguments:
xs (np.ndarray):
dydx (function):
"""
stationary_points = []
for i in range(1, len(xs) - 1):
if dydx(xs[i - 1]) * dydx(xs[i]) < 0:
stationary_points.append(xs[i])
return stationary_points
def error_on_stationary_points(x, energies):
"""
Calculate the difference between the stationary points of an interpolated
function and those observed (given in the energies array). Example::
| .
E |. / | The points indicate the true stationary points
| |_/ |.
|_____________
zi
---------------------------------------------------------------------------
Arguments:
x (np.ndarray): Points that will be splined that generate stationary
points that ≈ energies
energies (np.ndarray): Observed stationary points
Returns:
(float): A measure of the error
"""
# Generate a list of reaction coordinate points - arbitrary units so
# integers are fine
zi_s = np.array(range(len(x)))
# Spline the energies to get a function that has stationary points
spline = interpolate.CubicSpline(zi_s, x, bc_type="clamped")
    # Evaluate the derivative on a fine grid of points that extrapolates
    # slightly beyond the end points to locate the stationary points
fine_zi_s = np.linspace(min(zi_s) - 0.2, max(zi_s) + 0.2, num=500)
stationary_points = get_stationary_points(
xs=fine_zi_s, dydx=spline.derivative()
)
if len(stationary_points) != len(energies):
# TODO make this smooth somehow
        # Energy penalty for not having the required number of
        # stationary points
energies_at_stationary_points = [spline(zi) for zi in stationary_points]
# Return the error as the sum squared difference between the required and
# the observed stationary point energies
energy_difference = energies - np.array(energies_at_stationary_points)
return np.sum(np.square(energy_difference))
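# A minimal sketch of how get_stationary_points behaves (illustrative, not
# part of the autodE API): the derivative of (x - 1)^2 changes sign exactly
# once, so a single stationary point near x = 1 should be reported.
#
#   f_prime = lambda x: 2.0 * (x - 1.0)   # derivative of (x - 1)^2
#   xs = np.linspace(0.0, 2.0, num=500)
#   get_stationary_points(xs, f_prime)    # -> one point close to x = 1.0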
def plot_optimiser_profile(
history: "OptimiserHistory",
plot_energy: bool,
plot_rms_grad: bool,
filename: str,
):
"""
Plot the energy and RMS gradient profile from an optimiser history.
Skips plotting of points where energy/grad is not available
-------------------------------------------------------------------------
Args:
history (OptimiserHistory): History (list) of coordinate objects
plot_energy (bool): Whether to plot energy or not
plot_rms_grad (bool): Whether to plot rms grad or not
filename (str): Name of plotted file
"""
if not (plot_energy or plot_rms_grad):
logger.error(
"Must plot either energies or RMS gradients for an"
" optimiser profile"
)
return None
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
    x_axis = [i + 1 for i in range(len(history))]  # history indices start at 0; plot steps from 1
energies = []
rms_grads = []
for coord in history:
if coord.e is not None:
energies.append(coord.e.to("Ha"))
else:
energies.append(np.nan)
if coord.g is not None:
rms = np.sqrt(np.average(np.square(coord.to("cart").g)))
rms_grads.append(rms)
else:
rms_grads.append(np.nan)
fig, ax = plt.subplots()
if plot_energy:
ax.plot(
x_axis, energies, "o-", color="C0", label="Electronic energy"
) # blue
ax.set_xlabel("Optimiser step")
ax.set_ylabel("Electronic energy / Ha")
ax.set_xlim(left=0.5)
ax.xaxis.set_major_locator(
MaxNLocator(nbins="auto", steps=[1, 2, 2.5, 5, 10], integer=True)
)
if plot_rms_grad:
# plot on a different axis if both are present
ax2 = ax.twinx() if plot_energy else ax
ax2.plot(
x_axis, rms_grads, "o:", color="C3", label="RMS gradient"
) # red
ax2.set_ylabel("RMS of gradient / Ha(Å)^-1")
fig.legend(
loc="upper right", bbox_to_anchor=(1, 1), bbox_transform=ax.transAxes
)
    # bbox_inches="tight" uses a tight bounding box, which prevents labels from being cut off
save_plot(fig, filename, bbox_inches="tight")
def plot_bracket_method_energy_profile(
filename: str,
left_points: List[Tuple[int, Energy]],
cineb_point: Optional[tuple],
right_points: List[Tuple[int, Energy]],
x_title: str,
) -> None:
"""
Plot the energy profile from a bracketing method run, showing the
points from left and right image and final CI-NEB (if done), in
different colours. Energies should be in kcal/mol.
Args:
filename (str): Filename with extension
left_points (list[tuple]): List of tuples containing position and
energies from left image
cineb_point (tuple|None): Tuple with position and energy for CI-NEB peak
right_points (list[tuple]): List of tuples containing position and
energies from right image
x_title (str): Title of the x-axis
"""
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
# the data should be cast into kcal/mol
kcalmol = energy_unit_from_name("kcalmol")
left_x = [point[0] for point in left_points]
left_y = [point[1].to(kcalmol) for point in left_points]
ax.plot(left_x, left_y, "bo-", label="initial image")
right_x = [point[0] for point in right_points]
right_y = [point[1].to(kcalmol) for point in right_points]
ax.plot(right_x, right_y, "go-", label="final image")
# plot the CI-NEB point and join it to the ends
if cineb_point is not None:
ax.plot(
[left_x[-1], cineb_point[0], right_x[0]],
[
left_y[-1].to(kcalmol),
cineb_point[1].to(kcalmol),
right_y[0].to(kcalmol),
],
"ro-",
label="CI-NEB",
)
ax.set_xlabel(x_title)
ax.set_ylabel(f"Electronic energy / {kcalmol.plot_name}")
ax.legend()
save_plot(fig, filename=filename)
return None
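# Hedged usage sketch (data and filename are hypothetical, not from the
# original source); the points are (position, Energy) tuples from the two
# bracketing-method images:
#
#   plot_bracket_method_energy_profile(
#       filename="bracket_profile.pdf",
#       left_points=[(0, e0), (1, e1)],    # e0..e3 are Energy objects
#       cineb_point=None,                  # no CI-NEB refinement performed
#       right_points=[(2, e2), (3, e3)],
#       x_title="Iteration",
#   )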
|
0baac9a14961c0a7261e104a4221c454934a06ae
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/aks/azure-mgmt-devspaces/azure/mgmt/devspaces/models/_models_py3.py
|
c20512f46cee1c4dffba718dd4ef9f7b8467bd97
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 23,118
|
py
|
_models_py3.py
|
# coding=utf-8
# pylint: disable=too-many-lines
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union
from .. import _serialization
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from .. import models as _models
class ContainerHostMapping(_serialization.Model):
"""Container host mapping object specifying the Container host resource ID and its associated
Controller resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar container_host_resource_id: ARM ID of the Container Host resource.
:vartype container_host_resource_id: str
:ivar mapped_controller_resource_id: ARM ID of the mapped Controller resource.
:vartype mapped_controller_resource_id: str
"""
_validation = {
"mapped_controller_resource_id": {"readonly": True},
}
_attribute_map = {
"container_host_resource_id": {"key": "containerHostResourceId", "type": "str"},
"mapped_controller_resource_id": {"key": "mappedControllerResourceId", "type": "str"},
}
def __init__(self, *, container_host_resource_id: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword container_host_resource_id: ARM ID of the Container Host resource.
:paramtype container_host_resource_id: str
"""
super().__init__(**kwargs)
self.container_host_resource_id = container_host_resource_id
self.mapped_controller_resource_id = None
class Resource(_serialization.Model):
"""An Azure resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class TrackedResource(Resource):
"""The resource model definition for a ARM tracked top level resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar tags: Tags for the Azure resource.
:vartype tags: dict[str, str]
:ivar location: Region where the Azure resource is located.
:vartype location: str
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
"location": {"key": "location", "type": "str"},
}
def __init__(self, *, tags: Optional[Dict[str, str]] = None, location: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword tags: Tags for the Azure resource.
:paramtype tags: dict[str, str]
:keyword location: Region where the Azure resource is located.
:paramtype location: str
"""
super().__init__(**kwargs)
self.tags = tags
self.location = location
class Controller(TrackedResource): # pylint: disable=too-many-instance-attributes
"""Controller.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar tags: Tags for the Azure resource.
:vartype tags: dict[str, str]
:ivar location: Region where the Azure resource is located.
:vartype location: str
:ivar sku: Model representing SKU for Azure Dev Spaces Controller. Required.
:vartype sku: ~azure.mgmt.devspaces.models.Sku
:ivar provisioning_state: Provisioning state of the Azure Dev Spaces Controller. Known values
are: "Succeeded", "Failed", "Canceled", "Updating", "Creating", "Deleting", and "Deleted".
:vartype provisioning_state: str or ~azure.mgmt.devspaces.models.ProvisioningState
:ivar host_suffix: DNS suffix for public endpoints running in the Azure Dev Spaces Controller.
:vartype host_suffix: str
:ivar data_plane_fqdn: DNS name for accessing DataPlane services.
:vartype data_plane_fqdn: str
:ivar target_container_host_api_server_fqdn: DNS of the target container host's API server.
:vartype target_container_host_api_server_fqdn: str
:ivar target_container_host_resource_id: Resource ID of the target container host. Required.
:vartype target_container_host_resource_id: str
:ivar target_container_host_credentials_base64: Credentials of the target container host
(base64). Required.
:vartype target_container_host_credentials_base64: str
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"sku": {"required": True},
"provisioning_state": {"readonly": True},
"host_suffix": {"readonly": True},
"data_plane_fqdn": {"readonly": True},
"target_container_host_api_server_fqdn": {"readonly": True},
"target_container_host_resource_id": {"required": True},
"target_container_host_credentials_base64": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
"location": {"key": "location", "type": "str"},
"sku": {"key": "sku", "type": "Sku"},
"provisioning_state": {"key": "properties.provisioningState", "type": "str"},
"host_suffix": {"key": "properties.hostSuffix", "type": "str"},
"data_plane_fqdn": {"key": "properties.dataPlaneFqdn", "type": "str"},
"target_container_host_api_server_fqdn": {"key": "properties.targetContainerHostApiServerFqdn", "type": "str"},
"target_container_host_resource_id": {"key": "properties.targetContainerHostResourceId", "type": "str"},
"target_container_host_credentials_base64": {
"key": "properties.targetContainerHostCredentialsBase64",
"type": "str",
},
}
def __init__(
self,
*,
sku: "_models.Sku",
target_container_host_resource_id: str,
target_container_host_credentials_base64: str,
tags: Optional[Dict[str, str]] = None,
location: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword tags: Tags for the Azure resource.
:paramtype tags: dict[str, str]
:keyword location: Region where the Azure resource is located.
:paramtype location: str
:keyword sku: Model representing SKU for Azure Dev Spaces Controller. Required.
:paramtype sku: ~azure.mgmt.devspaces.models.Sku
:keyword target_container_host_resource_id: Resource ID of the target container host. Required.
:paramtype target_container_host_resource_id: str
:keyword target_container_host_credentials_base64: Credentials of the target container host
(base64). Required.
:paramtype target_container_host_credentials_base64: str
"""
super().__init__(tags=tags, location=location, **kwargs)
self.sku = sku
self.provisioning_state = None
self.host_suffix = None
self.data_plane_fqdn = None
self.target_container_host_api_server_fqdn = None
self.target_container_host_resource_id = target_container_host_resource_id
self.target_container_host_credentials_base64 = target_container_host_credentials_base64
class ControllerConnectionDetails(_serialization.Model):
"""ControllerConnectionDetails.
:ivar orchestrator_specific_connection_details: Base class for types that supply values used to
connect to container orchestrators.
:vartype orchestrator_specific_connection_details:
~azure.mgmt.devspaces.models.OrchestratorSpecificConnectionDetails
"""
_attribute_map = {
"orchestrator_specific_connection_details": {
"key": "orchestratorSpecificConnectionDetails",
"type": "OrchestratorSpecificConnectionDetails",
},
}
def __init__(
self,
*,
orchestrator_specific_connection_details: Optional["_models.OrchestratorSpecificConnectionDetails"] = None,
**kwargs: Any
) -> None:
"""
:keyword orchestrator_specific_connection_details: Base class for types that supply values used
to connect to container orchestrators.
:paramtype orchestrator_specific_connection_details:
~azure.mgmt.devspaces.models.OrchestratorSpecificConnectionDetails
"""
super().__init__(**kwargs)
self.orchestrator_specific_connection_details = orchestrator_specific_connection_details
class ControllerConnectionDetailsList(_serialization.Model):
"""ControllerConnectionDetailsList.
:ivar connection_details_list: List of Azure Dev Spaces Controller connection details.
:vartype connection_details_list:
list[~azure.mgmt.devspaces.models.ControllerConnectionDetails]
"""
_attribute_map = {
"connection_details_list": {"key": "connectionDetailsList", "type": "[ControllerConnectionDetails]"},
}
def __init__(
self, *, connection_details_list: Optional[List["_models.ControllerConnectionDetails"]] = None, **kwargs: Any
) -> None:
"""
:keyword connection_details_list: List of Azure Dev Spaces Controller connection details.
:paramtype connection_details_list:
list[~azure.mgmt.devspaces.models.ControllerConnectionDetails]
"""
super().__init__(**kwargs)
self.connection_details_list = connection_details_list
class ControllerList(_serialization.Model):
"""ControllerList.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of Azure Dev Spaces Controllers.
:vartype value: list[~azure.mgmt.devspaces.models.Controller]
:ivar next_link: The URI that can be used to request the next page for list of Azure Dev Spaces
Controllers.
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[Controller]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: Optional[List["_models.Controller"]] = None, **kwargs: Any) -> None:
"""
:keyword value: List of Azure Dev Spaces Controllers.
:paramtype value: list[~azure.mgmt.devspaces.models.Controller]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None
class ControllerUpdateParameters(_serialization.Model):
"""Parameters for updating an Azure Dev Spaces Controller.
:ivar tags: Tags for the Azure Dev Spaces Controller.
:vartype tags: dict[str, str]
:ivar target_container_host_credentials_base64: Credentials of the target container host
(base64).
:vartype target_container_host_credentials_base64: str
"""
_attribute_map = {
"tags": {"key": "tags", "type": "{str}"},
"target_container_host_credentials_base64": {
"key": "properties.targetContainerHostCredentialsBase64",
"type": "str",
},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
target_container_host_credentials_base64: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword tags: Tags for the Azure Dev Spaces Controller.
:paramtype tags: dict[str, str]
:keyword target_container_host_credentials_base64: Credentials of the target container host
(base64).
:paramtype target_container_host_credentials_base64: str
"""
super().__init__(**kwargs)
self.tags = tags
self.target_container_host_credentials_base64 = target_container_host_credentials_base64
class DevSpacesErrorResponse(_serialization.Model):
"""Error response indicates that the service is not able to process the incoming request. The
reason is provided in the error message.
:ivar error: The details of the error.
:vartype error: ~azure.mgmt.devspaces.models.ErrorDetails
"""
_attribute_map = {
"error": {"key": "error", "type": "ErrorDetails"},
}
def __init__(self, *, error: Optional["_models.ErrorDetails"] = None, **kwargs: Any) -> None:
"""
:keyword error: The details of the error.
:paramtype error: ~azure.mgmt.devspaces.models.ErrorDetails
"""
super().__init__(**kwargs)
self.error = error
class ErrorDetails(_serialization.Model):
"""ErrorDetails.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: Status code for the error.
:vartype code: str
:ivar message: Error message describing the error in detail.
:vartype message: str
:ivar target: The target of the particular error.
:vartype target: str
"""
_validation = {
"code": {"readonly": True},
"message": {"readonly": True},
"target": {"readonly": True},
}
_attribute_map = {
"code": {"key": "code", "type": "str"},
"message": {"key": "message", "type": "str"},
"target": {"key": "target", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.code = None
self.message = None
self.target = None
class OrchestratorSpecificConnectionDetails(_serialization.Model):
"""Base class for types that supply values used to connect to container orchestrators.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
KubernetesConnectionDetails
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar instance_type: Gets the Instance type. Required.
:vartype instance_type: str
"""
_validation = {
"instance_type": {"required": True, "readonly": True},
}
_attribute_map = {
"instance_type": {"key": "instanceType", "type": "str"},
}
_subtype_map = {"instance_type": {"Kubernetes": "KubernetesConnectionDetails"}}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.instance_type: Optional[str] = None
class KubernetesConnectionDetails(OrchestratorSpecificConnectionDetails):
"""Contains information used to connect to a Kubernetes cluster.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar instance_type: Gets the Instance type. Required.
:vartype instance_type: str
:ivar kube_config: Gets the kubeconfig for the cluster.
:vartype kube_config: str
"""
_validation = {
"instance_type": {"required": True, "readonly": True},
}
_attribute_map = {
"instance_type": {"key": "instanceType", "type": "str"},
"kube_config": {"key": "kubeConfig", "type": "str"},
}
def __init__(self, *, kube_config: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword kube_config: Gets the kubeconfig for the cluster.
:paramtype kube_config: str
"""
super().__init__(**kwargs)
self.instance_type: str = "Kubernetes"
self.kube_config = kube_config
class ListConnectionDetailsParameters(_serialization.Model):
"""Parameters for listing connection details of an Azure Dev Spaces Controller.
All required parameters must be populated in order to send to Azure.
:ivar target_container_host_resource_id: Resource ID of the target container host mapped to the
Azure Dev Spaces Controller. Required.
:vartype target_container_host_resource_id: str
"""
_validation = {
"target_container_host_resource_id": {"required": True},
}
_attribute_map = {
"target_container_host_resource_id": {"key": "targetContainerHostResourceId", "type": "str"},
}
def __init__(self, *, target_container_host_resource_id: str, **kwargs: Any) -> None:
"""
:keyword target_container_host_resource_id: Resource ID of the target container host mapped to
the Azure Dev Spaces Controller. Required.
:paramtype target_container_host_resource_id: str
"""
super().__init__(**kwargs)
self.target_container_host_resource_id = target_container_host_resource_id
class ResourceProviderOperationDefinition(_serialization.Model):
"""ResourceProviderOperationDefinition.
:ivar name: Resource provider operation name.
:vartype name: str
:ivar display:
:vartype display: ~azure.mgmt.devspaces.models.ResourceProviderOperationDisplay
"""
_attribute_map = {
"name": {"key": "name", "type": "str"},
"display": {"key": "display", "type": "ResourceProviderOperationDisplay"},
}
def __init__(
self,
*,
name: Optional[str] = None,
display: Optional["_models.ResourceProviderOperationDisplay"] = None,
**kwargs: Any
) -> None:
"""
:keyword name: Resource provider operation name.
:paramtype name: str
:keyword display:
:paramtype display: ~azure.mgmt.devspaces.models.ResourceProviderOperationDisplay
"""
super().__init__(**kwargs)
self.name = name
self.display = display
class ResourceProviderOperationDisplay(_serialization.Model):
"""ResourceProviderOperationDisplay.
:ivar provider: Name of the resource provider.
:vartype provider: str
:ivar resource: Name of the resource type.
:vartype resource: str
:ivar operation: Name of the resource provider operation.
:vartype operation: str
:ivar description: Description of the resource provider operation.
:vartype description: str
"""
_attribute_map = {
"provider": {"key": "provider", "type": "str"},
"resource": {"key": "resource", "type": "str"},
"operation": {"key": "operation", "type": "str"},
"description": {"key": "description", "type": "str"},
}
def __init__(
self,
*,
provider: Optional[str] = None,
resource: Optional[str] = None,
operation: Optional[str] = None,
description: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword provider: Name of the resource provider.
:paramtype provider: str
:keyword resource: Name of the resource type.
:paramtype resource: str
:keyword operation: Name of the resource provider operation.
:paramtype operation: str
:keyword description: Description of the resource provider operation.
:paramtype description: str
"""
super().__init__(**kwargs)
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
class ResourceProviderOperationList(_serialization.Model):
"""ResourceProviderOperationList.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Resource provider operations list.
:vartype value: list[~azure.mgmt.devspaces.models.ResourceProviderOperationDefinition]
:ivar next_link: The URI that can be used to request the next page for list of Azure
operations.
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[ResourceProviderOperationDefinition]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self, *, value: Optional[List["_models.ResourceProviderOperationDefinition"]] = None, **kwargs: Any
) -> None:
"""
:keyword value: Resource provider operations list.
:paramtype value: list[~azure.mgmt.devspaces.models.ResourceProviderOperationDefinition]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None
class Sku(_serialization.Model):
"""Model representing SKU for Azure Dev Spaces Controller.
All required parameters must be populated in order to send to Azure.
:ivar name: The name of the SKU for Azure Dev Spaces Controller. Required. "S1"
:vartype name: str or ~azure.mgmt.devspaces.models.SkuName
:ivar tier: The tier of the SKU for Azure Dev Spaces Controller. "Standard"
:vartype tier: str or ~azure.mgmt.devspaces.models.SkuTier
"""
_validation = {
"name": {"required": True},
}
_attribute_map = {
"name": {"key": "name", "type": "str"},
"tier": {"key": "tier", "type": "str"},
}
def __init__(
self,
*,
name: Union[str, "_models.SkuName"],
tier: Optional[Union[str, "_models.SkuTier"]] = None,
**kwargs: Any
) -> None:
"""
:keyword name: The name of the SKU for Azure Dev Spaces Controller. Required. "S1"
:paramtype name: str or ~azure.mgmt.devspaces.models.SkuName
:keyword tier: The tier of the SKU for Azure Dev Spaces Controller. "Standard"
:paramtype tier: str or ~azure.mgmt.devspaces.models.SkuTier
"""
super().__init__(**kwargs)
self.name = name
self.tier = tier
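# Illustrative construction of a Controller model with its required fields
# (values are placeholders, not part of the generated code):
#
#   controller = Controller(
#       sku=Sku(name="S1", tier="Standard"),
#       target_container_host_resource_id="<aks-resource-id>",
#       target_container_host_credentials_base64="<base64-kubeconfig>",
#       location="eastus",
#       tags={"env": "dev"},
#   )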
|
6832cd82fdc5bd1e82ea73aa3625400011b71df5
|
eda03521b87da8bdbef6339b5b252472a5be8d23
|
/Meta/check-emoji.py
|
ae8e7ace85ff285eaba6ea13314c7b91f6f93219
|
[
"BSD-2-Clause"
] |
permissive
|
SerenityOS/serenity
|
6ba3ffb242ed76c9f335bd2c3b9a928329cd7d98
|
ef9b6c25fafcf4ef0b44a562ee07f6412aeb8561
|
refs/heads/master
| 2023-09-01T13:04:30.262106
| 2023-09-01T08:06:28
| 2023-09-01T10:45:38
| 160,083,795
| 27,256
| 3,929
|
BSD-2-Clause
| 2023-09-14T21:00:04
| 2018-12-02T19:28:41
|
C++
|
UTF-8
|
Python
| false
| false
| 2,799
|
py
|
check-emoji.py
|
#!/usr/bin/env python3
import os
import re
import sys
RE_INVALID_CHAR = re.compile('[^A-FU0-9+_]')
RE_MISSING_UNDERSCORE = re.compile('[^_]U')
RE_MISSING_LETTER_U = re.compile('_(?!U)')
RE_MISSING_SIGN_PLUS = re.compile('U(?!\\+)')
def any_problems_here():
found_invalid_filenames = False
for filename in os.listdir():
if not filename.endswith('.png'):
print(f'Non-png file {filename} does not belong in the emoji directory')
found_invalid_filenames = True
break
filename = filename[:-len('.png')]
if RE_INVALID_CHAR.search(filename):
print(f'Filename {filename}.png contains invalid characters in its filename. Only uppercase letters'
' A-F and U, numbers, +, and _ should be used.')
found_invalid_filenames = True
break
if 'U+0' in filename:
print(f'Filename {filename}.png contains codepoint(s) with leading zeros. Leading zeros should be'
' removed from codepoint(s).')
found_invalid_filenames = True
break
if '+U' in filename:
print(f'Filename {filename}.png is incorrectly named. "_" should be used as a separator between'
' codepoints, not "+".')
found_invalid_filenames = True
break
if RE_MISSING_UNDERSCORE.search(filename):
print(f'Filename {filename}.png is missing an underscore "_" between codepoints.')
found_invalid_filenames = True
break
if RE_MISSING_LETTER_U.search(filename):
print(f'Filename {filename}.png is either missing a "U" to indicate the start of a codepoint,'
' or has a spurious underscore ("_").')
found_invalid_filenames = True
break
if RE_MISSING_SIGN_PLUS.search(filename):
print(f'Filename {filename}.png is either missing a "+" after a "U", or has a spurious "U".')
found_invalid_filenames = True
break
if 'U+FE0F' in filename:
print(f'Filename {filename}.png should not include any emoji presentation selectors. U+FE0F codepoints'
' should be removed from the filename.')
found_invalid_filenames = True
break
code_points = [int(code_point[len('U+'):], 16) for code_point in filename.split('_')]
if any(code_point > 0x10ffff for code_point in code_points):
print(f'Filename {filename}.png contains a code point exceeding U+10FFFF')
found_invalid_filenames = True
break
return found_invalid_filenames
if __name__ == '__main__':
os.chdir(os.path.dirname(__file__) + "/../Base/res/emoji/")
if any_problems_here():
sys.exit(1)
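# Illustrative filenames against the rules enforced above (examples only):
#   U+1F600.png           valid: single codepoint, uppercase hex, no leading zeros
#   U+1F926_U+200D.png    valid: codepoints joined with "_"
#   U+01F600.png          invalid: leading zero in the codepoint
#   U+1F926+U+200D.png    invalid: "+" used as a separator instead of "_"
#   U+1F3F3_U+FE0F.png    invalid: contains the presentation selector U+FE0F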
|
b29f44d18c22baf78b1edc8f8fc1f1257df1af4b
|
e6e888ea52987c5ac4e7ce143e947bc2c4839de8
|
/generate_poem.py
|
2aedc95404de74526efcffb0ea1717ebb258e4f9
|
[
"Apache-2.0"
] |
permissive
|
IlyaGusev/rupo
|
23078b2b07bb6443c0dc09287a3d584728a2b6a8
|
3d114f92dec08c16d28e7e5a1076cd7ea871043f
|
refs/heads/master
| 2022-07-21T00:33:48.023357
| 2022-07-03T14:23:36
| 2022-07-03T14:23:36
| 85,340,357
| 185
| 35
| null | 2017-08-31T00:00:11
| 2017-03-17T17:54:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,315
|
py
|
generate_poem.py
|
import argparse
from rupo.api import Engine
from rupo.settings import RU_STRESS_DEFAULT_MODEL, ZALYZNYAK_DICT, GENERATOR_MODEL_DIR
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--model-path', type=str, default=GENERATOR_MODEL_DIR)
parser.add_argument('--token-vocab-path', type=str, default=None)
parser.add_argument('--stress-vocab-path', type=str, default=None)
parser.add_argument('--metre-schema', type=str, default='+-')
parser.add_argument('--rhyme-pattern', type=str, default='abab')
parser.add_argument('--n-syllables', type=int, default=8)
parser.add_argument('--sampling-k', type=int, default=50000)
parser.add_argument('--beam-width', type=int, default=None)
parser.add_argument('--temperature', type=float, default=1.0)
parser.add_argument('--last-text', type=str, default="")
parser.add_argument('--count', type=int, default=100)
args = parser.parse_args()
kwargs = vars(args)
count = kwargs.pop('count')
engine = Engine()
engine.load(RU_STRESS_DEFAULT_MODEL, ZALYZNYAK_DICT)
for seed in range(count):
print(seed)
try:
poem = engine.generate_poem(seed=seed, **kwargs)
print(poem)
except AssertionError as e:
print("Error: ", e)
|
0484afcc5349b2139a66d22661453f2c91749609
|
11ccb6827cf643b37c44a2e174422f9c6f9497f2
|
/falcon/request_helpers.py
|
7534db108cc088fc7bcd765a967badf5d88c2963
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
falconry/falcon
|
7506f838520e5105714702d9a9b2f0e608a704b9
|
32207fe8a7ebdfb95271d8430c4977c7a654928c
|
refs/heads/master
| 2023-08-31T05:32:03.755869
| 2023-08-21T21:45:34
| 2023-08-21T21:45:34
| 7,040,500
| 8,922
| 1,183
|
Apache-2.0
| 2023-09-09T20:58:36
| 2012-12-06T18:17:51
|
Python
|
UTF-8
|
Python
| false
| false
| 6,153
|
py
|
request_helpers.py
|
# Copyright 2013 by Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for the Request class."""
from http import cookies as http_cookies
import re
# TODO: Body, BoundedStream import here is for backwards-compatibility
# and it should be removed in Falcon 4.0
from falcon.stream import Body # NOQA
from falcon.stream import BoundedStream # NOQA
from falcon.util import ETag
# https://tools.ietf.org/html/rfc6265#section-4.1.1
#
# NOTE(kgriffs): Fortunately we don't have to worry about code points in
# header strings outside the range 0x0000 - 0x00FF per PEP 3333
# (see also: https://www.python.org/dev/peps/pep-3333/#unicode-issues)
#
_COOKIE_NAME_RESERVED_CHARS = re.compile(
'[\x00-\x1F\x7F-\xFF()<>@,;:\\\\"/[\\]?={} \x09]'
)
# NOTE(kgriffs): strictly speaking, the weakness indicator is
# case-sensitive, but this wasn't explicit until RFC 7232
# so we allow for both. We also require quotes because that's
# been standardized since 1999, and it makes the regex simpler
# and more performant.
_ENTITY_TAG_PATTERN = re.compile(r'([Ww]/)?"([^"]*)"')
def parse_cookie_header(header_value):
"""Parse a Cookie header value into a dict of named values.
(See also: RFC 6265, Section 5.4)
Args:
header_value (str): Value of a Cookie header
Returns:
dict: Map of cookie names to a list of all cookie values found in the
header for that name. If a cookie is specified more than once in the
header, the order of the values will be preserved.
"""
# See also:
#
# https://tools.ietf.org/html/rfc6265#section-5.4
# https://tools.ietf.org/html/rfc6265#section-4.1.1
#
cookies = {}
for token in header_value.split(';'):
name, __, value = token.partition('=')
# NOTE(kgriffs): RFC6265 is more strict about whitespace, but we
# are more lenient here to better handle old user agents and to
# mirror Python's standard library cookie parsing behavior
name = name.strip()
value = value.strip()
# NOTE(kgriffs): Skip malformed cookie-pair
if not name:
continue
# NOTE(kgriffs): Skip cookies with invalid names
if _COOKIE_NAME_RESERVED_CHARS.search(name):
continue
# NOTE(kgriffs): To maximize compatibility, we mimic the support in the
# standard library for escaped characters within a double-quoted
# cookie value according to the obsolete RFC 2109. However, we do not
# expect to see this encoding used much in practice, since Base64 is
# the current de-facto standard, as recommended by RFC 6265.
#
# PERF(kgriffs): These checks have been hoisted from within _unquote()
# to avoid the extra function call in the majority of the cases when it
# is not needed.
if len(value) > 2 and value[0] == '"' and value[-1] == '"':
value = http_cookies._unquote(value)
# PERF(kgriffs): This is slightly more performant as
# compared to using dict.setdefault()
if name in cookies:
cookies[name].append(value)
else:
cookies[name] = [value]
return cookies
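# Illustrative example (not part of the original module): repeated names are
# preserved in order, and a missing value yields an empty string.
#
#   parse_cookie_header('foo=bar; foo=baz; empty=')
#   -> {'foo': ['bar', 'baz'], 'empty': ['']}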
def header_property(wsgi_name):
"""Create a read-only header property.
Args:
wsgi_name (str): Case-sensitive name of the header as it would
appear in the WSGI environ ``dict`` (i.e., 'HTTP_*')
Returns:
        A property instance that can be assigned to a class variable.
"""
def fget(self):
try:
return self.env[wsgi_name] or None
except KeyError:
return None
return property(fget)
# NOTE(kgriffs): Going forward we should privatize helpers, as done here. We
# can always move this over to falcon.util if we decide it would be
# more generally useful to app developers.
def _parse_etags(etag_str):
"""Parse a string containing one or more HTTP entity-tags.
The string is assumed to be formatted as defined for a precondition
header, and may contain either a single ETag, or multiple comma-separated
ETags. The string may also contain a '*' character, in order to indicate
that any ETag should match the precondition.
(See also: RFC 7232, Section 3)
Args:
etag_str (str): An ASCII header value to parse ETags from. ETag values
within may be prefixed by ``W/`` to indicate that the weak comparison
function should be used.
Returns:
list: A list of unquoted ETags or ``['*']`` if all ETags should be
        matched. If the string to be parsed is empty, or contains only
whitespace, ``None`` will be returned instead.
"""
etag_str = etag_str.strip()
if not etag_str:
return None
if etag_str == '*':
return [etag_str]
if ',' not in etag_str:
return [ETag.loads(etag_str)]
etags = []
# PERF(kgriffs): Parsing out the weak string like this turns out to be more
# performant than grabbing the entire entity-tag and passing it to
# ETag.loads(). This is also faster than parsing etag_str manually via
# str.find() and slicing.
for weak, value in _ENTITY_TAG_PATTERN.findall(etag_str):
t = ETag(value)
t.is_weak = bool(weak)
etags.append(t)
# NOTE(kgriffs): Normalize a string with only whitespace and commas
# to None, since it is like a list of individual ETag headers that
# are all set to nothing, and so therefore basically should be
# treated as not having been set in the first place.
return etags or None
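# Illustrative examples (not part of the original module):
#
#   _parse_etags('W/"67ab43", "54ed21"')
#   -> [ETag('67ab43') with is_weak=True, ETag('54ed21') with is_weak=False]
#   _parse_etags('*')  -> ['*']
#   _parse_etags('')   -> None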
|
3d3b260cc83a550aaa286c774fdb9f66bf70d0bc
|
a9c359681631e8344f55163a2d69018ed02c0a90
|
/openr/py/openr/cli/clis/lm.py
|
d3753c75898200d497fe9bd5b6f4eab27e4521f1
|
[
"MIT",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
facebook/openr
|
66c82707ae47fa5ed711c20f0355ad7100a3cf1c
|
8e4c6e553f0314763c1595dd6097dd578d771f1c
|
refs/heads/main
| 2023-09-03T02:55:03.399114
| 2023-07-26T16:46:46
| 2023-07-26T16:46:46
| 108,306,129
| 936
| 295
|
MIT
| 2023-08-31T23:03:31
| 2017-10-25T17:59:53
|
C++
|
UTF-8
|
Python
| false
| false
| 9,458
|
py
|
lm.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import bunch
import click
from openr.cli.commands import kvstore, lm
from openr.cli.utils import utils
from openr.cli.utils.utils import parse_nodes
class LMCli:
def __init__(self):
# [Show Cmd]
self.lm.add_command(LMLinksCli().links, name="links")
self.lm.add_command(LMAdjCli().adj, name="adj")
self.lm.add_command(LMValidateCli().validate, name="validate")
# [Hard-Drain] set node overload
self.lm.add_command(
SetNodeOverloadCli().set_node_overload, name="set-node-overload"
)
self.lm.add_command(
UnsetNodeOverloadCli().unset_node_overload, name="unset-node-overload"
)
# [Hard-Drain] set link overload
self.lm.add_command(
SetLinkOverloadCli().set_link_overload, name="set-link-overload"
)
self.lm.add_command(
UnsetLinkOverloadCli().unset_link_overload, name="unset-link-overload"
)
# [Soft-Drain] set node metric increment
self.lm.add_command(
IncreaseNodeMetricCli().increase_node_metric, name="increase-node-metric"
)
self.lm.add_command(
ClearNodeMetricCli().clear_node_metric,
name="clear-node-metric-increase",
)
# [Soft-Drain] set link metric increment
self.lm.add_command(
IncreaseLinkMetricCli().increase_link_metric, name="increase-link-metric"
)
self.lm.add_command(
ClearLinkMetricCli().clear_link_metric,
name="clear-link-metric-increase",
)
# [Metric Override]
self.lm.add_command(
OverrideAdjMetricCli().override_adj_metric, name="override-adj-metric"
)
self.lm.add_command(
ClearAdjMetricOverrideCli().clear_adj_metric_override,
name="clear-adj-metric-override",
)
@click.group()
@click.pass_context
def lm(ctx): # noqa: B902
"""CLI tool to peek into Link Monitor module."""
pass
class LMValidateCli:
@click.command()
@click.pass_obj
def validate(cli_opts): # noqa: B902
"""Run checks on discovered interfaces"""
lm.LMValidateCmd(cli_opts).run()
class LMLinksCli:
@click.command()
@click.option(
"--only-suppressed",
default=False,
is_flag=True,
help="Only show suppressed links",
)
@click.option("--json/--no-json", default=False, help="Dump in JSON format")
@click.pass_obj
def links(cli_opts, only_suppressed, json): # noqa: B902
"""Dump all known links of the current host"""
lm.LMLinksCmd(cli_opts).run(only_suppressed, json)
class LMAdjCli:
@click.command()
@click.option("--json/--no-json", default=False, help="Dump in JSON format")
@click.argument("areas", nargs=-1)
@click.pass_obj
def adj(cli_opts: bunch.Bunch, json: bool, areas: List[str]): # noqa: B902
"""Dump all formed adjacencies of the current host"""
nodes = parse_nodes(cli_opts, "")
lm.LMAdjCmd(cli_opts).run(nodes, json, areas)
"""
[Hard-Drain]
- Node Level Overload;
- Link Level Overload;
"""
class SetNodeOverloadCli:
@click.command()
@click.option("--yes", is_flag=True, help="Make command non-interactive")
@click.pass_obj
def set_node_overload(cli_opts, yes): # noqa: B902
"""Set overload bit to stop transit traffic through node."""
lm.SetNodeOverloadCmd(cli_opts).run(yes)
class UnsetNodeOverloadCli:
@click.command()
@click.option("--yes", is_flag=True, help="Make command non-interactive")
@click.pass_obj
def unset_node_overload(cli_opts, yes): # noqa: B902
"""Unset overload bit to resume transit traffic through node."""
lm.UnsetNodeOverloadCmd(cli_opts).run(yes)
class SetLinkOverloadCli:
@click.command()
@click.argument("interface")
@click.option("--yes", is_flag=True, help="Make command non-interactive")
@click.pass_obj
def set_link_overload(cli_opts, interface, yes): # noqa: B902
"""Set overload bit for a link. Transit traffic will be drained."""
lm.SetLinkOverloadCmd(cli_opts).run(interface, yes)
class UnsetLinkOverloadCli:
@click.command()
@click.argument("interface")
@click.option("--yes", is_flag=True, help="Make command non-interactive")
@click.pass_obj
def unset_link_overload(cli_opts, interface, yes): # noqa: B902
"""Unset overload bit for a link to allow transit traffic."""
lm.UnsetLinkOverloadCmd(cli_opts).run(interface, yes)
"""
[Soft-Drain]
- Node Level Metric Increment
- Link Level Metric Increment
"""
class IncreaseNodeMetricCli:
@click.command()
@click.argument("metric")
@click.option("--yes", is_flag=True, help="Make command non-interactive")
@click.option("--quiet", is_flag=True, help="Do not print out the links table")
@click.pass_obj
def increase_node_metric(cli_opts, metric, yes, quiet): # noqa: B902
"""
Increase node-level metric for soft-drain behavior.
"""
# increase node metric
lm.IncreaseNodeMetricCmd(cli_opts).run(metric, yes)
# show adj metric result
if not quiet:
nodes = parse_nodes(cli_opts, "")
lm.LMAdjCmd(cli_opts).run(nodes, False)
class ClearNodeMetricCli:
@click.command()
@click.option("--yes", is_flag=True, help="Make command non-interactive")
@click.option("--quiet", is_flag=True, help="Do not print out the links table")
@click.pass_obj
def clear_node_metric(cli_opts, yes, quiet): # noqa: B902
"""
Clear node-level metric increment for soft-drain behavior.
"""
# clear node metric increment
lm.ClearNodeMetricCmd(cli_opts).run(yes)
# show adj metric result
if not quiet:
nodes = parse_nodes(cli_opts, "")
lm.LMAdjCmd(cli_opts).run(nodes, False)
class IncreaseLinkMetricCli:
@click.command()
@click.argument("interface", nargs=-1, required=True)
@click.argument("metric")
@click.option("--yes", is_flag=True, help="Make command non-interactive")
@click.option("--quiet", is_flag=True, help="Do not print out the links table")
@click.pass_obj
def increase_link_metric(cli_opts, interface, metric, yes, quiet): # noqa: B902
"""
Increase link-level metric for soft-drain behavior.
"""
# increase link metric
lm.IncreaseLinkMetricCmd(cli_opts).run(interface, metric, yes)
# show adj metric result
if not quiet:
nodes = parse_nodes(cli_opts, "")
lm.LMAdjCmd(cli_opts).run(nodes, False)
class ClearLinkMetricCli:
@click.command()
@click.argument("interface", nargs=-1, required=True)
@click.option("--yes", is_flag=True, help="Make command non-interactive")
@click.option("--quiet", is_flag=True, help="Do not print out the links table")
@click.pass_obj
def clear_link_metric(cli_opts, interface, yes, quiet): # noqa: B902
"""
Clear link-level metric increment for soft-drain behavior.
"""
# clear link metric increment
lm.ClearLinkMetricCmd(cli_opts).run(interface, yes)
# show adj metric result
if not quiet:
nodes = parse_nodes(cli_opts, "")
lm.LMAdjCmd(cli_opts).run(nodes, False)
class OverrideAdjMetricCli:
@click.command()
@click.argument("node")
@click.argument("interface")
@click.argument("metric")
@click.option("--yes", is_flag=True, help="Make command non-interactive")
@click.option("--quiet", is_flag=True, help="Do not print out the links table")
@click.pass_obj
def override_adj_metric(
cli_opts, node, interface, metric, yes, quiet # noqa: B902
):
"""
Override the adjacency metric value.
"""
question_str = "Are you sure to override metric for adjacency {} {} ?".format(
node, interface
)
if not utils.yesno(question_str, yes):
return
lm.OverrideAdjMetricCmd(cli_opts).run(node, interface, metric, yes)
if not quiet:
nodes = parse_nodes(cli_opts, "")
kvstore.ShowAdjNodeCmd(cli_opts).run(nodes, node, interface)
class ClearAdjMetricOverrideCli:
@click.command()
@click.argument("node")
@click.argument("interface")
@click.option("--yes", is_flag=True, help="Make command non-interactive")
@click.option("--quiet", is_flag=True, help="Do not print out the links table")
@click.pass_obj
def clear_adj_metric_override(cli_opts, node, interface, yes, quiet): # noqa: B902
"""
Clear previously overridden adjacency metric value.
"""
question_str = "Are you sure to unset metric " "for adjacency {} {} ?".format(
node, interface
)
if not utils.yesno(question_str, yes):
return
lm.ClearAdjMetricOverrideCmd(cli_opts).run(node, interface, yes)
if not quiet:
nodes = parse_nodes(cli_opts, "")
kvstore.ShowAdjNodeCmd(cli_opts).run(nodes, node, interface)
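# Example invocations, assuming Open/R's standard CLI entry point `breeze`
# (the entry-point name is an assumption, not stated in this file):
#   breeze lm links                       # dump known links
#   breeze lm set-node-overload --yes     # hard-drain the node
#   breeze lm increase-node-metric 100    # soft-drain via metric increment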
|
37433f7cc6bd8877df7702065c77a3fbc679bd50
|
1a3b369d0eed3455545567400d37dca114042902
|
/pydexcom/__init__.py
|
2d5487071c296b7dd934bec66ab4e11a09fdf998
|
[
"MIT"
] |
permissive
|
gagebenne/pydexcom
|
96bcd5bea346c785823d1d6f6af2ef0ae9016b62
|
b6982950757ee3fd85715bd4c67017637a646560
|
refs/heads/main
| 2023-08-11T06:57:26.691374
| 2023-08-09T01:05:30
| 2023-08-09T01:05:30
| 239,246,796
| 107
| 33
|
MIT
| 2023-09-08T16:05:51
| 2020-02-09T04:31:44
|
Python
|
UTF-8
|
Python
| false
| false
| 11,083
|
py
|
__init__.py
|
"""
.. include:: ../README.md
"""
import logging
import re
from datetime import datetime
from typing import Any, Dict, List, Optional
import requests
from .const import (
DEFAULT_UUID,
DEXCOM_APPLICATION_ID,
DEXCOM_AUTHENTICATE_ENDPOINT,
DEXCOM_BASE_URL,
DEXCOM_BASE_URL_OUS,
DEXCOM_GLUCOSE_READINGS_ENDPOINT,
DEXCOM_LOGIN_ID_ENDPOINT,
DEXCOM_TREND_DIRECTIONS,
MAX_MAX_COUNT,
MAX_MINUTES,
MMOL_L_CONVERSION_FACTOR,
REQUEST_TIMEOUT,
TREND_ARROWS,
TREND_DESCRIPTIONS,
)
from .errors import (
AccountError,
AccountErrorEnum,
ArgumentError,
ArgumentErrorEnum,
DexcomError,
SessionError,
SessionErrorEnum,
)
_LOGGER = logging.getLogger("pydexcom")
class GlucoseReading:
"""Class for parsing glucose reading from Dexcom Share API."""
def __init__(self, json_glucose_reading: Dict[str, Any]):
"""Initialize `GlucoseReading` with JSON glucose reading from Dexcom Share API.
:param json_glucose_reading: JSON glucose reading from Dexcom Share API
"""
self._json = json_glucose_reading
try:
self._value = int(json_glucose_reading["Value"])
self._trend_direction: str = json_glucose_reading["Trend"]
# Dexcom Share API returns `str` direction now, previously `int` trend
self._trend: int = DEXCOM_TREND_DIRECTIONS[self._trend_direction]
self._datetime = datetime.fromtimestamp(
int(re.sub("[^0-9]", "", json_glucose_reading["WT"])) / 1000.0
)
except (KeyError, TypeError, ValueError):
raise ArgumentError(ArgumentErrorEnum.GLUCOSE_READING_INVALID)
@property
def value(self) -> int:
"""Blood glucose value in mg/dL."""
return self._value
@property
def mg_dl(self) -> int:
"""Blood glucose value in mg/dL."""
return self._value
@property
def mmol_l(self) -> float:
"""Blood glucose value in mmol/L."""
return round(self.value * MMOL_L_CONVERSION_FACTOR, 1)
@property
def trend(self) -> int:
"""Blood glucose trend information
(value of `pydexcom.const.DEXCOM_TREND_DIRECTIONS`)."""
return self._trend
@property
def trend_direction(self) -> str:
"""Blood glucose trend direction
(key of `pydexcom.const.DEXCOM_TREND_DIRECTIONS`)."""
return self._trend_direction
@property
def trend_description(self) -> Optional[str]:
"""Blood glucose trend information description
(`pydexcom.const.TREND_DESCRIPTIONS`).
"""
return TREND_DESCRIPTIONS[self._trend]
@property
def trend_arrow(self) -> str:
"""Blood glucose trend as unicode arrow (`pydexcom.const.TREND_ARROWS`)."""
return TREND_ARROWS[self._trend]
@property
def datetime(self) -> datetime:
"""Glucose reading recorded time as datetime."""
return self._datetime
@property
def json(self) -> Dict[str, Any]:
"""JSON glucose reading from Dexcom Share API."""
return self._json
def __str__(self) -> str:
return str(self._value)
class Dexcom:
"""Class for communicating with Dexcom Share API."""
def __init__(self, username: str, password: str, ous: bool = False):
"""
Initialize `Dexcom` with Dexcom Share credentials.
:param username: username for the Dexcom Share user, *not follower*.
:param password: password for the Dexcom Share user.
:param ous: whether the Dexcom Share user is outside of the US.
"""
self._base_url = DEXCOM_BASE_URL_OUS if ous else DEXCOM_BASE_URL
self._username = username
self._password = password
self._account_id: Optional[str] = None
self._session_id: Optional[str] = None
self.__session = requests.Session()
self._session()
def _post(
self,
endpoint: str,
params: Optional[Dict[str, Any]] = None,
json: Optional[Dict[str, Any]] = None,
) -> Any:
"""Send post request to Dexcom Share API.
:param endpoint: URL of the post request
:param params: `dict` to send in the query string of the post request
:param json: JSON to send in the body of the post request
"""
response = self.__session.post(
f"{self._base_url}/{endpoint}",
headers={"Accept-Encoding": "application/json"},
params=params,
json={} if json is None else json,
timeout=REQUEST_TIMEOUT,
)
try:
response.raise_for_status()
return response.json()
except requests.HTTPError as http_error:
error = self._handle_response(response)
if error:
raise error from http_error
_LOGGER.error("%s", response.text)
raise http_error
    def _handle_response(self, response: requests.Response) -> Optional[DexcomError]:
        """
        Parse `requests.Response` for `pydexcom.errors.DexcomError`.
        :param response: `requests.Response` to parse
        """
        error: Optional[DexcomError] = None
if response.json():
_LOGGER.debug("%s", response.json())
code = response.json().get("Code", None)
message = response.json().get("Message", None)
if code == "SessionIdNotFound":
error = SessionError(SessionErrorEnum.NOT_FOUND)
elif code == "AccountPasswordInvalid":
error = AccountError(AccountErrorEnum.PASSWORD_INVALID)
elif code == "SSO_AuthenticateMaxAttemptsExceeed":
error = AccountError(AccountErrorEnum.MAX_ATTEMPTS)
elif code == "InvalidArgument":
if message and "accountName" in message:
error = ArgumentError(ArgumentErrorEnum.USERNAME_INVALID)
elif message and "password" in message:
error = ArgumentError(ArgumentErrorEnum.PASSWORD_INVALID)
elif message and "UUID" in message:
error = ArgumentError(ArgumentErrorEnum.ACCOUNT_ID_INVALID)
elif code and message:
_LOGGER.error("%s: %s", code, message)
return error
def _validate_session_id(self) -> None:
"""Validate session ID."""
if any([not isinstance(self._session_id, str), not self._session_id]):
raise ArgumentError(ArgumentErrorEnum.SESSION_ID_INVALID)
if self._session_id == DEFAULT_UUID:
raise ArgumentError(ArgumentErrorEnum.SESSION_ID_DEFAULT)
def _validate_credentials(self) -> None:
"""Validate credentials."""
if any([not isinstance(self._username, str), not self._username]):
raise ArgumentError(ArgumentErrorEnum.USERNAME_INVALID)
if any([not isinstance(self._password, str), not self._password]):
raise ArgumentError(ArgumentErrorEnum.PASSWORD_INVALID)
def _validate_account_id(self) -> None:
"""Validate account ID."""
if any([not isinstance(self._account_id, str), not self._account_id]):
raise ArgumentError(ArgumentErrorEnum.ACCOUNT_ID_INVALID)
if self._account_id == DEFAULT_UUID:
raise ArgumentError(ArgumentErrorEnum.ACCOUNT_ID_DEFAULT)
def _get_account_id(self) -> str:
"""Retrieve account ID from the authentication endpoint
(`pydexcom.const.DEXCOM_AUTHENTICATE_ENDPOINT`)."""
return self._post(
DEXCOM_AUTHENTICATE_ENDPOINT,
json={
"accountName": self._username,
"password": self._password,
"applicationId": DEXCOM_APPLICATION_ID,
},
)
def _get_session_id(self) -> str:
"""Retrieve session ID from the login endpoint
(`pydexcom.const.DEXCOM_LOGIN_ID_ENDPOINT`)."""
return self._post(
DEXCOM_LOGIN_ID_ENDPOINT,
json={
"accountId": self._account_id,
"password": self._password,
"applicationId": DEXCOM_APPLICATION_ID,
},
)
def _session(self) -> None:
"""Create Dexcom Share API session."""
self._validate_credentials()
if self._account_id is None:
self._account_id = self._get_account_id()
self._validate_account_id()
self._session_id = self._get_session_id()
self._validate_session_id()
def _get_glucose_readings(
self, minutes: int = MAX_MINUTES, max_count: int = MAX_MAX_COUNT
) -> List[Dict[str, Any]]:
"""Retrieve glucose readings from the glucose readings endpoint
(`pydexcom.const.DEXCOM_GLUCOSE_READINGS_ENDPOINT`)."""
if not isinstance(minutes, int) or any([minutes < 0, minutes > MAX_MINUTES]):
raise ArgumentError(ArgumentErrorEnum.MINUTES_INVALID)
if not isinstance(max_count, int) or any(
[max_count < 0, max_count > MAX_MAX_COUNT]
):
raise ArgumentError(ArgumentErrorEnum.MAX_COUNT_INVALID)
return self._post(
DEXCOM_GLUCOSE_READINGS_ENDPOINT,
params={
"sessionId": self._session_id,
"minutes": minutes,
"maxCount": max_count,
},
)
def get_glucose_readings(
self, minutes: int = MAX_MINUTES, max_count: int = MAX_MAX_COUNT
) -> List[GlucoseReading]:
"""Get `max_count` glucose readings within specified number of `minutes`.
Catches one instance of a thrown `pydexcom.errors.SessionError` if session ID
expired, attempts to get a new session ID and retries.
:param minutes: Number of minutes to retrieve glucose readings from (1-1440)
:param max_count: Maximum number of glucose readings to retrieve (1-288)
"""
json_glucose_readings: List[Dict[str, Any]] = []
try:
# Requesting glucose reading with DEFAULT_UUID returns non-JSON empty string
self._validate_session_id()
json_glucose_readings = self._get_glucose_readings(minutes, max_count)
except SessionError:
# Attempt to update expired session ID
self._session()
json_glucose_readings = self._get_glucose_readings(minutes, max_count)
return [GlucoseReading(json_reading) for json_reading in json_glucose_readings]
def get_latest_glucose_reading(self) -> Optional[GlucoseReading]:
"""Get latest available glucose reading, within the last 24 hours."""
glucose_readings = self.get_glucose_readings(max_count=1)
return glucose_readings[0] if glucose_readings else None
def get_current_glucose_reading(self) -> Optional[GlucoseReading]:
"""Get current available glucose reading, within the last 10 minutes."""
glucose_readings = self.get_glucose_readings(minutes=10, max_count=1)
return glucose_readings[0] if glucose_readings else None
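# Minimal usage sketch (credentials are placeholders; a real call would
# authenticate against the Dexcom Share API):
#
#   dexcom = Dexcom("username", "password")  # pass ous=True outside the US
#   reading = dexcom.get_current_glucose_reading()
#   if reading is not None:
#       print(reading.mg_dl, reading.trend_arrow, reading.datetime)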
|
23df0fd9d243bced81ba69076bfa9bdcd6229bbb
|
d1f15554df2d5c0f74ddbcba6e870359841f682b
|
/wagtail/tests/test_audit_log.py
|
4faba09f31486f6933c79095a192495a63f043d7
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
wagtail/wagtail
|
bd405f89b86e0c625fef0685fd6bfba41cf5cbfc
|
06a7bc6124bf62675c09fbe0a4ed9bbac183e025
|
refs/heads/main
| 2023-09-04T06:22:51.601208
| 2023-09-01T15:22:00
| 2023-09-01T15:22:00
| 16,479,108
| 12,974
| 3,580
|
BSD-3-Clause
| 2023-09-14T10:45:04
| 2014-02-03T12:41:59
|
Python
|
UTF-8
|
Python
| false
| false
| 22,789
|
py
|
test_audit_log.py
|
import datetime
import json
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.core.serializers.json import DjangoJSONEncoder
from django.test import TestCase
from django.utils import timezone
from freezegun import freeze_time
from wagtail.log_actions import LogActionRegistry
from wagtail.models import (
Page,
PageLogEntry,
PageViewRestriction,
Task,
Workflow,
WorkflowTask,
)
from wagtail.models.audit_log import ModelLogEntry
from wagtail.test.testapp.models import FullFeaturedSnippet, SimplePage
from wagtail.test.utils import WagtailTestUtils
class TestAuditLogManager(WagtailTestUtils, TestCase):
def setUp(self):
self.user = self.create_superuser(
username="administrator",
email="administrator@email.com",
password="password",
)
self.page = Page.objects.get(pk=1)
self.simple_page = self.page.add_child(
instance=SimplePage(
title="Simple page", slug="simple", content="Hello", owner=self.user
)
)
def test_log_action(self):
now = timezone.now()
with freeze_time(now):
entry = PageLogEntry.objects.log_action(
self.page, "wagtail.edit", user=self.user
)
self.assertEqual(entry.content_type, self.page.content_type)
self.assertEqual(entry.user, self.user)
self.assertEqual(entry.timestamp, now)
def test_get_for_model(self):
PageLogEntry.objects.log_action(self.page, "wagtail.edit")
PageLogEntry.objects.log_action(self.simple_page, "wagtail.edit")
entries = PageLogEntry.objects.get_for_model(SimplePage)
self.assertEqual(entries.count(), 2)
self.assertListEqual(
list(entries), list(PageLogEntry.objects.filter(page=self.simple_page))
)
def test_get_for_user(self):
self.assertEqual(
PageLogEntry.objects.get_for_user(self.user).count(), 1
) # the create from setUp
class TestAuditLog(TestCase):
def setUp(self):
self.root_page = Page.objects.get(id=1)
self.home_page = self.root_page.add_child(
instance=SimplePage(title="Homepage", slug="home2", content="hello")
)
PageLogEntry.objects.all().delete() # clean up the log entries here.
def test_page_create(self):
        self.assertEqual(PageLogEntry.objects.count(), 0)  # homepage creation entry was cleared in setUp
page = self.home_page.add_child(
instance=SimplePage(title="Hello", slug="my-page", content="world")
)
self.assertEqual(PageLogEntry.objects.count(), 1)
log_entry = PageLogEntry.objects.order_by("pk").last()
self.assertEqual(log_entry.action, "wagtail.create")
self.assertEqual(log_entry.page_id, page.id)
self.assertEqual(log_entry.content_type, page.content_type)
self.assertEqual(log_entry.label, page.get_admin_display_title())
def test_alias_create_from_published_page_doesnt_log_publish_action(self):
self.home_page.live = True
self.home_page.save()
alias = self.home_page.create_alias(update_slug="the-alias")
self.assertTrue(alias.live)
self.assertEqual(
PageLogEntry.objects.filter(action="wagtail.publish").count(), 0
)
def test_page_edit(self):
# Directly saving a revision should not yield a log entry
self.home_page.save_revision()
self.assertEqual(PageLogEntry.objects.count(), 0)
# Explicitly ask to record the revision change
self.home_page.save_revision(log_action=True)
self.assertEqual(PageLogEntry.objects.count(), 1)
self.assertEqual(PageLogEntry.objects.filter(action="wagtail.edit").count(), 1)
# passing a string for the action should log this.
self.home_page.save_revision(log_action="wagtail.revert")
self.assertEqual(
PageLogEntry.objects.filter(action="wagtail.revert").count(), 1
)
def test_page_publish(self):
revision = self.home_page.save_revision()
revision.publish()
self.assertEqual(PageLogEntry.objects.count(), 1)
self.assertEqual(
PageLogEntry.objects.filter(action="wagtail.publish").count(), 1
)
def test_page_publish_doesnt_log_for_aliases(self):
self.home_page.create_alias(update_slug="the-alias")
revision = self.home_page.save_revision()
revision.publish()
self.assertEqual(
PageLogEntry.objects.filter(action="wagtail.publish").count(), 1
)
def test_page_rename(self):
# Should not log a name change when publishing the first revision
revision = self.home_page.save_revision()
self.home_page.title = "Old title"
self.home_page.save()
revision.publish()
self.assertEqual(
PageLogEntry.objects.filter(action="wagtail.publish").count(), 1
)
self.assertEqual(
PageLogEntry.objects.filter(action="wagtail.rename").count(), 0
)
# Now, check the rename is logged
revision = self.home_page.save_revision()
self.home_page.title = "New title"
self.home_page.save()
revision.publish()
self.assertEqual(PageLogEntry.objects.count(), 3)
self.assertEqual(
PageLogEntry.objects.filter(action="wagtail.publish").count(), 2
)
self.assertEqual(
PageLogEntry.objects.filter(action="wagtail.rename").count(), 1
)
def test_page_unpublish(self):
self.home_page.unpublish()
self.assertEqual(PageLogEntry.objects.count(), 1)
self.assertEqual(
PageLogEntry.objects.filter(action="wagtail.unpublish").count(), 1
)
def test_page_unpublish_doesnt_log_for_aliases(self):
self.home_page.create_alias(update_slug="the-alias")
self.home_page.unpublish()
self.assertEqual(
PageLogEntry.objects.filter(action="wagtail.unpublish").count(), 1
)
def test_revision_revert(self):
revision1 = self.home_page.save_revision()
self.home_page.save_revision()
self.home_page.save_revision(log_action=True, previous_revision=revision1)
self.assertEqual(
PageLogEntry.objects.filter(action="wagtail.revert").count(), 1
)
def test_revision_schedule_publish(self):
go_live_at = datetime.datetime.now() + datetime.timedelta(days=1)
if settings.USE_TZ:
go_live_at = timezone.make_aware(go_live_at)
expected_go_live_at = timezone.localtime(go_live_at, datetime.timezone.utc)
else:
expected_go_live_at = go_live_at
self.home_page.go_live_at = go_live_at
# with no live revision
revision = self.home_page.save_revision()
revision.publish()
log_entries = PageLogEntry.objects.filter(action="wagtail.publish.schedule")
self.assertEqual(log_entries.count(), 1)
self.assertEqual(log_entries[0].data["revision"]["id"], revision.id)
self.assertEqual(
log_entries[0].data["revision"]["go_live_at"],
# skip double quotes
json.dumps(expected_go_live_at, cls=DjangoJSONEncoder)[1:-1],
)
def test_revision_schedule_revert(self):
revision1 = self.home_page.save_revision()
revision2 = self.home_page.save_revision()
if settings.USE_TZ:
self.home_page.go_live_at = timezone.make_aware(
datetime.datetime.now() + datetime.timedelta(days=1)
)
else:
self.home_page.go_live_at = datetime.datetime.now() + datetime.timedelta(
days=1
)
schedule_revision = self.home_page.save_revision(
log_action=True, previous_revision=revision2
)
schedule_revision.publish(previous_revision=revision1)
self.assertListEqual(
list(PageLogEntry.objects.values_list("action", flat=True)),
[
"wagtail.publish.schedule",
"wagtail.revert",
], # order_by -timestamp, by default
)
def test_revision_cancel_schedule(self):
go_live_at = datetime.datetime.now() + datetime.timedelta(days=1)
if settings.USE_TZ:
go_live_at = timezone.make_aware(go_live_at)
expected_go_live_at = timezone.localtime(go_live_at, datetime.timezone.utc)
else:
expected_go_live_at = go_live_at
self.home_page.go_live_at = go_live_at
revision = self.home_page.save_revision()
revision.publish()
revision.approved_go_live_at = None
revision.save(update_fields=["approved_go_live_at"])
log_entries = PageLogEntry.objects.filter(action="wagtail.schedule.cancel")
self.assertEqual(log_entries.count(), 1)
self.assertEqual(log_entries[0].data["revision"]["id"], revision.id)
self.assertEqual(
log_entries[0].data["revision"]["go_live_at"],
# skip double quotes
json.dumps(expected_go_live_at, cls=DjangoJSONEncoder)[1:-1],
)
# The home_page was live already and we've only cancelled the publication of the above revision.
self.assertTrue(log_entries[0].data["revision"]["has_live_version"])
def test_page_lock_unlock(self):
self.home_page.save(log_action="wagtail.lock")
self.home_page.save(log_action="wagtail.unlock")
self.assertEqual(
PageLogEntry.objects.filter(
action__in=["wagtail.lock", "wagtail.unlock"]
).count(),
2,
)
def test_page_copy(self):
self.home_page.copy(update_attrs={"title": "About us", "slug": "about-us"})
self.assertListEqual(
list(PageLogEntry.objects.values_list("action", flat=True)),
["wagtail.publish", "wagtail.copy", "wagtail.create"],
)
def test_page_reorder(self):
section_1 = self.root_page.add_child(
instance=SimplePage(title="Child 1", slug="child-1", content="hello")
)
self.root_page.add_child(
instance=SimplePage(title="Child 2", slug="child-2", content="hello")
)
user = get_user_model().objects.first()
# Reorder section 1 to be the last page under root_page.
# This should log as `wagtail.reorder` because the page was moved under the same parent page
section_1.move(self.root_page, user=user, pos="last-child")
self.assertEqual(
PageLogEntry.objects.filter(action="wagtail.reorder", user=user).count(), 1
)
self.assertEqual(
PageLogEntry.objects.filter(action="wagtail.move", user=user).count(), 0
)
def test_page_move(self):
section = self.root_page.add_child(
instance=SimplePage(title="About us", slug="about", content="hello")
)
user = get_user_model().objects.first()
# move() interprets `target` as an intended 'sibling' by default, so
# we must use `pos` to indicate that `self.home_page` should be the
# new 'parent'
section.move(self.home_page, pos="last-child", user=user)
self.assertEqual(
PageLogEntry.objects.filter(action="wagtail.move", user=user).count(), 1
)
self.assertEqual(
PageLogEntry.objects.filter(action="wagtail.reorder", user=user).count(), 0
)
def test_page_delete(self):
self.home_page.add_child(
instance=SimplePage(title="Child", slug="child-page", content="hello")
)
child = self.home_page.add_child(
instance=SimplePage(
title="Another child", slug="child-page-2", content="hello"
)
)
child.add_child(
instance=SimplePage(
title="Grandchild", slug="grandchild-page", content="hello"
)
)
# check deleting a parent page logs descendent deletion
self.home_page.delete()
self.assertEqual(
PageLogEntry.objects.filter(action="wagtail.delete").count(), 4
)
self.assertEqual(
set(
PageLogEntry.objects.filter(action="wagtail.delete").values_list(
"label", flat=True
)
),
{
"Homepage (simple page)",
"Grandchild (simple page)",
"Child (simple page)",
"Another child (simple page)",
},
)
def test_workflow_actions(self):
workflow = Workflow.objects.create(name="test_workflow")
task_1 = Task.objects.create(name="test_task_1")
task_2 = Task.objects.create(name="test_task_2")
WorkflowTask.objects.create(workflow=workflow, task=task_1, sort_order=1)
WorkflowTask.objects.create(workflow=workflow, task=task_2, sort_order=2)
self.home_page.save_revision()
user = get_user_model().objects.first()
workflow_state = workflow.start(self.home_page, user)
workflow_entry = PageLogEntry.objects.filter(action="wagtail.workflow.start")
self.assertEqual(workflow_entry.count(), 1)
self.assertEqual(
workflow_entry[0].data,
{
"workflow": {
"id": workflow.id,
"title": workflow.name,
"status": workflow_state.status,
"task_state_id": workflow_state.current_task_state_id,
"next": {
"id": workflow_state.current_task_state.task.id,
"title": workflow_state.current_task_state.task.name,
},
}
},
)
# Approve
for action in ["approve", "reject"]:
with self.subTest(action):
task_state = workflow_state.current_task_state
task_state.task.on_action(
task_state,
user=None,
action_name=action,
comment="This is my comment",
)
workflow_state.refresh_from_db()
entry = PageLogEntry.objects.filter(action=f"wagtail.workflow.{action}")
self.assertEqual(entry.count(), 1)
self.assertEqual(
entry[0].data,
{
"workflow": {
"id": workflow.id,
"title": workflow.name,
"status": task_state.status,
"task_state_id": task_state.id,
"task": {
"id": task_state.task.id,
"title": task_state.task.name,
},
"next": {
"id": workflow_state.current_task_state.task.id,
"title": workflow_state.current_task_state.task.name,
},
},
"comment": "This is my comment",
},
)
self.assertEqual(entry[0].comment, "This is my comment")
def test_snippet_workflow_actions(self):
workflow = Workflow.objects.create(name="test_workflow")
task_1 = Task.objects.create(name="test_task_1")
task_2 = Task.objects.create(name="test_task_2")
WorkflowTask.objects.create(workflow=workflow, task=task_1, sort_order=1)
WorkflowTask.objects.create(workflow=workflow, task=task_2, sort_order=2)
snippet = FullFeaturedSnippet.objects.create(text="Initial", live=False)
snippet.save_revision()
user = get_user_model().objects.first()
workflow_state = workflow.start(snippet, user)
workflow_entry = ModelLogEntry.objects.filter(action="wagtail.workflow.start")
self.assertEqual(workflow_entry.count(), 1)
self.assertEqual(
workflow_entry[0].data,
{
"workflow": {
"id": workflow.id,
"title": workflow.name,
"status": workflow_state.status,
"task_state_id": workflow_state.current_task_state_id,
"next": {
"id": workflow_state.current_task_state.task.id,
"title": workflow_state.current_task_state.task.name,
},
}
},
)
# Approve
for action in ["approve", "reject"]:
with self.subTest(action):
task_state = workflow_state.current_task_state
task_state.task.on_action(
task_state,
user=None,
action_name=action,
comment="This is my comment",
)
workflow_state.refresh_from_db()
entry = ModelLogEntry.objects.filter(
action=f"wagtail.workflow.{action}"
)
self.assertEqual(entry.count(), 1)
self.assertEqual(
entry[0].data,
{
"workflow": {
"id": workflow.id,
"title": workflow.name,
"status": task_state.status,
"task_state_id": task_state.id,
"task": {
"id": task_state.task.id,
"title": task_state.task.name,
},
"next": {
"id": workflow_state.current_task_state.task.id,
"title": workflow_state.current_task_state.task.name,
},
},
"comment": "This is my comment",
},
)
self.assertEqual(entry[0].comment, "This is my comment")
def test_workflow_completions_logs_publishing_user(self):
workflow = Workflow.objects.create(name="test_workflow")
task_1 = Task.objects.create(name="test_task_1")
WorkflowTask.objects.create(workflow=workflow, task=task_1, sort_order=1)
self.assertFalse(PageLogEntry.objects.filter(action="wagtail.publish").exists())
self.home_page.save_revision()
user = get_user_model().objects.first()
workflow_state = workflow.start(self.home_page, user)
publisher = get_user_model().objects.last()
task_state = workflow_state.current_task_state
task_state.task.on_action(task_state, user=None, action_name="approve")
self.assertEqual(
PageLogEntry.objects.get(action="wagtail.publish").user, publisher
)
def test_snippet_workflow_completions_logs_publishing_user(self):
workflow = Workflow.objects.create(name="test_workflow")
task_1 = Task.objects.create(name="test_task_1")
WorkflowTask.objects.create(workflow=workflow, task=task_1, sort_order=1)
self.assertFalse(
ModelLogEntry.objects.filter(action="wagtail.publish").exists()
)
snippet = FullFeaturedSnippet.objects.create(text="Initial", live=False)
snippet.save_revision()
user = get_user_model().objects.first()
workflow_state = workflow.start(snippet, user)
publisher = get_user_model().objects.last()
task_state = workflow_state.current_task_state
task_state.task.on_action(task_state, user=None, action_name="approve")
self.assertEqual(
ModelLogEntry.objects.get(action="wagtail.publish").user, publisher
)
def test_page_privacy(self):
restriction = PageViewRestriction.objects.create(page=self.home_page)
self.assertEqual(
PageLogEntry.objects.filter(
action="wagtail.view_restriction.create"
).count(),
1,
)
restriction.restriction_type = PageViewRestriction.PASSWORD
restriction.save()
self.assertEqual(
PageLogEntry.objects.filter(action="wagtail.view_restriction.edit").count(),
1,
)
def test_hook(actions):
return actions.register_action("test.custom_action", "Custom action", "Tested!")
class TestAuditLogHooks(WagtailTestUtils, TestCase):
def setUp(self):
self.root_page = Page.objects.get(id=2)
def test_register_log_actions_hook(self):
log_actions = LogActionRegistry()
self.assertTrue(log_actions.action_exists("wagtail.create"))
def test_action_must_be_registered(self):
# We check actions are registered to let developers know if they have forgotten to register
# a new action or made a spelling mistake. It's not intended as a database-level constraint.
with self.assertRaises(ValidationError) as e:
PageLogEntry.objects.log_action(self.root_page, action="test.custom_action")
self.assertEqual(
e.exception.message_dict,
{
"action": [
"The log action 'test.custom_action' has not been registered."
]
},
)
def test_action_format_message(self):
# All new logs should pass our validation, but older logs or logs that were added in bulk
# may be invalid.
# Using LogEntry.objects.update, we can bypass the on save validation.
log_entry = PageLogEntry.objects.log_action(
self.root_page, action="wagtail.create"
)
PageLogEntry.objects.update(action="test.custom_action")
log_entry.refresh_from_db()
log_actions = LogActionRegistry()
self.assertEqual(log_entry.message, "Unknown test.custom_action")
self.assertFalse(log_actions.action_exists("test.custom_action"))
with self.register_hook("register_log_actions", test_hook):
log_actions = LogActionRegistry()
self.assertTrue(log_actions.action_exists("test.custom_action"))
self.assertEqual(
log_actions.get_formatter(log_entry).format_message(log_entry),
"Tested!",
)
self.assertEqual(
log_actions.get_action_label("test.custom_action"), "Custom action"
)
|
a67d83b291954bb50fd75df5ff5f2e04cd723e25
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/rainbird/config_flow.py
|
a784e4623d6e2694b52036e9378eb8213c183e91
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 5,079
|
py
|
config_flow.py
|
"""Config flow for Rain Bird."""
from __future__ import annotations
import asyncio
import logging
from typing import Any
from pyrainbird.async_client import (
AsyncRainbirdClient,
AsyncRainbirdController,
RainbirdApiException,
)
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_PASSWORD
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers import config_validation as cv, selector
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import (
ATTR_DURATION,
CONF_SERIAL_NUMBER,
DEFAULT_TRIGGER_TIME_MINUTES,
DOMAIN,
TIMEOUT_SECONDS,
)
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): selector.TextSelector(),
vol.Required(CONF_PASSWORD): selector.TextSelector(
selector.TextSelectorConfig(type=selector.TextSelectorType.PASSWORD)
),
}
)
class ConfigFlowError(Exception):
"""Error raised during a config flow."""
def __init__(self, message: str, error_code: str) -> None:
"""Initialize ConfigFlowError."""
super().__init__(message)
self.error_code = error_code
class RainbirdConfigFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Rain Bird."""
@staticmethod
@callback
def async_get_options_flow(
config_entry: ConfigEntry,
) -> RainBirdOptionsFlowHandler:
"""Define the config flow to handle options."""
return RainBirdOptionsFlowHandler(config_entry)
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Configure the Rain Bird device."""
error_code: str | None = None
if user_input:
try:
serial_number = await self._test_connection(
user_input[CONF_HOST], user_input[CONF_PASSWORD]
)
except ConfigFlowError as err:
_LOGGER.error("Error during config flow: %s", err)
error_code = err.error_code
else:
return await self.async_finish(
serial_number,
data={
CONF_HOST: user_input[CONF_HOST],
CONF_PASSWORD: user_input[CONF_PASSWORD],
CONF_SERIAL_NUMBER: serial_number,
},
options={ATTR_DURATION: DEFAULT_TRIGGER_TIME_MINUTES},
)
return self.async_show_form(
step_id="user",
data_schema=DATA_SCHEMA,
errors={"base": error_code} if error_code else None,
)
async def _test_connection(self, host: str, password: str) -> str:
"""Test the connection and return the device serial number.
Raises a ConfigFlowError on failure.
"""
controller = AsyncRainbirdController(
AsyncRainbirdClient(
async_get_clientsession(self.hass),
host,
password,
)
)
try:
async with asyncio.timeout(TIMEOUT_SECONDS):
return await controller.get_serial_number()
except asyncio.TimeoutError as err:
raise ConfigFlowError(
f"Timeout connecting to Rain Bird controller: {str(err)}",
"timeout_connect",
) from err
except RainbirdApiException as err:
raise ConfigFlowError(
f"Error connecting to Rain Bird controller: {str(err)}",
"cannot_connect",
) from err
async def async_finish(
self,
serial_number: str,
data: dict[str, Any],
options: dict[str, Any],
) -> FlowResult:
"""Create the config entry."""
await self.async_set_unique_id(serial_number)
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=data[CONF_HOST],
data=data,
options=options,
)
class RainBirdOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a RainBird options flow."""
def __init__(self, config_entry: ConfigEntry) -> None:
"""Initialize RainBirdOptionsFlowHandler."""
self.config_entry = config_entry
async def async_step_init(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(data=user_input)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
ATTR_DURATION,
default=self.config_entry.options[ATTR_DURATION],
): cv.positive_int,
}
),
)
|
b8498d0c8c4a35aba63c147aba7c7b9aa707da9a
|
6436d1e6c23f9f43a8025889dc4414a3ad66acf2
|
/Assets/Python/BUG/FontUtil.py
|
f2bfe300010b8ae365eded61aec8d51f33a9f6b9
|
[
"MIT"
] |
permissive
|
dguenms/Dawn-of-Civilization
|
b710195c4f46fe11d9229182c3b1e07b77f42637
|
a305e7846d085d6edf1e9c472e8dfceee1c07dd4
|
refs/heads/develop
| 2023-09-04T04:57:00.086384
| 2023-09-01T15:24:28
| 2023-09-01T15:24:28
| 45,362,597
| 116
| 121
|
MIT
| 2023-02-08T00:18:53
| 2015-11-01T23:52:28
|
C++
|
UTF-8
|
Python
| false
| false
| 6,297
|
py
|
FontUtil.py
|
## FontUtil
##
## Utilities for dealing with FontSymbols, "XXX_CHAR" keys, and "[ICON_XXX]" tags in XML messages.
## "init.xml" adds most of the built-in and BUG-related symbol keys by using this module's functions.
## You can add your own using the <symbol> XML entity.
##
## Getting Symbols
##
## getSymbol(symbolOrKey)
## Returns a FontSymbols instance matching the given symbol or key.
## If passed a FontSymbols instance, it is returned. If a string, it is looked
## up in this modules list of known symbols.
##
## getChar(symbolOrKey)
## Returns a single-character string for the desired <symbolOrKey>.
##
## getOrdinal(symbolOrKey)
## Returns the Unicode ordinal for the desired <symbolOrKey>.
##
## Message Processing
##
## replaceSymbols(text, unknownReplacement)
## Returns a copy of <text> after replacing all occurrences of "[ICON_XXX]" with
## the symbols registered in this module. Any symbol that isn't found is replaced
## with <unknownReplacement> (default "").
##
## Notes
## - Must be initialized externally by calling init()
##
## Copyright (c) 2008 The BUG Mod.
##
## Author: EmperorFool
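##
## Example (an illustrative sketch, not from the original mod; it assumes
## init() has already run and that a hypothetical "GOLD_CHAR" symbol has
## been registered):
##
##   icon = getChar("gold")                       # one-character unicode string
##   text = replaceSymbols("Cost: 5[ICON_GOLD]")  # tag replaced with the icon
##   text = replaceSymbols("[ICON_BOGUS]", "?")   # unknown symbols become "?"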
from CvPythonExtensions import *
import BugConfig
import BugDll
import BugUtil
import CvUtil
import re
## constants
UNKNOWN_CHAR = "?"
SYMBOL_REGEXP = re.compile(r"\[ICON_([a-zA-Z0-9_]+)\]")
## globals
gc = CyGlobalContext()
nextSymbolID = int(FontSymbols.MAX_NUM_SYMBOLS)
# key -> symbol (FontSymbols)
keySymbols = {}
# symbol -> primary key (string)
symbolPrimaryKeys = {}
# symbol -> ordinal (int)
symbolOrdinals = {}
# symbol -> character (unicode string)
symbolChars = {}
## initialization and registration
def init():
symbolNames = {}
for name, symbol in FontSymbols.__dict__.iteritems():
if name.endswith("_CHAR") and isinstance(symbol, FontSymbols):
symbolNames[symbol] = name
for key, symbol in CvUtil.OtherFontIcons.iteritems():
addBuiltinSymbol(key, symbol)
if symbol in symbolNames:
name = symbolNames[symbol]
registerSymbolSynonym(key, symbol, name[:-5])
registerSymbolSynonym(key, symbol, name)
# del symbolNames[symbol]
# add the FontSymbols that aren't in CvUtil
# for symbol, name in symbolNames:
# addBuiltinSymbol()
for count, getInfo in (
(YieldTypes.NUM_YIELD_TYPES, gc.getYieldInfo),
(CommerceTypes.NUM_COMMERCE_TYPES, gc.getCommerceInfo),
):
for enum in range(count):
info = getInfo(enum)
addSymbol(info.getType().lower().replace("_", " "),
info.getChar(), info.getType())
def addBuiltinSymbol(key, symbol):
registerSymbol(key, symbol, gc.getGame().getSymbolID(symbol))
def addOffsetSymbol(key, symbolOrKey, offset, name=None):
return addSymbol(key, getOrdinal(symbolOrKey) + offset, name)
def addSymbol(key, ordinal, name=None):
if not name:
name = key.upper().replace(" ", "_")
else:
name = name.upper().replace(" ", "_")
symbolName = name + "_CHAR"
symbol = findOrCreateSymbol(symbolName)
registerSymbol(key, symbol, ordinal)
registerSymbolSynonym(key, symbol, name)
registerSymbolSynonym(key, symbol, symbolName)
return symbol
def findOrCreateSymbol(name):
try:
symbol = getattr(FontSymbols, name)
if isinstance(symbol, FontSymbols):
BugUtil.debug("FontUtil - found FontSymbols name %s", name)
return symbol
except AttributeError:
pass
# create a FontSymbols enum for it
global nextSymbolID
symbol = FontSymbols(nextSymbolID)
nextSymbolID += 1
BugUtil.debug("FontUtil - created FontSymbols.%s", name)
setattr(FontSymbols, name, symbol)
return symbol
def registerSymbol(key, symbol, ordinal):
BugUtil.info("FontUtil - registering symbol '%s' for %d", key, ordinal)
if key in keySymbols:
raise BugUtil.ConfigError("duplicate font symbol key '%s'" % key)
if symbol in symbolPrimaryKeys:
raise BugUtil.ConfigError("duplicate font symbol for key '%s'" % key)
keySymbols[key] = symbol
symbolPrimaryKeys[symbol] = key
symbolOrdinals[symbol] = ordinal
symbolChars[symbol] = u"%c" % ordinal
def registerSymbolSynonym(key, symbol, synonym):
if synonym in keySymbols:
BugUtil.warn("FontUtil - ignoring duplicate synonym '%s' for key '%s'", synonym, key)
else:
BugUtil.debug("FontUtil - registering synonym '%s'", synonym)
keySymbols[synonym] = symbol
## symbol lookup
def getSymbol(symbolOrKey):
if isinstance(symbolOrKey, FontSymbols):
return symbolOrKey
try:
return keySymbols[symbolOrKey]
except KeyError:
try:
return keySymbols[symbolOrKey.upper() + "_CHAR"]
except KeyError:
raise BugUtil.ConfigError("unknown font symbol or key '%s'" % str(symbolOrKey))
def getOrdinal(symbolOrKey):
try:
return symbolOrdinals[getSymbol(symbolOrKey)]
except KeyError:
raise BugUtil.ConfigError("unknown font symbol or key '%s'" % str(symbolOrKey))
def getChar(symbolOrKey):
try:
return symbolChars[getSymbol(symbolOrKey)]
except KeyError:
raise BugUtil.ConfigError("unknown font symbol or key '%s'" % str(symbolOrKey))
## message processing
def replaceSymbols(text, unknownReplacement=""):
def replace(match):
try:
return getChar(match.group(1))
except BugUtil.ConfigError:
return unknownReplacement
return SYMBOL_REGEXP.sub(replace, text)
## configuration handler
class SymbolHandler(BugConfig.Handler):
TAG = "symbol"
def __init__(self):
BugConfig.Handler.__init__(self, SymbolHandler.TAG, "id name from offset dll")
self.addAttribute("id", True)
self.addAttribute("name")
self.addAttribute("from")
self.addAttribute("offset")
self.addAttribute("dll")
self.lastSymbol = None
def handle(self, element, id, name, fromKey, offset, dll):
dll = BugDll.decode(dll)
if self.isDllOkay(element, dll):
if not fromKey:
if not self.lastSymbol:
raise BugUtil.ConfigError("<%s> %s requires an offset symbol" % (element.tag, id))
fromKey = self.lastSymbol
if offset is None:
offset = 1
else:
offset = int(offset)
self.lastSymbol = addOffsetSymbol(id, fromKey, offset, name)
else:
BugUtil.info("FontUtil - ignoring <%s> %s, requires dll version %s", element.tag, id, self.resolveDll(element, dll))
|
f4103c34a92e2faf453a58eb651b8fb1078a18e0
|
517c4d617819aa2090094e92f1817e593354409d
|
/spectral/tests/iterators.py
|
1da443e2dc6c150997cb405f3582ebfbbf27aa69
|
[
"MIT"
] |
permissive
|
spectralpython/spectral
|
2080a7f5d304017827dded890c7617f57335f61b
|
0659ee71614455d99a80ffd4f5f5edd8d032608c
|
refs/heads/master
| 2023-09-04T07:34:08.699690
| 2022-11-13T16:54:18
| 2022-11-13T16:54:18
| 16,567,502
| 527
| 144
|
MIT
| 2023-09-07T11:33:02
| 2014-02-06T03:36:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,748
|
py
|
iterators.py
|
'''
Runs unit tests for iterators.
To run the unit tests, type the following from the system command line:
# python -m spectral.tests.iterators
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from numpy.testing import assert_allclose
import spectral as spy
from spectral.algorithms.algorithms import iterator, iterator_ij
from spectral.tests.spytest import SpyTest
class IteratorTest(SpyTest):
'''Tests various math functions.'''
def setup(self):
self.image = spy.open_image('92AV3C.lan')
self.gt = spy.open_image('92AV3GT.GIS').read_band(0)
def test_iterator_all(self):
'''Iteration over all pixels.'''
data = self.image.load()
pixels = data.reshape((-1, data.shape[-1]))
sum = np.sum(pixels, 0)
itsum = np.sum(np.array([x for x in iterator(data)]), 0)
assert_allclose(sum, itsum)
def test_iterator_nonzero(self):
'''Iteration over all non-background pixels.'''
data = self.image.load()
classes = self.gt.ravel()
pixels = data.reshape((-1, data.shape[-1]))
sum = np.sum(pixels[classes > 0], 0)
itsum = np.sum(np.array([x for x in iterator(data, self.gt)]), 0)
assert_allclose(sum, itsum)
def test_iterator_index(self):
'''Iteration over single ground truth index'''
cls = 5
data = self.image.load()
classes = self.gt.ravel()
pixels = data.reshape((-1, data.shape[-1]))
sum = np.sum(pixels[classes == cls], 0)
itsum = np.sum(np.array([x for x in iterator(data, self.gt, cls)]), 0)
assert_allclose(sum, itsum)
def test_iterator_ij_nonzero(self):
'''Iteration over all non-background pixels.'''
data = self.image.load()
classes = self.gt.ravel()
pixels = data.reshape((-1, data.shape[-1]))
sum = np.sum(pixels[classes > 0], 0)
itsum = np.sum(np.array([data[ij] for ij in iterator_ij(self.gt)]), 0)
assert_allclose(sum, itsum)
def test_iterator_ij_index(self):
'''Iteration over single ground truth index'''
cls = 5
data = self.image.load()
classes = self.gt.ravel()
pixels = data.reshape((-1, data.shape[-1]))
sum = np.sum(pixels[classes == cls], 0)
itsum = np.sum(np.array([data[ij] for ij in iterator_ij(self.gt,
cls)]),
0)
assert_allclose(sum, itsum)
def test_iterator_spyfile(self):
'''Iteration over SpyFile object for single ground truth index'''
cls = 5
data = self.image.load()
classes = self.gt.ravel()
pixels = data.reshape((-1, data.shape[-1]))
sum = np.sum(pixels[classes == cls], 0)
itsum = np.sum(np.array([x for x in iterator(self.image, self.gt, cls)]),
0)
assert_allclose(sum, itsum)
def test_iterator_spyfile_nomemmap(self):
'''Iteration over SpyFile object without memmap'''
cls = 5
data = self.image.load()
classes = self.gt.ravel()
pixels = data.reshape((-1, data.shape[-1]))
sum = np.sum(pixels[classes == cls], 0)
image = spy.open_image('92AV3C.lan')
itsum = np.sum(np.array([x for x in iterator(image, self.gt, cls)]), 0)
assert_allclose(sum, itsum)
def run():
print('\n' + '-' * 72)
print('Running iterator tests.')
print('-' * 72)
test = IteratorTest()
test.run()
if __name__ == '__main__':
from spectral.tests.run import parse_args, reset_stats, print_summary
parse_args()
reset_stats()
run()
print_summary()
|
8c5750e7d7134ba99d912d9aa99775d0c1c90647
|
1fb7d21fdb9d0ee6f2a53e626f862d54dcccdbce
|
/update.py
|
e55c8d871f301e9b87c52a46cfac8d28e6d556a0
|
[
"MIT"
] |
permissive
|
xfgryujk/blivechat
|
2b1db9de1047344e45051d680461d8ff61bdd99e
|
fe141bc8fda73e2a683c43181bc4043f9739bb1b
|
refs/heads/dev
| 2023-09-04T10:21:50.371035
| 2023-09-03T14:53:15
| 2023-09-03T14:53:15
| 187,993,487
| 1,695
| 196
|
MIT
| 2023-09-11T11:07:26
| 2019-05-22T08:13:21
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 916
|
py
|
update.py
|
# -*- coding: utf-8 -*-
import asyncio
import aiohttp
import utils.request
VERSION = 'v1.7.0'
def check_update():
asyncio.get_event_loop().create_task(_do_check_update())
async def _do_check_update():
try:
async with utils.request.http_session.get(
'https://api.github.com/repos/xfgryujk/blivechat/releases/latest'
) as r:
data = await r.json()
if data['name'] != VERSION:
print('---------------------------------------------')
print('New version available:', data['name'])
print(data['body'])
print('Download:', data['html_url'])
print('---------------------------------------------')
except aiohttp.ClientConnectionError:
print('Failed to check update: connection failed')
except asyncio.TimeoutError:
print('Failed to check update: timeout')
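# For reference, the GitHub "latest release" response used above contains at
# least the 'name', 'body' and 'html_url' fields read by _do_check_update
# (illustrative values):
#   {"name": "v1.8.0", "body": "changelog...", "html_url": "https://github.com/..."}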
|
c3cf85b87dce07638eca1ce82ac75c1529a0ddc2
|
c6e6c564cf03427de02e78f436bdf7483e13402f
|
/tests/test_visitors/test_tokenize/test_primitives/test_string_tokens/test_unicode_prefix.py
|
657bd2cd3e3d86addd9a82b213a5bdc4ea14fa7b
|
[
"MIT"
] |
permissive
|
wemake-services/wemake-python-styleguide
|
5a60ff468bf7877008c8ed34467da8bdbc2398f2
|
96e482514a60c12e99ee235337e678c9a4e484e3
|
refs/heads/master
| 2023-08-31T14:42:36.827760
| 2023-08-29T05:54:18
| 2023-08-29T05:54:18
| 124,593,057
| 2,427
| 572
|
MIT
| 2023-09-13T07:15:00
| 2018-03-09T21:04:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,891
|
py
|
test_unicode_prefix.py
|
import pytest
from wemake_python_styleguide.violations.consistency import (
UnicodeStringViolation,
UppercaseStringModifierViolation,
)
from wemake_python_styleguide.visitors.tokenize.primitives import (
WrongStringTokenVisitor,
)
@pytest.mark.parametrize('primitive', [
'u"text"',
"u'unicode'",
'u"5_5"',
'u""',
])
def test_unicode_prefix(
parse_tokens,
assert_errors,
assert_error_text,
default_options,
primitives_usages,
primitive,
mode,
):
"""Ensures that unicode prefixes raise a warning."""
file_tokens = parse_tokens(mode(primitives_usages.format(primitive)))
visitor = WrongStringTokenVisitor(default_options, file_tokens=file_tokens)
visitor.run()
assert_errors(visitor, [UnicodeStringViolation])
assert_error_text(visitor, primitive)
@pytest.mark.parametrize('primitive', [
'"name"',
'r"text with escape carac \n"',
"b'unicode'",
'"u"',
'"12"',
'b""',
])
def test_correct_strings(
parse_tokens,
assert_errors,
default_options,
primitives_usages,
primitive,
mode,
):
"""Ensures that correct strings are fine."""
file_tokens = parse_tokens(
mode(primitives_usages.format(primitive)),
do_compile=False,
)
visitor = WrongStringTokenVisitor(default_options, file_tokens=file_tokens)
visitor.run()
assert_errors(visitor, [])
def test_unicode_regression(
parse_tokens,
assert_errors,
default_options,
primitives_usages,
mode,
):
"""Ensures that correct uppercase unicode string raises two violations."""
file_tokens = parse_tokens(mode(primitives_usages.format('U""')))
visitor = WrongStringTokenVisitor(default_options, file_tokens=file_tokens)
visitor.run()
assert_errors(visitor, [
UnicodeStringViolation,
UppercaseStringModifierViolation,
])
|
75b83d93261df843f80084e6e03290227b92584f
|
eedebf357f678c1418c50c236b4bad1ef254c5be
|
/test/wdt_slow_receiver_test.py
|
0308df4c9f47a1f9a35dc6d5da327cd7f8cf1909
|
[
"BSD-3-Clause"
] |
permissive
|
facebook/wdt
|
c915a71e42ea0d945015eff0140731856ab2d15d
|
3b52ef573129fb799319630bd438717761111f57
|
refs/heads/main
| 2023-09-05T07:25:03.544481
| 2023-07-11T18:42:54
| 2023-07-11T18:42:54
| 22,231,878
| 3,038
| 508
|
NOASSERTION
| 2023-04-16T13:37:33
| 2014-07-24T21:23:34
|
C++
|
UTF-8
|
Python
| false
| false
| 590
|
py
|
wdt_slow_receiver_test.py
|
#!/usr/bin/env python3
# In this test, the receiver is extremely slow. The sender should detect that
# and wait for the receiver to finish. The sender's read timeout is kept small
# to trigger the TCP unacked-bytes checking code.
from common_utils import *
# 1 time setup
create_test_directory("/tmp")
generate_random_files(140 * 1024 * 1024)
# test(s)
start_test("slow receiver")
start_receiver("-num_ports=1 -avg_mbytes_per_sec=10")
run_sender("-read_timeout_millis=300 -num_ports=1 -enable_perf_stat_collection")
check_transfer_status()
# md5 and cleanup at the end
exit(verify_transfer_success())
|
1d8fe7325cfad236a1576281ab8968af7ae4e987
|
c1b8b6080f29c8037100080298b897618a826475
|
/gammapy/astro/source/tests/test_pwn.py
|
d371bc38bb54b8e44b3e6d185ea52c829aae84e7
|
[
"BSD-3-Clause"
] |
permissive
|
gammapy/gammapy
|
a5d7acbdde848e92e124fefbce9716faa296f572
|
60f03adb8fc7851b9f3ca039512c03a669e3fe10
|
refs/heads/main
| 2023-08-16T21:19:06.624561
| 2023-08-04T12:13:08
| 2023-08-04T12:13:08
| 10,073,640
| 204
| 184
|
BSD-3-Clause
| 2023-09-14T15:26:05
| 2013-05-15T07:50:40
|
Python
|
UTF-8
|
Python
| false
| false
| 675
|
py
|
test_pwn.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from numpy.testing import assert_allclose
from astropy.units import Quantity
from gammapy.astro.source import PWN
t = Quantity([0, 1, 10, 100, 1000, 10000, 100000], "yr")
pwn = PWN()
def test_PWN_radius():
"""Test SNR luminosity"""
r = [0, 1.334e14, 2.114e15, 3.350e16, 5.310e17, 6.927e17, 6.927e17]
assert_allclose(pwn.radius(t).to_value("cm"), r, rtol=1e-3)
def test_magnetic_field():
"""Test SNR luminosity"""
b = [np.nan, 1.753e-03, 8.788e-05, 4.404e-06, 2.207e-07, 4.685e-07, 1.481e-06]
assert_allclose(pwn.magnetic_field(t).to_value("gauss"), b, rtol=1e-3)
|
8086dac2dba479eaca377a7e256fb4dc8c3e17e0
|
154d2907648416fcecefed51deac0f5d021edabb
|
/tests/stream/test_playback.py
|
90cdc6b699c7105726707372d3fef905fe3d8982
|
[
"Apache-2.0"
] |
permissive
|
mopidy/mopidy
|
dbeb376d9c8f85fc0008195a963877b5e0192e18
|
3e8c978d6ffe22fb581a0fec7a47c489ae9bafd4
|
refs/heads/develop
| 2023-08-30T02:09:54.224844
| 2023-08-21T16:29:02
| 2023-08-21T16:29:02
| 447,036
| 7,271
| 783
|
Apache-2.0
| 2023-08-21T16:29:03
| 2009-12-23T14:25:36
|
Python
|
UTF-8
|
Python
| false
| false
| 8,174
|
py
|
test_playback.py
|
import logging
from pathlib import Path
from unittest import mock
import pytest
import requests.exceptions
import responses
from mopidy import exceptions
from mopidy.audio import scan
from mopidy.stream import actor
TIMEOUT = 1000
PLAYLIST_URI = "http://example.com/listen.m3u"
STREAM_URI = "http://example.com/stream.mp3"
BODY = """
#EXTM3U
http://example.com/stream.mp3
http://foo.bar/baz
""".strip()
@pytest.fixture()
def config():
return {
"proxy": {},
"stream": {
"timeout": TIMEOUT,
"metadata_blacklist": [],
"protocols": ["http"],
},
"file": {"enabled": False},
}
@pytest.fixture()
def audio():
return mock.Mock()
@pytest.fixture()
def scanner():
patcher = mock.patch.object(scan, "Scanner")
yield patcher.start()()
patcher.stop()
@pytest.fixture()
def backend(audio, config, scanner):
return actor.StreamBackend(audio=audio, config=config)
@pytest.fixture()
def provider(backend):
return backend.playback
class TestTranslateURI:
@responses.activate
def test_audio_stream_returns_same_uri(self, scanner, provider):
scanner.scan.side_effect = [
# Set playable to False to test detection by mimetype
mock.Mock(mime="audio/mpeg", playable=False),
]
result = provider.translate_uri(STREAM_URI)
scanner.scan.assert_called_once_with(STREAM_URI, timeout=mock.ANY)
assert result == STREAM_URI
@responses.activate
def test_playable_ogg_stream_is_not_considered_a_playlist(self, scanner, provider):
scanner.scan.side_effect = [
# Set playable to True to ignore detection as possible playlist
mock.Mock(mime="application/ogg", playable=True),
]
result = provider.translate_uri(STREAM_URI)
scanner.scan.assert_called_once_with(STREAM_URI, timeout=mock.ANY)
assert result == STREAM_URI
@responses.activate
def test_text_playlist_with_mpeg_stream(self, scanner, provider, caplog):
caplog.set_level(logging.DEBUG)
scanner.scan.side_effect = [
# Scanning playlist
mock.Mock(mime="text/foo", playable=False),
# Scanning stream
mock.Mock(mime="audio/mpeg", playable=True),
]
responses.add(
responses.GET,
PLAYLIST_URI,
body=BODY,
content_type="audio/x-mpegurl",
)
result = provider.translate_uri(PLAYLIST_URI)
assert scanner.scan.mock_calls == [
mock.call(PLAYLIST_URI, timeout=mock.ANY),
mock.call(STREAM_URI, timeout=mock.ANY),
]
assert result == STREAM_URI
# Check logging to ensure debuggability
assert f"Unwrapping stream from URI: {PLAYLIST_URI}" in caplog.text
assert f"Parsed playlist ({PLAYLIST_URI})" in caplog.text
assert f"Unwrapping stream from URI: {STREAM_URI}" in caplog.text
assert f"Unwrapped potential audio/mpeg stream: {STREAM_URI}" in caplog.text
# Check proper Requests session setup
assert (
responses.calls[0]
.request.headers["User-Agent"]
.startswith("Mopidy-Stream/")
)
@responses.activate
def test_xml_playlist_with_mpeg_stream(self, scanner, provider):
scanner.scan.side_effect = [
# Scanning playlist
mock.Mock(mime="application/xspf+xml", playable=False),
# Scanning stream
mock.Mock(mime="audio/mpeg", playable=True),
]
responses.add(
responses.GET,
PLAYLIST_URI,
body=BODY,
content_type="application/xspf+xml",
)
result = provider.translate_uri(PLAYLIST_URI)
assert scanner.scan.mock_calls == [
mock.call(PLAYLIST_URI, timeout=mock.ANY),
mock.call(STREAM_URI, timeout=mock.ANY),
]
assert result == STREAM_URI
@responses.activate
def test_scan_fails_but_playlist_parsing_succeeds(self, scanner, provider, caplog):
caplog.set_level(logging.DEBUG)
scanner.scan.side_effect = [
# Scanning playlist
exceptions.ScannerError("some failure"),
# Scanning stream
mock.Mock(mime="audio/mpeg", playable=True),
]
responses.add(
responses.GET,
PLAYLIST_URI,
body=BODY,
content_type="audio/x-mpegurl",
)
result = provider.translate_uri(PLAYLIST_URI)
assert f"Unwrapping stream from URI: {PLAYLIST_URI}" in caplog.text
assert f"GStreamer failed scanning URI ({PLAYLIST_URI})" in caplog.text
assert f"Parsed playlist ({PLAYLIST_URI})" in caplog.text
assert f"Unwrapped potential audio/mpeg stream: {STREAM_URI}" in caplog.text
assert result == STREAM_URI
@responses.activate
def test_scan_fails_and_playlist_parsing_fails(self, scanner, provider, caplog):
caplog.set_level(logging.DEBUG)
scanner.scan.side_effect = exceptions.ScannerError("some failure")
responses.add(
responses.GET,
STREAM_URI,
body=b"some audio data",
content_type="audio/mpeg",
)
result = provider.translate_uri(STREAM_URI)
assert f"Unwrapping stream from URI: {STREAM_URI}" in caplog.text
assert f"GStreamer failed scanning URI ({STREAM_URI})" in caplog.text
assert (
f"Failed parsing URI ({STREAM_URI}) as playlist; found potential stream."
in caplog.text
)
assert result == STREAM_URI
@responses.activate
def test_failed_download_returns_none(self, scanner, provider, caplog):
caplog.set_level(logging.DEBUG)
scanner.scan.side_effect = [mock.Mock(mime="text/foo", playable=False)]
responses.add(
responses.GET,
PLAYLIST_URI,
body=requests.exceptions.HTTPError("Kaboom"),
)
result = provider.translate_uri(PLAYLIST_URI)
assert result is None
assert (
f"Unwrapping stream from URI ({PLAYLIST_URI}) failed: "
f"error downloading URI"
) in caplog.text
@responses.activate
def test_playlist_references_itself(self, scanner, provider, caplog):
caplog.set_level(logging.DEBUG)
scanner.scan.side_effect = [mock.Mock(mime="text/foo", playable=False)]
responses.add(
responses.GET,
PLAYLIST_URI,
body=BODY.replace(STREAM_URI, PLAYLIST_URI),
content_type="audio/x-mpegurl",
)
result = provider.translate_uri(PLAYLIST_URI)
assert f"Unwrapping stream from URI: {PLAYLIST_URI}" in caplog.text
assert (
f"Parsed playlist ({PLAYLIST_URI}) and found new URI: {PLAYLIST_URI}"
) in caplog.text
assert (
f"Unwrapping stream from URI ({PLAYLIST_URI}) failed: "
f"playlist referenced itself"
) in caplog.text
assert result is None
@responses.activate
def test_playlist_with_relative_mpeg_stream(self, scanner, provider, caplog):
caplog.set_level(logging.DEBUG)
scanner.scan.side_effect = [
# Scanning playlist
mock.Mock(mime="text/foo", playable=False),
# Scanning stream
mock.Mock(mime="audio/mpeg", playable=True),
]
responses.add(
responses.GET,
PLAYLIST_URI,
body=BODY.replace(STREAM_URI, Path(STREAM_URI).name),
content_type="audio/x-mpegurl",
)
result = provider.translate_uri(PLAYLIST_URI)
assert scanner.scan.mock_calls == [
mock.call(PLAYLIST_URI, timeout=mock.ANY),
mock.call(STREAM_URI, timeout=mock.ANY),
]
assert result == STREAM_URI
assert (
f"Parsed playlist ({PLAYLIST_URI}) and found new URI: "
f"{Path(STREAM_URI).name}"
) in caplog.text
assert f"Unwrapping stream from URI: {STREAM_URI}" in caplog.text
|
217b13daba87c011a98a0ae09ff5440d3077d02a
|
47c2ac07c2258d2227e5172df07fe3c8a058479f
|
/test/test_notebook_document.py
|
e8e7ac75fa4c4a22d5bd269d9cb741bb4379934f
|
[
"Python-2.0",
"MIT"
] |
permissive
|
python-lsp/python-lsp-server
|
883ad525458d0405a561febfccc6923f7c945b74
|
3f08d8cb96e20b79c042bc6b2a3ad8bc8f078c3a
|
refs/heads/develop
| 2023-08-31T20:39:54.399353
| 2023-08-22T14:26:00
| 2023-08-22T14:26:00
| 341,006,790
| 1,328
| 171
|
MIT
| 2023-09-10T17:22:18
| 2021-02-21T21:21:42
|
Python
|
UTF-8
|
Python
| false
| false
| 22,102
|
py
|
test_notebook_document.py
|
# Copyright 2021- Python Language Server Contributors.
import os
import time
from unittest.mock import patch, call
from test.fixtures import CALL_TIMEOUT_IN_SECONDS
import pytest
from pylsp import IS_WIN
from pylsp.lsp import NotebookCellKind
def wait_for_condition(condition, timeout=CALL_TIMEOUT_IN_SECONDS):
"""Wait for a condition to be true, or timeout."""
start_time = time.time()
while not condition():
time.sleep(0.1)
if time.time() - start_time > timeout:
raise TimeoutError("Timeout waiting for condition")
@pytest.mark.skipif(IS_WIN, reason="Flaky on Windows")
def test_initialize(client_server_pair):
client, server = client_server_pair
response = client._endpoint.request(
"initialize",
{
"processId": 1234,
"rootPath": os.path.dirname(__file__),
"initializationOptions": {},
},
).result(timeout=CALL_TIMEOUT_IN_SECONDS)
assert server.workspace is not None
assert "notebookDocumentSync" in response["capabilities"].keys()
@pytest.mark.skipif(IS_WIN, reason="Flaky on Windows")
def test_notebook_document__did_open(
client_server_pair,
):
client, server = client_server_pair
client._endpoint.request(
"initialize",
{
"processId": 1234,
"rootPath": os.path.dirname(__file__),
"initializationOptions": {},
},
).result(timeout=CALL_TIMEOUT_IN_SECONDS)
with patch.object(server._endpoint, "notify") as mock_notify:
client._endpoint.notify(
"notebookDocument/didOpen",
{
"notebookDocument": {
"uri": "notebook_uri",
"notebookType": "jupyter-notebook",
"cells": [
{
"kind": NotebookCellKind.Code,
"document": "cell_1_uri",
},
{
"kind": NotebookCellKind.Code,
"document": "cell_2_uri",
},
{
"kind": NotebookCellKind.Code,
"document": "cell_3_uri",
},
{
"kind": NotebookCellKind.Code,
"document": "cell_4_uri",
},
{
"kind": NotebookCellKind.Code,
"document": "cell_5_uri",
},
],
},
# Test as many edge cases as possible for the diagnostics message
"cellTextDocuments": [
{
"uri": "cell_1_uri",
"languageId": "python",
"text": "",
},
{
"uri": "cell_2_uri",
"languageId": "python",
"text": "\n",
},
{
"uri": "cell_3_uri",
"languageId": "python",
"text": "\nimport sys\n\nabc\n\n",
},
{
"uri": "cell_4_uri",
"languageId": "python",
"text": "x",
},
{
"uri": "cell_5_uri",
"languageId": "python",
"text": "y\n",
},
],
},
)
wait_for_condition(lambda: mock_notify.call_count >= 5)
expected_call_args = [
call(
"textDocument/publishDiagnostics",
params={
"uri": "cell_1_uri",
"diagnostics": [],
},
),
call(
"textDocument/publishDiagnostics",
params={
"uri": "cell_2_uri",
"diagnostics": [],
},
),
call(
"textDocument/publishDiagnostics",
params={
"uri": "cell_3_uri",
"diagnostics": [
{
"source": "pyflakes",
"range": {
"start": {"line": 1, "character": 0},
"end": {"line": 1, "character": 11},
},
"message": "'sys' imported but unused",
"severity": 2,
},
{
"source": "pyflakes",
"range": {
"start": {"line": 3, "character": 0},
"end": {"line": 3, "character": 4},
},
"message": "undefined name 'abc'",
"severity": 1,
},
{
"source": "pycodestyle",
"range": {
"start": {"line": 1, "character": 0},
"end": {"line": 1, "character": 11},
},
"message": "E303 too many blank lines (4)",
"code": "E303",
"severity": 2,
},
],
},
),
call(
"textDocument/publishDiagnostics",
params={
"uri": "cell_4_uri",
"diagnostics": [
{
"source": "pyflakes",
"range": {
"start": {"line": 0, "character": 0},
"end": {"line": 0, "character": 2},
},
"message": "undefined name 'x'",
"severity": 1,
},
],
},
),
call(
"textDocument/publishDiagnostics",
params={
"uri": "cell_5_uri",
"diagnostics": [
{
"source": "pyflakes",
"range": {
"start": {"line": 0, "character": 0},
"end": {"line": 0, "character": 2},
},
"message": "undefined name 'y'",
"severity": 1,
},
],
},
),
]
mock_notify.assert_has_calls(expected_call_args)
@pytest.mark.skipif(IS_WIN, reason="Flaky on Windows")
def test_notebook_document__did_change(
client_server_pair,
):
client, server = client_server_pair
client._endpoint.request(
"initialize",
{
"processId": 1234,
"rootPath": os.path.dirname(__file__),
"initializationOptions": {},
},
).result(timeout=CALL_TIMEOUT_IN_SECONDS)
# Open notebook
with patch.object(server._endpoint, "notify") as mock_notify:
client._endpoint.notify(
"notebookDocument/didOpen",
{
"notebookDocument": {
"uri": "notebook_uri",
"notebookType": "jupyter-notebook",
"cells": [
{
"kind": NotebookCellKind.Code,
"document": "cell_1_uri",
},
{
"kind": NotebookCellKind.Code,
"document": "cell_2_uri",
},
],
},
"cellTextDocuments": [
{
"uri": "cell_1_uri",
"languageId": "python",
"text": "import sys",
},
{
"uri": "cell_2_uri",
"languageId": "python",
"text": "",
},
],
},
)
wait_for_condition(lambda: mock_notify.call_count >= 2)
assert len(server.workspace.documents) == 3
for uri in ["cell_1_uri", "cell_2_uri", "notebook_uri"]:
assert uri in server.workspace.documents
assert len(server.workspace.get_document("notebook_uri").cells) == 2
expected_call_args = [
call(
"textDocument/publishDiagnostics",
params={
"uri": "cell_1_uri",
"diagnostics": [
{
"source": "pyflakes",
"range": {
"start": {"line": 0, "character": 0},
"end": {"line": 0, "character": 11},
},
"message": "'sys' imported but unused",
"severity": 2,
}
],
},
),
call(
"textDocument/publishDiagnostics",
params={"uri": "cell_2_uri", "diagnostics": []},
),
]
mock_notify.assert_has_calls(expected_call_args)
# Remove second cell
with patch.object(server._endpoint, "notify") as mock_notify:
client._endpoint.notify(
"notebookDocument/didChange",
{
"notebookDocument": {
"uri": "notebook_uri",
},
"change": {
"cells": {
"structure": {
"array": {
"start": 1,
"deleteCount": 1,
},
"didClose": [
{
"uri": "cell_2_uri",
}
],
},
}
},
},
)
wait_for_condition(lambda: mock_notify.call_count >= 2)
assert len(server.workspace.documents) == 2
assert "cell_2_uri" not in server.workspace.documents
assert len(server.workspace.get_document("notebook_uri").cells) == 1
expected_call_args = [
call(
"textDocument/publishDiagnostics",
params={
"uri": "cell_1_uri",
"diagnostics": [
{
"source": "pyflakes",
"range": {
"start": {"line": 0, "character": 0},
"end": {"line": 0, "character": 10},
},
"message": "'sys' imported but unused",
"severity": 2,
},
{
"source": "pycodestyle",
"range": {
"start": {"line": 0, "character": 10},
"end": {"line": 0, "character": 10},
},
"message": "W292 no newline at end of file",
"code": "W292",
"severity": 2,
},
],
},
)
]
mock_notify.assert_has_calls(expected_call_args)
# Add second cell
with patch.object(server._endpoint, "notify") as mock_notify:
client._endpoint.notify(
"notebookDocument/didChange",
{
"notebookDocument": {
"uri": "notebook_uri",
},
"change": {
"cells": {
"structure": {
"array": {
"start": 1,
"deleteCount": 0,
"cells": [
{
"kind": NotebookCellKind.Code,
"document": "cell_3_uri",
}
],
},
"didOpen": [
{
"uri": "cell_3_uri",
"languageId": "python",
"text": "x",
}
],
},
}
},
},
)
wait_for_condition(lambda: mock_notify.call_count >= 2)
assert len(server.workspace.documents) == 3
assert "cell_3_uri" in server.workspace.documents
assert len(server.workspace.get_document("notebook_uri").cells) == 2
expected_call_args = [
call(
"textDocument/publishDiagnostics",
params={
"uri": "cell_1_uri",
"diagnostics": [
{
"source": "pyflakes",
"range": {
"start": {"line": 0, "character": 0},
"end": {"line": 0, "character": 11},
},
"message": "'sys' imported but unused",
"severity": 2,
}
],
},
),
call(
"textDocument/publishDiagnostics",
params={
"uri": "cell_3_uri",
"diagnostics": [
{
"source": "pyflakes",
"range": {
"start": {"line": 0, "character": 0},
"end": {"line": 0, "character": 1},
},
"message": "undefined name 'x'",
"severity": 1,
},
{
"source": "pycodestyle",
"range": {
"start": {"line": 0, "character": 1},
"end": {"line": 0, "character": 1},
},
"message": "W292 no newline at end of file",
"code": "W292",
"severity": 2,
},
],
},
),
]
mock_notify.assert_has_calls(expected_call_args)
# Edit second cell
with patch.object(server._endpoint, "notify") as mock_notify:
client._endpoint.notify(
"notebookDocument/didChange",
{
"notebookDocument": {
"uri": "notebook_uri",
},
"change": {
"cells": {
"textContent": [
{
"document": {
"uri": "cell_3_uri",
},
"changes": [{"text": "sys.path"}],
}
]
}
},
},
)
wait_for_condition(lambda: mock_notify.call_count >= 2)
expected_call_args = [
call(
"textDocument/publishDiagnostics",
params={"uri": "cell_1_uri", "diagnostics": []},
),
call(
"textDocument/publishDiagnostics",
params={
"uri": "cell_3_uri",
"diagnostics": [
{
"source": "pycodestyle",
"range": {
"start": {"line": 0, "character": 8},
"end": {"line": 0, "character": 8},
},
"message": "W292 no newline at end of file",
"code": "W292",
"severity": 2,
}
],
},
),
]
mock_notify.assert_has_calls(expected_call_args)
@pytest.mark.skipif(IS_WIN, reason="Flaky on Windows")
def test_notebook__did_close(
client_server_pair,
):
client, server = client_server_pair
client._endpoint.request(
"initialize",
{
"processId": 1234,
"rootPath": os.path.dirname(__file__),
"initializationOptions": {},
},
).result(timeout=CALL_TIMEOUT_IN_SECONDS)
# Open notebook
with patch.object(server._endpoint, "notify") as mock_notify:
client._endpoint.notify(
"notebookDocument/didOpen",
{
"notebookDocument": {
"uri": "notebook_uri",
"notebookType": "jupyter-notebook",
"cells": [
{
"kind": NotebookCellKind.Code,
"document": "cell_1_uri",
},
{
"kind": NotebookCellKind.Code,
"document": "cell_2_uri",
},
],
},
"cellTextDocuments": [
{
"uri": "cell_1_uri",
"languageId": "python",
"text": "import sys",
},
{
"uri": "cell_2_uri",
"languageId": "python",
"text": "",
},
],
},
)
wait_for_condition(lambda: mock_notify.call_count >= 2)
assert len(server.workspace.documents) == 3
for uri in ["cell_1_uri", "cell_2_uri", "notebook_uri"]:
assert uri in server.workspace.documents
# Close notebook
with patch.object(server._endpoint, "notify") as mock_notify:
client._endpoint.notify(
"notebookDocument/didClose",
{
"notebookDocument": {
"uri": "notebook_uri",
},
"cellTextDocuments": [
{
"uri": "cell_1_uri",
},
{
"uri": "cell_2_uri",
},
],
},
)
wait_for_condition(lambda: mock_notify.call_count >= 2)
assert len(server.workspace.documents) == 0
@pytest.mark.skipif(IS_WIN, reason="Flaky on Windows")
def test_notebook_definition(client_server_pair):
client, server = client_server_pair
client._endpoint.request(
"initialize",
{
"processId": 1234,
"rootPath": os.path.dirname(__file__),
"initializationOptions": {},
},
).result(timeout=CALL_TIMEOUT_IN_SECONDS)
# Open notebook
with patch.object(server._endpoint, "notify") as mock_notify:
client._endpoint.notify(
"notebookDocument/didOpen",
{
"notebookDocument": {
"uri": "notebook_uri",
"notebookType": "jupyter-notebook",
"cells": [
{
"kind": NotebookCellKind.Code,
"document": "cell_1_uri",
},
{
"kind": NotebookCellKind.Code,
"document": "cell_2_uri",
},
],
},
"cellTextDocuments": [
{
"uri": "cell_1_uri",
"languageId": "python",
"text": "y=2\nx=1",
},
{
"uri": "cell_2_uri",
"languageId": "python",
"text": "x",
},
],
},
)
# wait for expected diagnostics messages
wait_for_condition(lambda: mock_notify.call_count >= 2)
assert len(server.workspace.documents) == 3
for uri in ["cell_1_uri", "cell_2_uri", "notebook_uri"]:
assert uri in server.workspace.documents
future = client._endpoint.request(
"textDocument/definition",
{
"textDocument": {
"uri": "cell_2_uri",
},
"position": {"line": 0, "character": 1},
},
)
result = future.result(CALL_TIMEOUT_IN_SECONDS)
assert result == [
{
"uri": "cell_1_uri",
"range": {
"start": {"line": 1, "character": 0},
"end": {"line": 1, "character": 1},
},
}
]
|
d3a45e8686982d0ec275cd1f241ec5cedb67a43b
|
e35dfcd6dbc5153a465f07b51980a8d38c8fb729
|
/newm/gestures/provider/__init__.py
|
1c5a813d5db163d87e7916368575e82efb28c056
|
[
"MIT"
] |
permissive
|
jbuchermn/newm
|
3f51e05cc86d43474d311dda205b6a1c3b210a8f
|
d120fcc390eba70593aecfafbafefe8647fd5c92
|
refs/heads/master
| 2023-06-09T20:39:15.450449
| 2023-06-03T10:58:17
| 2023-06-03T10:58:17
| 336,638,297
| 952
| 49
|
MIT
| 2023-04-22T01:49:59
| 2021-02-06T21:16:57
|
Python
|
UTF-8
|
Python
| false
| false
| 132
|
py
|
__init__.py
|
from .provider import GestureProvider
from .c_gestures import CGestureProvider
from .pyevdev_provider import PyEvdevGestureProvider
|
39760c778c241daa3016ace7525df499d7b4e81e
|
5d180bd059521006615d10fa0d1ce097ba50217e
|
/end2you/models/model_provider.py
|
ee548cfe4e0ec079837f1a4fe9e47ff3d41f0f2c
|
[
"BSD-3-Clause"
] |
permissive
|
end2you/end2you
|
618dce1c9cf739a280deff7ebfbae718a92997ac
|
29657c0b0f3952dd2e817bdfe8253f76800c2342
|
refs/heads/master
| 2022-09-16T11:44:41.916052
| 2022-07-06T22:15:40
| 2022-07-06T22:15:40
| 116,163,081
| 101
| 23
|
BSD-3-Clause
| 2022-08-24T11:58:20
| 2018-01-03T17:19:40
|
Python
|
UTF-8
|
Python
| false
| false
| 515
|
py
|
model_provider.py
|
import torch
import torch.nn as nn
from .audio import AudioRNNModel
from .visual import VisualRNNModel
from .multimodal import AudioVisualRNNModel
def get_model(model:str = 'audio', *args, **kwargs):
""" Factory method to provide a model of choice
(`audio`, `visual`, `audiovisual`).
Args:
model (str): Model to use.
"""
return {
'audio': AudioRNNModel,
'visual': VisualRNNModel,
'audiovisual': AudioVisualRNNModel
}[model](*args, **kwargs)
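# Illustrative usage (a sketch; the constructor arguments are hypothetical and
# depend on the selected model class):
#   model = get_model('audio', input_size=640, hidden_units=128)
# An unknown model name raises KeyError, since the dict lookup happens before
# the constructor is called.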
|
23a364d4c3af35c1c384e5b32813a371e0dbb075
|
d46844ac1c4230579d6c87d800e07fb41bc99592
|
/pwncat/modules/linux/enumerate/creds/pam.py
|
f6cebbc0a80ca2359ea83f9a0fe8e97d06fc16d7
|
[
"MIT"
] |
permissive
|
calebstewart/pwncat
|
14ade3e424fb70ce3e62b8b5c5053959515799e7
|
37f04d4e16ff47c7fd70e95162f9fccd327cca7e
|
refs/heads/master
| 2023-08-14T04:27:04.773361
| 2022-03-21T20:35:00
| 2022-03-21T20:35:00
| 261,925,766
| 2,177
| 267
|
MIT
| 2023-05-19T04:33:17
| 2020-05-07T02:02:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,172
|
py
|
pam.py
|
#!/usr/bin/env python3
from pwncat.facts import PotentialPassword
from pwncat.platform.linux import Linux
from pwncat.modules.enumerate import Schedule, EnumerateModule
"""
TODO: This module is specifically used to check if we have passwords set
from previously running a paired PAM persistence backdoor. If the persistence
isn't in place already, there is no reason to run this enumeration module.
The persistence module has not been re-implemented in the new platforms
framework so this can't be updated just yet.
"""
class Module(EnumerateModule):
"""
Exfiltrate logged passwords from the pam-based persistence
module. This persistence module logs all attempted passwords
for all users in a known location. We read this file and yield
all passwords we have collected.
"""
PLATFORM = [Linux]
SCHEDULE = Schedule.ALWAYS
PROVIDES = ["creds.password"]
def enumerate(self, session):
# Ensure the user database is already retrieved
session.find_user(uid=0)
for implant in session.run("enumerate", types=["implant.*"]):
if implant.source != "linux.implant.pam":
continue
# Just in case we have multiple of the same password logged
observed = []
try:
with session.platform.open(implant.log, "r") as filp:
for lineno, line in enumerate(filp):
line = line.rstrip("\n")
if line in observed:
continue
user, *password = line.split(":")
password = ":".join(password)
try:
# Check for valid user name
user_info = session.find_user(name=user)
except KeyError:
continue
observed.append(line)
yield PotentialPassword(
self.name, password, implant.log, lineno, user_info.id
)
except (FileNotFoundError, PermissionError):
pass
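# For reference, the implant log parsed above is expected to hold one
# "user:password" entry per line (illustrative; colons inside the password are
# preserved by the ":".join above):
#   root:hunter2
#   alice:pa:ss:word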
|
341f2769e029b244d3a27623926f1e618743de66
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/quickFixes/PyMakeFunctionFromMethodQuickFixTest/usageAssignment_after.py
|
51abce182ad827c9a134562840104e26a4cc3f66
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 59
|
py
|
usageAssignment_after.py
|
def m():
print(1)
class A:
pass
b = A()
a = m()
|
e41b48d2b975ca66c1c0de7be8e287cc0fd28509
|
dd317f56cd0d93b66e174e93691dd09d4e191d30
|
/doc/examples/parametrized.py
|
dbf8fbbaf4ce19c271f2cb96599ec4d78dedfd23
|
[
"Apache-2.0"
] |
permissive
|
RKrahl/pytest-dependency
|
32f87a10b86d21c7ec201153570e41144ca443d0
|
cab2f65ced816939a9041b9e67169073ef0ee412
|
refs/heads/develop
| 2023-04-27T13:06:40.853328
| 2022-02-17T17:06:21
| 2022-02-17T17:06:21
| 56,441,594
| 131
| 33
|
Apache-2.0
| 2022-03-25T10:00:08
| 2016-04-17T14:46:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,779
|
py
|
parametrized.py
|
import pytest
@pytest.mark.parametrize("x,y", [
pytest.param(0, 0, marks=pytest.mark.dependency(name="a1")),
pytest.param(0, 1, marks=[pytest.mark.dependency(name="a2"),
pytest.mark.xfail]),
pytest.param(1, 0, marks=pytest.mark.dependency(name="a3")),
pytest.param(1, 1, marks=pytest.mark.dependency(name="a4"))
])
def test_a(x,y):
assert y <= x
@pytest.mark.parametrize("u,v", [
pytest.param(1, 2, marks=pytest.mark.dependency(name="b1",
depends=["a1", "a2"])),
pytest.param(1, 3, marks=pytest.mark.dependency(name="b2",
depends=["a1", "a3"])),
pytest.param(1, 4, marks=pytest.mark.dependency(name="b3",
depends=["a1", "a4"])),
pytest.param(2, 3, marks=pytest.mark.dependency(name="b4",
depends=["a2", "a3"])),
pytest.param(2, 4, marks=pytest.mark.dependency(name="b5",
depends=["a2", "a4"])),
pytest.param(3, 4, marks=pytest.mark.dependency(name="b6",
depends=["a3", "a4"]))
])
def test_b(u,v):
pass
@pytest.mark.parametrize("w", [
pytest.param(1, marks=pytest.mark.dependency(name="c1",
depends=["b1", "b2", "b6"])),
pytest.param(2, marks=pytest.mark.dependency(name="c2",
depends=["b2", "b3", "b6"])),
pytest.param(3, marks=pytest.mark.dependency(name="c3",
depends=["b2", "b4", "b6"]))
])
def test_c(w):
pass
|
7a2ab93146f91df457da3448fc440b2565e903d2
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/JoeSecurity/Integrations/JoeSecurity/JoeSecurity_test.py
|
82b5b9d56dc07ee782ec176386308ed8fff38568
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,172
|
py
|
JoeSecurity_test.py
|
def mock_http_post(suffix_url, data=None, files=None, parse_json=True):
return {'data': {'webids': [files]}}
def mock_info_request(web_id):
return {'data': web_id}
def mock_analysis_to_entry(title, info):
return info
def test_analyse_sample_file_request(mocker):
"""
Given:
        A file with a backslash in its name
When:
joe-analysis-submit-sample is running
Then:
        Make sure the backslash in the file name is replaced with a forward slash ('abc\\def.txt' => 'abc/def.txt')
"""
import demistomock as demisto
mocker.patch.object(demisto, 'params', return_value={'url': 'www.example.com'})
mocker.patch('JoeSecurity.http_post', side_effect=mock_http_post)
mocker.patch('JoeSecurity.info_request', side_effect=mock_info_request)
mocker.patch('JoeSecurity.analysis_to_entry', side_effect=mock_analysis_to_entry)
    mocker.patch.object(demisto, 'getFilePath', return_value={'path': 'README.md', 'name': 'abc\\def.txt'})
from JoeSecurity import analyse_sample_file_request
result = analyse_sample_file_request(123456, False, True, comments='', systems='')
assert result.get('sample')[0] == 'abc/def.txt'
|
a984c8b2c6278b3140f5c8b10c8f58ecfa47ab76
|
5c3296ff65e5a07852ff9dad1cc5e07991d08270
|
/lingvo/model_registry_test.py
|
b76a6c445ce83da3680068983f9fdbe01d888394
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/lingvo
|
dee164ef6e69edb352f2e855660b9b5227ddcf6f
|
c00a74b260fcf6ba11199cc4a340c127d6616479
|
refs/heads/master
| 2023-09-01T22:08:55.758781
| 2023-08-30T00:50:34
| 2023-08-30T00:51:26
| 142,219,189
| 2,963
| 485
|
Apache-2.0
| 2023-09-07T00:52:48
| 2018-07-24T22:30:28
|
Python
|
UTF-8
|
Python
| false
| false
| 10,156
|
py
|
model_registry_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model_registry."""
from absl.testing import flagsaver
from absl.testing import parameterized
from lingvo import model_registry
import lingvo.compat as tf
from lingvo.core import base_input_generator
from lingvo.core import base_model
from lingvo.core import base_model_params
from lingvo.core import program
from lingvo.core import test_utils
FLAGS = tf.flags.FLAGS
@model_registry.RegisterSingleTaskModel
class DummyModel(base_model_params.SingleTaskModelParams):
def Train(self):
p = base_input_generator.BaseInputGenerator.Params()
p.name = 'Train'
return p
def Dev(self):
p = base_input_generator.BaseInputGenerator.Params()
p.name = 'Dev'
return p
def Test(self):
p = base_input_generator.BaseInputGenerator.Params()
p.name = 'Test'
return p
def Task(self):
p = base_model.BaseTask.Params()
p.name = 'DummyModel'
return p
def Dataset(self):
p = base_input_generator.BaseInputGenerator.Params()
p.name = 'Dataset'
return p
def Task_Dataset(self):
p = self.Task()
p.name = 'DatasetSpecificTask'
return p
def ProgramSchedule(self):
p = program.SimpleProgramScheduleForTask(
train_dataset_name='Train',
train_steps_per_loop=1000,
eval_dataset_names=['Dev', 'Test'],
eval_steps_per_loop=1,
decode_steps_per_loop=1)
p.train_executions_per_eval = 0
return p
@model_registry.RegisterSingleTaskModel
class DummyModelWithInitRules(DummyModel):
def Task(self):
p = super().Task()
p.train.init_from_checkpoint_rules = {
'/ckpt/path': ([('abc', 'def')], []),
}
return p
class ModelRegistryTest(test_utils.TestCase, parameterized.TestCase):
def setUp(self):
FLAGS.model_params_override = ''
def testGetClass(self):
mp_cls = model_registry.GetClass('test.DummyModel')
mp = mp_cls()
self.assertEqual('Train', mp.Train().name)
self.assertEqual('Dev', mp.Dev().name)
self.assertEqual('Test', mp.Test().name)
self.assertIsNotNone(mp.Task())
self.assertIsNotNone(mp.Model())
with self.assertRaises(LookupError):
# Not yet registered.
model_registry.GetClass('something.does.not.exist')
def testGetParams(self):
cfg = model_registry.GetParams('test.DummyModel', 'Test')
self.assertIsNotNone(cfg)
self.assertEqual(DummyModel().Test(), cfg.input)
cfg.input = None
# Registered version adds model source info but direct does not.
cfg.model = None
self.assertEqual(DummyModel().Model(), cfg)
cfg = model_registry.GetParams('test.DummyModel', 'Dataset')
self.assertIsNotNone(cfg)
self.assertEqual(DummyModel().Task_Dataset(), cfg.task)
with self.assertRaises(LookupError):
# Not yet registered.
cfg = model_registry.GetParams('something.does.not.exist', 'Test')
with self.assertRaises(base_model_params.DatasetError):
cfg = model_registry.GetParams('test.DummyModel', 'UnknownDataset')
def testGetParamsCanOverrideWithFlags(self):
cfg = model_registry.GetParams('test.DummyModel', 'Train')
FLAGS.model_params_override = (
'train.max_steps: 10; train.ema_decay: 0.9\n'
'train.init_from_checkpoint_rules : {"ckpt": (["abc", "def"], [])}\n')
cfg2 = model_registry.GetParams('test.DummyModel', 'Train')
self.assertNotEqual(cfg.train.max_steps, 10)
self.assertEqual(cfg2.train.max_steps, 10)
self.assertNotEqual(cfg.train.ema_decay, 0.9)
self.assertEqual(cfg2.train.ema_decay, 0.9)
self.assertNotEqual(cfg.train.init_from_checkpoint_rules,
{'ckpt': (['abc', 'def'], [])})
self.assertEqual(cfg2.train.init_from_checkpoint_rules,
{'ckpt': (['abc', 'def'], [])})
def testGetParamsOverrideWithInitCheckpointPath(self):
# Without override, default value is None.
cfg = model_registry.GetParams('test.DummyModel', 'Train')
self.assertIsNone(cfg.task.train.init_from_checkpoint_override)
# Override ckpt path from empty to flag.
FLAGS.model_params_override = (
'task.train.init_from_checkpoint_override:/new/ckpt/path')
cfg1 = model_registry.GetParams('test.DummyModel', 'Train')
self.assertEqual(cfg1.task.train.init_from_checkpoint_override,
'/new/ckpt/path')
# Unset checkpoint path.
FLAGS.model_params_override = ('task.train.init_from_checkpoint_override:')
cfg2 = model_registry.GetParams('test.DummyModelWithInitRules', 'Train')
self.assertEqual(cfg2.task.train.init_from_checkpoint_override, '')
def testGetParamsCanOverrideWithFlagsRaises(self):
FLAGS.model_params_override = 'task.SOME_UNKNOWN_PARAM : 10'
with self.assertRaises(AttributeError):
_ = model_registry.GetParams('test.DummyModel', 'Train')
def testGetParamsCanOverrideWithFlagsBadSyntax(self):
FLAGS.model_params_override = 'task.SOME_UNKNOWN_PARAM=10'
with self.assertRaises(ValueError):
_ = model_registry.GetParams('test.DummyModel', 'Train')
def testGetParamsCanOverrideInputParamsWithFlags(self):
cfg = model_registry.GetParams('test.DummyModel', 'Train')
FLAGS.model_params_override = 'input.num_samples: 100'
cfg2 = model_registry.GetParams('test.DummyModel', 'Train')
self.assertNotEqual(cfg.input.num_samples, 100)
self.assertEqual(cfg2.input.num_samples, 100)
def _CheckProgramParams(self, eval_programs, expt_eval_dev, expt_eval_test,
expt_decode_dev, expt_decode_test):
eval_dev, eval_test, decode_dev, decode_test = 0, 0, 0, 0
for eval_program in eval_programs:
if eval_program.dataset_name == 'Dev':
if issubclass(eval_program.cls, program.EvalProgram):
self.assertEqual(eval_program.name, 'eval_tpu')
eval_dev += 1
elif issubclass(eval_program.cls, program.DecodeProgram):
self.assertEqual(eval_program.name, 'decode_tpu')
decode_dev += 1
elif eval_program.dataset_name == 'Test':
if issubclass(eval_program.cls, program.EvalProgram):
self.assertEqual(eval_program.name, 'eval_tpu')
eval_test += 1
elif issubclass(eval_program.cls, program.DecodeProgram):
self.assertEqual(eval_program.name, 'decode_tpu')
decode_test += 1
self.assertEqual(eval_dev, expt_eval_dev)
self.assertEqual(eval_test, expt_eval_test)
self.assertEqual(decode_dev, expt_decode_dev)
self.assertEqual(decode_test, expt_decode_test)
@parameterized.named_parameters(
('Basic',),
('DevOnly', 'Dev', 0, 3, -1),
('OverrideExecutions', None, 1, None, None),
('DecodeOnly', None, None, 0, None),
('EvalOnly', None, None, None, 0),
)
def testProgramSchedule(self,
dataset_list_override=None,
train_executions_per_eval_override=None,
eval_steps_per_loop_override=None,
decode_steps_per_loop_override=None):
with flagsaver.flagsaver(
executor_datasets_to_eval=dataset_list_override,
executor_train_executions_per_eval=train_executions_per_eval_override,
executor_eval_steps_per_loop=eval_steps_per_loop_override,
executor_decode_steps_per_loop=decode_steps_per_loop_override):
ps_params = model_registry.GetProgramSchedule('test.DummyModel')
if dataset_list_override is not None:
self.assertAllEqual(ps_params.dataset_names,
dataset_list_override.split(';'))
else:
self.assertAllEqual(ps_params.dataset_names, ['Dev', 'Test'])
if train_executions_per_eval_override is not None:
self.assertEqual(ps_params.train_executions_per_eval,
train_executions_per_eval_override)
else:
self.assertEqual(ps_params.train_executions_per_eval, 0)
      # Assume only Dev and Test are available eval datasets.
eval_dev, eval_test, decode_dev, decode_test = 0, 0, 0, 0
if dataset_list_override is None or 'Dev' in dataset_list_override:
if eval_steps_per_loop_override != 0:
eval_dev += 1
if decode_steps_per_loop_override != 0:
decode_dev += 1
if dataset_list_override is None or 'Test' in dataset_list_override:
if eval_steps_per_loop_override != 0:
eval_test += 1
if decode_steps_per_loop_override != 0:
decode_test += 1
self.assertLen(ps_params.eval_programs,
eval_dev + decode_dev + eval_test + decode_test)
self._CheckProgramParams(ps_params.eval_programs, eval_dev, eval_test,
decode_dev, decode_test)
def testModelParamsIncludeSourceInfo(self):
path = 'lingvo/model_registry_test.py'
# NOTE: Only the registered version has source info.
self.assertIn(path,
model_registry.GetParams('test.DummyModel', 'Test').model)
def testDoubleRegister(self):
def CreateDuplicate():
# pylint: disable=unused-variable
# pylint: disable=function-redefined
@model_registry.RegisterSingleTaskModel
class DummyDupl(DummyModel):
pass
@model_registry.RegisterSingleTaskModel
class DummyDupl(DummyModel):
pass
# pylint: enable=unused-variable
# pylint: enable=function-redefined
with self.assertRaises(ValueError):
CreateDuplicate()
if __name__ == '__main__':
test_utils.main()
|
e3eb4b0d6b4237236652eaf1afac7368632c274d
|
2883819589b815a96e992cf184a28a9cbafc19d5
|
/Versi Lama 20 - teknik looping/tekniklooping.py
|
080a28929be619219b9b1c53c32386b5fb239775
|
[] |
no_license
|
kelasterbuka/Python3.x_Dasar_Programming
|
f944cbfdd990b50ffdb5c0abf68033d5256b5cad
|
0fd9299817ab5804d16d7981707d589b36a962e1
|
refs/heads/master
| 2023-01-08T21:13:39.395752
| 2022-11-02T07:46:40
| 2022-11-02T07:46:40
| 123,400,518
| 453
| 361
| null | 2023-02-02T11:48:48
| 2018-03-01T07:44:56
|
Python
|
UTF-8
|
Python
| false
| false
| 952
|
py
|
tekniklooping.py
|
# looping techniques
nama_band = ['Payung Teduh',
'Fourtwnty',
'Dialog Dini Hari',
'Mr. Sonjaya',
'Parahyena',
'Syahrini']
kumpulan_lagu = ['Akad',
'Zona Nyaman',
'Rumahku',
'Sang Filsuf',
'Sindoro',
'Jodohku']
# enumerate
for index,band in enumerate(nama_band):
print(index,':',band)
# zip
for band,lagu in zip(nama_band,kumpulan_lagu):
	print(band,'sings the song titled:',lagu)
# set
playlist = {'baby baby', 'ada apa dengan cinta', 'cenat-cenut', 'jaran goyang', 'jaran goyang', 'gorgom', 'kuda', 'kucing'}
for lagu in sorted(playlist):
print(lagu)
# dictionary
print('='*100)
playlist2 = {'Payung Teduh': 'akad',
'Fourtwnty':'Zona Nyaman',
'Dialog Dini Hari':'Rumahku',
}
for i,v in playlist2.items():
	print(i,'song:',v)
for i in reversed(range(1,10,1)):
print(i)
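# bonus (a small extra sketch): zip can also feed the dict constructor
# to pair the two lists above directly
playlist3 = dict(zip(nama_band, kumpulan_lagu))
print(playlist3)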
|
6a09f45bf1fa088cfe074ffd7f54b14ef8e4e0ea
|
160f08e768d7271f9522ad2597ac4ee79c04477a
|
/src/c3nav/api/apps.py
|
ddb7ce775afb4d92ab9c1d6b0e13ddac63cd3052
|
[
"Apache-2.0"
] |
permissive
|
c3nav/c3nav
|
6254724dfc8589ee03c6028577befd7c65b05857
|
1a4ef5caa06ddacc8d9370b5adcee248fd4f55f7
|
refs/heads/main
| 2023-08-04T08:36:18.431458
| 2023-07-24T09:57:18
| 2023-07-24T09:57:18
| 56,852,994
| 140
| 47
|
Apache-2.0
| 2023-07-05T22:55:27
| 2016-04-22T12:13:51
|
Python
|
UTF-8
|
Python
| false
| false
| 341
|
py
|
apps.py
|
from django.apps import AppConfig
from django.conf import settings
from django.db.models.signals import post_save
class APIConfig(AppConfig):
name = 'c3nav.api'
def ready(self):
from c3nav.api.signals import remove_tokens_on_user_save
post_save.connect(remove_tokens_on_user_save, sender=settings.AUTH_USER_MODEL)
|
c933e830e91b5281cf582b5ed9f0cf44fa4c419e
|
47776565cddf44cbca4a851943ef942867da57e1
|
/ITMO_FS/wrappers/randomized/__init__.py
|
c451a086478f30d49a0645bc554a2f378de22e61
|
[
"BSD-3-Clause"
] |
permissive
|
ctlab/ITMO_FS
|
3466264895d79d51c98d2dfa843f126c69a51172
|
a2e61e2fabb9dfb34d90a1130fc7f5f162a2c921
|
refs/heads/master
| 2023-03-09T23:34:23.185421
| 2023-02-22T20:02:16
| 2023-02-22T20:02:16
| 243,206,210
| 120
| 33
|
BSD-3-Clause
| 2023-02-22T20:02:18
| 2020-02-26T08:13:44
|
Python
|
UTF-8
|
Python
| false
| false
| 125
|
py
|
__init__.py
|
from .HillClimbing import HillClimbingWrapper
from .TPhMGWO import TPhMGWO
from .SimulatedAnnealing import SimulatedAnnealing
|
fb5b624a0354f6a74d6842f4b53f53d1f63ef09c
|
50177ddaa15d7a6c04d5669130f43fec383bf7f4
|
/drf_extra_fields/relations.py
|
e40632d56a48a4e9e7395b57b86fb22d81004a4f
|
[
"Apache-2.0"
] |
permissive
|
Hipo/drf-extra-fields
|
a1b561ec3c0f7ae9ab5e9c52ef016ac5c163cc5c
|
8c18a7542c8a38fe3dccd1874a74a38410aa3a7f
|
refs/heads/master
| 2023-08-21T18:05:30.125318
| 2023-08-08T18:13:21
| 2023-08-08T18:13:21
| 21,973,580
| 635
| 123
|
Apache-2.0
| 2023-08-08T18:13:22
| 2014-07-18T08:35:45
|
Python
|
UTF-8
|
Python
| false
| false
| 3,814
|
py
|
relations.py
|
from collections import OrderedDict
from django.utils.module_loading import import_string
from rest_framework.relations import (
PrimaryKeyRelatedField, SlugRelatedField, MANY_RELATION_KWARGS,
ManyRelatedField as DRFManyRelatedField
)
class ReadSourceMixin:
"""
    This mixin overrides get_attribute and, when a read_source attribute is
    set, uses it as the source for read operations. bind is deliberately not
    overridden so that write operations are unaffected.
"""
class ManyRelatedField(DRFManyRelatedField):
def get_attribute(self, instance):
if self.child_relation.read_source:
self.source = self.child_relation.read_source
self.bind(self.field_name, self.parent)
return super().get_attribute(instance)
def __init__(self, **kwargs):
self.read_source = kwargs.pop("read_source", None)
super().__init__(**kwargs)
@classmethod
def many_init(cls, *args, **kwargs):
if not kwargs.get("read_source", None):
return super().many_init(*args, **kwargs)
list_kwargs = {'child_relation': cls(*args, **kwargs)}
for key in kwargs:
if key in MANY_RELATION_KWARGS:
list_kwargs[key] = kwargs[key]
return cls.ManyRelatedField(**list_kwargs)
def get_attribute(self, instance):
if self.read_source:
self.source = self.read_source
self.bind(self.field_name, self.parent)
return super().get_attribute(instance)
class PresentableRelatedFieldMixin(ReadSourceMixin):
def __init__(self, **kwargs):
self.presentation_serializer = kwargs.pop("presentation_serializer", None)
self.presentation_serializer_kwargs = kwargs.pop(
"presentation_serializer_kwargs", dict()
)
assert self.presentation_serializer is not None, (
self.__class__.__name__
+ " must provide a `presentation_serializer` argument"
)
super().__init__(**kwargs)
def use_pk_only_optimization(self):
"""
        Instead of returning a pk-only object, return the full object. The
        object has already been retrieved from the database by DRF, so this
        does not cause an extra query; it may even avoid an extra query in the
        serializer's to_representation method.
Related source codes:
- https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/relations.py#L41
- https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/relations.py#L132
"""
return False
def get_choices(self, cutoff=None):
queryset = self.get_queryset()
if queryset is None:
# Ensure that field.choices returns something sensible
# even when accessed with a read-only field.
return {}
if cutoff is not None:
queryset = queryset[:cutoff]
return OrderedDict([(item.pk, self.display_value(item)) for item in queryset])
def to_representation(self, data):
if isinstance(self.presentation_serializer, str):
self.presentation_serializer = import_string(self.presentation_serializer)
return self.presentation_serializer(
data, context=self.context, **self.presentation_serializer_kwargs
).data
class PresentablePrimaryKeyRelatedField(
PresentableRelatedFieldMixin, PrimaryKeyRelatedField
):
"""
Override PrimaryKeyRelatedField to represent serializer data instead of a pk field of the object.
"""
pass
class PresentableSlugRelatedField(PresentableRelatedFieldMixin, SlugRelatedField):
"""
Override SlugRelatedField to represent serializer data instead of a slug field of the object.
"""
pass
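# Usage sketch (the Post/User models and UserSerializer below are illustrative,
# not part of this module): accept a primary key on write while rendering the
# full serializer output on read:
#
#     class PostSerializer(serializers.ModelSerializer):
#         author = PresentablePrimaryKeyRelatedField(
#             queryset=User.objects.all(),
#             presentation_serializer=UserSerializer,
#         )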
|
f2301eecf88234ef043eabd59a0e69dd2af8cf19
|
24e6c105fb91da3454e0cc8833d3a9ba30f45896
|
/EduKTM/utils/torch/functional.py
|
1ae064897266a0161cfd1b4bb97156589acc781b
|
[
"Apache-2.0"
] |
permissive
|
bigdata-ustc/EduKTM
|
df2b89f924df6e056b735dcc70a349c38b2ba204
|
c9912f0d29830b75b192bb63cdc5a4400f476300
|
refs/heads/main
| 2023-04-13T23:29:30.720104
| 2023-03-21T13:05:18
| 2023-03-21T13:05:18
| 348,569,820
| 130
| 51
|
Apache-2.0
| 2023-03-21T13:05:20
| 2021-03-17T03:37:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,337
|
py
|
functional.py
|
# coding: utf-8
# 2021/5/24 @ tongshiwei
__all__ = ["pick", "tensor2list", "length2mask", "get_sequence_mask", "sequence_mask"]
import torch
from torch import Tensor
def pick(tensor, index, axis=-1):
return torch.gather(tensor, axis, index.unsqueeze(axis)).squeeze(axis)
def tensor2list(tensor: Tensor):
return tensor.cpu().tolist()
def length2mask(length, max_len, valid_mask_val, invalid_mask_val):
mask = []
if isinstance(valid_mask_val, Tensor):
valid_mask_val = tensor2list(valid_mask_val)
if isinstance(invalid_mask_val, Tensor):
invalid_mask_val = tensor2list(invalid_mask_val)
if isinstance(length, Tensor):
length = tensor2list(length)
for _len in length:
mask.append([valid_mask_val] * _len + [invalid_mask_val] * (max_len - _len))
return torch.tensor(mask)
def get_sequence_mask(shape, sequence_length, axis=1):
assert axis <= len(shape)
mask_shape = shape[axis + 1:]
valid_mask_val = torch.ones(mask_shape)
invalid_mask_val = torch.zeros(mask_shape)
max_len = shape[axis]
return length2mask(sequence_length, max_len, valid_mask_val, invalid_mask_val)
def sequence_mask(tensor: Tensor, sequence_length, axis=1):
mask = get_sequence_mask(tensor.shape, sequence_length, axis).to(tensor.device)
return tensor * mask
|
6b7e80f6b0d0209a735c95f484259f94352cccec
|
7ea5cb272f3acafce0f91e6fd29213cedccd7ef7
|
/data_pre/reg_preprocess_example/oai_longitude_reg.py
|
4b974cbb7461f064f987afa5afe8d13ab4837147
|
[
"Apache-2.0"
] |
permissive
|
uncbiag/easyreg
|
e4434a5d7905d24e47cae9dfc9530a63afc86521
|
55339813d47c86a79268831f5b353924a2326fb3
|
refs/heads/master
| 2023-01-21T12:09:42.809367
| 2023-01-08T17:04:26
| 2023-01-08T17:04:26
| 109,648,136
| 139
| 11
|
NOASSERTION
| 2023-01-08T17:04:27
| 2017-11-06T04:35:37
|
Python
|
UTF-8
|
Python
| false
| false
| 25,790
|
py
|
oai_longitude_reg.py
|
import sys
import os
sys.path.insert(0,os.path.abspath('.'))
sys.path.insert(0,os.path.abspath('..'))
sys.path.insert(0,os.path.abspath('../easyreg'))
import numpy as np
from functools import reduce
from easyreg.reg_data_utils import make_dir
import random
import SimpleITK as sitk
from multiprocessing import Pool
"""
######################################### Section 1. Raw Data Organization ###############################################
data root: /playpen/zhenlinx/Data/OAI_segmentation/
The images were saved as nifti format at ./Nifti_6sets_rescaled
The list files are images_6sets_left.txt and images_6sets_right.txt. The image file names are ordered by patient ID but not by time within each patient.
For a file name like 9000099_20050712_SAG_3D_DESS_LEFT_10424405_image.nii.gz
9000099 is the patient ID, 20050712 is the scan date,
SAG_3D_DESS is the image modality,
LEFT means left knee,
and 10424405 is the image id.
Segmentations for images_6sets_right predicted by UNetx2 were saved at
/playpen/zhenlinx/Data/OAI_segmentation/segmentations/images_6sets_right/Cascaded_2_AC_residual-1-s1_end2end_multi-out_UNet_bias_Nifti_rescaled_train1_patch_128_128_32_batch_2_sample_0.01-0.02_cross_entropy_lr_0.0005_scheduler_multiStep_02262018_013038
####################################### Section 2. Processed Data Organization #############################################
data root: /playpen/zyshen/summer/oai_registration/data
The patient id will be saved in patient_id.txt
(to do): The modality list will be saved in modality.txt (each line organized as: MRI mod1 <newline> CT mod2 ...)
the patient slice lists will be saved in the folder "patient_slice"
./patient_slice/ : each patient_id is a separate folder
./patient_slice/idxxxxxxx/: each modality is a separate folder
./patient_slice/idxxxxxxx/mod1/: each specificity is a separate folder, i.e. left, right
./patient_slice/idxxxxxxx/mod1/spec1/ paths of slice labels will be recorded in "slice_label.txt", each line has a slice path and the corresponding label path
######################################## Section 3. Code Organization ####################################################
class DataPrepare:
    class DataPrepare is specific to the OAI dataset; it transforms the Raw Data Organization into the Processed Data Organization
    object variables included:
raw_data_path, output_data_path,
function_call_outside:
prepare_data()
function_call_inside:
__factor_file(file_name)
__factor_file_list()
__build_and_write_in()
class Patient:
    class Patient is initialized from a patient_id folder, so it needs the path of that folder as input
    object variables included:
basic_information:
patient_id, modality (tuple), specificity(tuple), patient_slices_path_dic ([modality][specificity]: slice_list) (dict),
patient_slices_num_dic (dict)
annotation_information:
has_label, label_is_complete, patient_slices_label_path_dic (dict),
function called outside:
        check_if_taken(self, modality=None, specificity=None, len_time_range=None, has_complete_label=None)
get_slice_list(modality,specificity)
get_label_path_list(modality,specificity)
get_slice_num(modality,specificity)
function called inside:
__init__()
class Patients:
    class Patients is initialized from the patient_slice folder, so it needs the path of that folder as input
    this class holds a list of Patient objects, and conditions can be set in order to filter the patients
    object variables included:
patients_id_list (list), patients( list of class Patient)
function called outside:
get_that_patient(self,patient_id)
        get_filtered_patients_list(self,modality=None, specificity=None, has_complete_label=None, num_of_patients= -1, len_time_range=None, use_random=False):
to do:
get_patients_statistic_distribution(is_modality=False, is_specificity= False, has_label=False)
function call inside:
__read_patients_id_list_from_txt(self)
__init_basic_info
__init_full_info
"""
class Patients(object):
def __init__(self,full_init=False, root_path= ''):
self.full_init = full_init
self.root_path = root_path if len(root_path) else "/playpen/zyshen/summer/oai_registration/reg_0623/data"
self.patients_id_txt_name = 'patient_id.txt'
self.patients_info_folder = 'patient_slice'
self.patients_id_list= []
self.patients = []
if not full_init:
self.__init_basic_info()
else:
self.__init_full_info()
def __init_basic_info(self):
self.__read_patients_id_list_from_txt()
self.patients_num = len(self.patients_id_list)
def __init_full_info(self):
self.__read_patients_id_list_from_txt()
self.patients_num = len(self.patients_id_list)
for patient_id in self.patients_id_list:
patient_info_path = os.path.join(self.root_path, self.patients_info_folder, patient_id)
self.patients.append(Patient(patient_info_path))
def get_all_patients(self):
if self.full_init:
return self.patients
else:
self.__init_full_info()
return self.patients
def get_that_patient(self,patient_id):
assert patient_id in self.patients_id_list
patient_info_path = os.path.join(self.root_path, self.patients_info_folder, patient_id)
patient = Patient(patient_info_path)
return patient
def get_filtered_patients_list(self,modality=None, specificity=None, has_complete_label=None, num_of_patients= -1, len_time_range=None, use_random=False):
index = list(range(self.patients_num))
num_of_patients = num_of_patients if num_of_patients>0 else self.patients_num
filtered_patients_list =[]
if use_random:
random.shuffle(index)
count = 0
for i in index:
if not self.full_init:
patient_id = self.patients_id_list[i]
patient_info_path = os.path.join(self.root_path, self.patients_info_folder, patient_id)
patient = Patient(patient_info_path)
else:
patient = self.patients[i]
            modality_tmp = patient.modality[0] if modality is None else modality
            specificity_tmp = patient.specificity[0] if specificity is None else specificity
            if_taken = patient.check_if_taken(modality=modality_tmp,specificity=specificity_tmp,has_complete_label=has_complete_label,len_time_range=len_time_range)
if if_taken:
filtered_patients_list.append(patient)
count+=1
if count>= num_of_patients:
break
if len(filtered_patients_list)< num_of_patients:
print("not enough patients meet the filter requirement. We want {} but got {} patients".format(num_of_patients, len(filtered_patients_list)))
return filtered_patients_list
def __read_patients_id_list_from_txt(self):
"""
        read the patient ids from the txt, i.e. patient_id.txt, and populate
        self.patients_id_list and self.patients_has_label_list
        :return: None
"""
txt_path = os.path.join(self.root_path, self.patients_id_txt_name)
with open(txt_path, 'r') as f:
content = f.read().splitlines()
if len(content) > 0:
infos = [line.split('\t') for line in content]
self.patients_id_list = [info[0] for info in infos]
self.patients_has_label_list = [info[1]=='annotation_complete' for info in infos]
class Patient():
def __init__(self, path):
# patient_id, modality(set), specificity(set), patient_slices_path_dic([modality][specificity]: slice_list)
self.patient_root_path = path
self.patient_id = -1
self.modality = None
self.specificity = None
self.patient_slices_path_dic = {}
self.patient_slices_num_dic = {}
self.has_label = False
self.label_is_complete = True
self.patient_slices_label_path_dic = {}
self.patient_has_label_dic= {}
self.txt_file_name = 'slice_label.txt'
self.__init_patient_info()
def __init_patient_info(self):
self.patient_id = os.path.split(self.patient_root_path)[1]
modality_list = os.listdir(self.patient_root_path)
specificity_list = os.listdir(os.path.join(self.patient_root_path, modality_list[0]))
self.modality = tuple(modality_list)
self.specificity = tuple(specificity_list)
for mod in self.modality:
for spec in self.specificity:
if mod not in self.patient_slices_path_dic:
self.patient_slices_path_dic[mod]={}
self.patient_slices_label_path_dic[mod]={}
self.patient_has_label_dic[mod]= {}
self.patient_slices_num_dic[mod]={}
self.patient_slices_path_dic[mod][spec], self.patient_slices_label_path_dic[mod][spec] \
= self.__init_path_info(mod, spec)
self.patient_slices_num_dic[mod][spec] = len(self.patient_slices_path_dic[mod][spec])
has_complete_spec_label = True
for label_path in self.patient_slices_label_path_dic[mod][spec]:
if label_path !='None':
self.has_label = True
else:
self.label_is_complete= False
has_complete_spec_label= False
self.patient_has_label_dic[mod][spec] = has_complete_spec_label
def __init_path_info(self,modality, specificity):
txt_path = os.path.join(self.patient_root_path,modality, specificity,self.txt_file_name)
paths = []
with open(txt_path, 'r') as f:
content = f.read().splitlines()
if len(content) > 0:
paths = [line.split('\t') for line in content]
slices_path_list = [path[0] for path in paths]
slices_label_path_list = [path[1] for path in paths]
return slices_path_list,slices_label_path_list
def check_if_taken(self, modality=None, specificity=None, len_time_range=None, has_complete_label=None):
modality_met =True if modality is None else modality in self.modality
specificity_met = True if specificity is None else specificity in self.specificity
has_label_met = True if has_complete_label is None else self.label_is_complete == has_complete_label
        if not (modality_met and specificity_met):
            return False
len_time_met =True
if len_time_range is not None:
cur_len_time = self.patient_slices_num_dic[modality][specificity]
len_time_met = len_time_range[0]<= cur_len_time and len_time_range[1]>= cur_len_time
if_taken = modality_met and specificity_met and len_time_met and has_label_met
return if_taken
def get_slice_path_list(self, modality=None, specificity=None):
        if modality is None and specificity is None:
            return self.patient_slices_path_dic[self.modality[0]][self.specificity[0]]
        elif modality in self.modality and specificity in self.specificity:
            return self.patient_slices_path_dic[modality][specificity]
        else:
            print("patient {} doesn't have slices in format {} and {}".format(self.patient_id, modality, specificity))
return []
def get_label_path_list(self,modality=None, specificity=None):
        if modality is None and specificity is None:
            return self.patient_slices_label_path_dic[self.modality[0]][self.specificity[0]]
        elif modality in self.modality and specificity in self.specificity:
            return self.patient_slices_label_path_dic[modality][specificity]
        else:
            print("patient {} doesn't have labels in format {} and {}".format(self.patient_id, modality, specificity))
return []
def get_slice_num(self,modality=None, specificity=None):
        if modality is None and specificity is None:
            return self.patient_slices_num_dic[self.modality[0]][self.specificity[0]]
        elif modality in self.modality and specificity in self.specificity:
            return self.patient_slices_num_dic[modality][specificity]
        else:
            print("patient {} doesn't have slices in format {} and {}".format(self.patient_id, modality, specificity))
return 0
def get_path_for_mod_and_spec(self,mod,spec):
if self.get_slice_num(mod,spec)>0:
path = os.path.join(self.patient_root_path,mod,spec)
return path
else:
return None
def __debug_check_img_sz(file_path_list):
fp_to_del = []
for fp in file_path_list:
img = sitk.ReadImage(fp)
img_shape = sitk.GetArrayFromImage(img).shape
if not img_shape == (160,384,384):
print("!! image size not matched , img:{} sz:{} \n".format(os.path.split(fp)[1], img_shape))
fp_to_del.append(fp)
return fp_to_del
f= __debug_check_img_sz
################################################################
"""
abnormal example list:
!! image size not matched , img:9901199_20090422_SAG_3D_DESS_RIGHT_12800503_image.nii.gz sz:(160, 384, 352)
!! image size not matched , img:9052335_20090126_SAG_3D_DESS_RIGHT_12766414_image.nii.gz sz:(176, 384, 384)
!! image size not matched , img:9163391_20110808_SAG_3D_DESS_LEFT_16613250603_image.nii.gz sz:(159, 384, 384)
!! image size not matched , img:9712762_20090420_SAG_3D_DESS_RIGHT_12583306_image.nii.gz sz:(160, 384, 352)
!! image size not matched , img:9388265_20040405_SAG_3D_DESS_LEFT_10016906_image.nii.gz sz:(176, 384, 384)
!! image size not matched , img:9388265_20040405_SAG_3D_DESS_LEFT_10016903_image.nii.gz sz:(176, 384, 384)
!! image size not matched , img:9938453_20071130_SAG_3D_DESS_RIGHT_12140103_image.nii.gz sz:(159, 384, 384)
!! image size not matched , img:9452305_20070228_SAG_3D_DESS_RIGHT_11633112_image.nii.gz sz:(109, 384, 384)
!! image size not matched , img:9219500_20080326_SAG_3D_DESS_RIGHT_12266509_image.nii.gz sz:(8, 384, 384)
!! image size not matched , img:9011949_20060118_SAG_3D_DESS_LEFT_10667703_image.nii.gz sz:(156, 384, 384)
!! image size not matched , img:9885303_20051212_SAG_3D_DESS_LEFT_10624403_image.nii.gz sz:(155, 384, 384)
!! image size not matched , img:9833782_20090519_SAG_3D_DESS_RIGHT_12802313_image.nii.gz sz:(176, 384, 384)
!! image size not matched , img:9462278_20050524_SAG_3D_DESS_RIGHT_10546912_image.nii.gz sz:(156, 384, 384)
!! image size not matched , img:9126260_20060921_SAG_3D_DESS_RIGHT_11309309_image.nii.gz sz:(66, 384, 384)
!! image size not matched , img:9487462_20081003_SAG_3D_DESS_RIGHT_11495603_image.nii.gz sz:(176, 384, 384)
!! image size not matched , img:9847480_20081007_SAG_3D_DESS_RIGHT_11508512_image.nii.gz sz:(159, 384, 384)
!! image size not matched , img:9020714_20101207_SAG_3D_DESS_RIGHT_16613171935_image.nii.gz sz:(118, 384, 384)
"""
#####################################################################
abnormal_example_list=\
[
"9901199_20090422_SAG_3D_DESS_RIGHT_12800503_image.nii.gz",
"9052335_20090126_SAG_3D_DESS_RIGHT_12766414_image.nii.gz",
"9163391_20110808_SAG_3D_DESS_LEFT_16613250603_image.nii.gz",
"9712762_20090420_SAG_3D_DESS_RIGHT_12583306_image.nii.gz",
"9388265_20040405_SAG_3D_DESS_LEFT_10016906_image.nii.gz",
"9388265_20040405_SAG_3D_DESS_LEFT_10016903_image.nii.gz",
"9938453_20071130_SAG_3D_DESS_RIGHT_12140103_image.nii.gz" ,
"9452305_20070228_SAG_3D_DESS_RIGHT_11633112_image.nii.gz" ,
"9219500_20080326_SAG_3D_DESS_RIGHT_12266509_image.nii.gz",
"9011949_20060118_SAG_3D_DESS_LEFT_10667703_image.nii.gz",
"9885303_20051212_SAG_3D_DESS_LEFT_10624403_image.nii.gz",
"9833782_20090519_SAG_3D_DESS_RIGHT_12802313_image.nii.gz",
"9462278_20050524_SAG_3D_DESS_RIGHT_10546912_image.nii.gz",
"9126260_20060921_SAG_3D_DESS_RIGHT_11309309_image.nii.gz",
"9487462_20081003_SAG_3D_DESS_RIGHT_11495603_image.nii.gz",
"9847480_20081007_SAG_3D_DESS_RIGHT_11508512_image.nii.gz",
"9020714_20101207_SAG_3D_DESS_RIGHT_16613171935_image.nii.gz"
]
class OAIDataPrepare():
"""
    the dataset is organized in the following style: patient_id/modality/specificity/
    each folder contains a txt file named "slice_label.txt" (note: 'slice' here actually means volume!)
    each line includes the path of a volume and the path of its corresponding label (None if no label),
    e.g. of slice_label.txt
/playpen/zhenlinx/Data/OAI_segmentation/Nifti_rescaled/9003406_20041118_SAG_3D_DESS_LEFT_016610296205_image.nii.gz /playpen/zhenlinx/Data/OAI_segmentation/Nifti_rescaled/9003406_20041118_SAG_3D_DESS_LEFT_016610296205_label_all.nii.gz
/playpen/zhenlinx/Data/OAI_segmentation/Nifti_rescaled/9003406_20060322_SAG_3D_DESS_LEFT_016610899303_image.nii.gz /playpen/zhenlinx/Data/OAI_segmentation/Nifti_rescaled/9003406_20060322_SAG_3D_DESS_LEFT_016610899303_label_all.nii.gz
"""
def __init__(self):
using_machine_annotating_data = True
if using_machine_annotating_data:
self.raw_data_path_list = ["/playpen/zhenlinx/Data/OAI_segmentation/Nifti_6sets_rescaled"]
self.raw_label_path_list =[ "/playpen/zhenlinx/Data/OAI_segmentation/segmentations/images_6sets_right/Cascaded_2_AC_residual-1-s1_end2end_multi-out_UNet_bias_Nifti_rescaled_train1_patch_128_128_32_batch_2_sample_0.01-0.02_cross_entropy_lr_0.0005_scheduler_multiStep_02262018_013038",
"/playpen/zhenlinx/Data/OAI_segmentation/segmentations/images_6sets_left/Cascaded_2_AC_residual-1-s1_end2end_multi-out_UNet_bias_Nifti_rescaled_train1_patch_128_128_32_batch_2_sample_0.01-0.02_cross_entropy_lr_0.0005_scheduler_multiStep_02262018_013038"]
self.output_root_path = "/playpen/zyshen/summer/oai_registration/reg_0623/data"
self.output_data_path = "/playpen/zyshen/summer/oai_registration/reg_0623/data/patient_slice"
else:
self.raw_data_path_list = ["/playpen/zhenlinx/Data/OAI_segmentation/Nifti_rescaled"]
self.raw_label_path_list = ["/playpen/zhenlinx/Data/OAI_segmentation/Nifti_rescaled"]
self.output_root_path = "/playpen/zyshen/summer/oai_registration/reg_0820/data"
self.output_data_path = "/playpen/zyshen/summer/oai_registration/reg_0820/data/patient_slice"
self.raw_file_path_list = []
self.raw_file_label_path_list= []
self.patient_info_dic= {}
self.image_file_end = '*image.nii.gz'
if using_machine_annotating_data:
self.label_file_end = '*reflect.nii.gz'
else:
self.label_file_end = '*label_all.nii.gz'
self.debug = False
def prepare_data(self):
self.get_file_list()
self.__factor_file_list()
self.__build_and_write_in()
def __filter_file(self, path_list, file_end):
f_filter =[]
import fnmatch
for path in path_list:
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, file_end):
f_filter.append(os.path.join(root, filename))
return f_filter
def get_file_list(self):
self.raw_file_path_list = self.__filter_file(self.raw_data_path_list,self.image_file_end)
self.raw_file_label_path_list = self.__filter_file(self.raw_label_path_list,self.label_file_end)
self.remove_abnormal_data()
def remove_abnormal_data(self):
if self.debug:
number_of_workers=20
fp_to_del = []
            file_partitions = np.array_split(self.raw_file_path_list, number_of_workers)
            with Pool(processes=number_of_workers) as pool:
                fp_to_del_tmp=pool.map(f, file_partitions)
for fp_list in fp_to_del_tmp:
for fp in fp_list:
fp_to_del.append(fp)
print("total {} paths need to be removed".format(len(fp_to_del)))
for fp in fp_to_del:
self.raw_file_path_list.remove(fp)
else:
            # iterate over a copy: removing items from a list while iterating
            # over it skips the element that follows each removal
            for fp in list(self.raw_file_path_list):
                fn = os.path.split(fp)[1]
                if fn in abnormal_example_list:
                    self.raw_file_path_list.remove(fp)
                    print("!! {} is removed from the image list".format(fn))
def __factor_file(self, f_path):
"""
For a file name like 9000099_20050712_SAG_3D_DESS_LEFT_10424405_image.nii.gz
9000099 is the patient ID, 20050712 is the scan date,
SAG_3D_DESS is the image modality,
LEFT means left knee,
and 10424405 is the image id.
:return:
"""
file_name = os.path.split(f_path)[-1]
factor_list = file_name.split('_')
patient_id = factor_list[0]
scan_date = int(factor_list[1])
modality = factor_list[2] + '_' + factor_list[3] + '_'+factor_list[4]
specificity = factor_list[5]
f = lambda x,y : x+'_'+y
file_name = reduce(f,factor_list[:7])
return {'file_path': f_path,'slice_name': file_name,'patient_id':patient_id, 'scan_date':scan_date, 'modality':modality, 'specificity':specificity,'label_path':'None'}
def __factor_file_list(self):
for f_path in self.raw_file_path_list:
fd = self.__factor_file(f_path)
if fd['patient_id'] not in self.patient_info_dic:
self.patient_info_dic[fd['patient_id']] = {}
if fd['modality'] not in self.patient_info_dic[fd['patient_id']]:
self.patient_info_dic[fd['patient_id']][fd['modality']] = {}
if fd['specificity'] not in self.patient_info_dic[fd['patient_id']][fd['modality']]:
self.patient_info_dic[fd['patient_id']][fd['modality']][fd['specificity']] = {}
cur_dict = self.patient_info_dic[fd['patient_id']][fd['modality']][fd['specificity']][fd['slice_name']]={}
cur_dict['file_path'] =fd['file_path']
cur_dict['slice_name'] =fd['slice_name']
cur_dict['scan_date'] =fd['scan_date']
cur_dict['label_path'] = fd['label_path']
for f_path in self.raw_file_label_path_list:
fd = self.__factor_file(f_path)
            try:
                self.patient_info_dic[fd['patient_id']][fd['modality']][fd['specificity']][fd['slice_name']]['label_path'] = f_path
            except KeyError:
                # label file without a matching image slice; skip it
                pass
def __build_and_write_in(self):
make_dir(self.output_root_path)
with open(os.path.join(self.output_root_path,'patient_id.txt'),'w') as fr:
for pat_id in self.patient_info_dic:
has_complete_label = True
for mod in self.patient_info_dic[pat_id]:
for spec in self.patient_info_dic[pat_id][mod]:
folder_path = os.path.join(self.output_data_path,pat_id,mod,spec)
make_dir(folder_path)
slices_info_dict = self.patient_info_dic[pat_id][mod][spec]
sorted_slice_name_list = self.__sort_by_scan_date(slices_info_dict)
with open(os.path.join(folder_path,'slice_label.txt'), 'w') as f:
for name in sorted_slice_name_list:
f.write(slices_info_dict[name]['file_path'])
f.write("\t")
f.write(slices_info_dict[name]['label_path'])
f.write("\n")
has_complete_label = has_complete_label if slices_info_dict[name]['label_path'] !='None' else False
label_complete_str = 'annotation_complete' if has_complete_label else 'annotation_not_complete'
fr.write(pat_id +'\t' + label_complete_str +'\n')
def __sort_by_scan_date(self, info_dict):
slices_name_list=[]
slices_date_list= []
        for slice_name in info_dict:
            slices_name_list.append(info_dict[slice_name]['slice_name'])
            slices_date_list.append(info_dict[slice_name]['scan_date'])
slices_name_np = np.array(slices_name_list)
slices_date_np = np.array(slices_date_list)
sorted_index = np.argsort(slices_date_np)
slices_name_np = slices_name_np[sorted_index]
return list(slices_name_np)
#
# test = OAIDataPrepare()
# test.debug=False
# test.prepare_data()
# patients = Patients(full_init=True)
# filtered_patients = patients.get_filtered_patients_list(specificity='RIGHT',num_of_patients=3, len_time_range=[2,7], use_random=False)
|
319ca39db6283f9d7b254a5ac72916ca46f308f3
|
96dd2e2f3db81e9c5f01cc11e1c53c54f1865b4e
|
/benchmarks/bench_structs.py
|
33457339a04ef1d458c51766db1fec76c20f2d36
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
jcrist/msgspec
|
10d5658a618bdd823ecbece7786456707a81ccd9
|
e76221f770e9cdcf40e08759a5f88a328e3db8d6
|
refs/heads/main
| 2023-08-17T10:27:54.573787
| 2023-08-16T04:42:43
| 2023-08-16T04:47:32
| 332,952,543
| 1,191
| 44
|
BSD-3-Clause
| 2023-09-06T22:26:46
| 2021-01-26T02:53:57
|
Python
|
UTF-8
|
Python
| false
| false
| 5,249
|
py
|
bench_structs.py
|
"""This file benchmarks dataclass-like libraries. It measures the following
operations:
- Time to import a new class definition
- Time to create an instance of that class
- Time to compare an instance of that class with another instance.
"""
from time import perf_counter
order_template = """
def __{method}__(self, other):
if type(self) is not type(other):
return NotImplemented
return (
(self.a, self.b, self.c, self.d, self.e) {op}
(other.a, other.b, other.c, other.d, other.e)
)
"""
classes_template = """
import reprlib
class C{n}:
def __init__(self, a, b, c, d, e):
self.a = a
self.b = b
self.c = c
self.d = d
self.e = e
@reprlib.recursive_repr()
def __repr__(self):
return (
f"{{type(self).__name__}}(a={{self.a!r}}, b={{self.b!r}}, "
f"c={{self.c!r}}, d={{self.d!r}}, e={{self.e!r}})"
)
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return (
self.a == other.a and
self.b == other.b and
self.c == other.c and
self.d == other.d and
self.e == other.e
)
""" + "".join(
[
order_template.format(method="lt", op="<"),
order_template.format(method="le", op="<="),
order_template.format(method="gt", op=">"),
order_template.format(method="ge", op=">="),
]
)
attrs_template = """
from attr import define
@define(order=True)
class C{n}:
a: int
b: int
c: int
d: int
e: int
"""
dataclasses_template = """
from dataclasses import dataclass
@dataclass(order=True)
class C{n}:
a: int
b: int
c: int
d: int
e: int
"""
pydantic_template = """
from pydantic import BaseModel
class C{n}(BaseModel):
a: int
b: int
c: int
d: int
e: int
"""
msgspec_template = """
from msgspec import Struct
class C{n}(Struct, order=True):
a: int
b: int
c: int
d: int
e: int
"""
sources = {
"standard classes": classes_template,
"attrs": attrs_template,
"dataclasses": dataclasses_template,
"pydantic": pydantic_template,
"msgspec": msgspec_template,
}
def bench(name, template):
print(f"Benchmarking {name}:")
N_classes = 100
source = "\n".join(template.format(n=i) for i in range(N_classes))
code_obj = compile(source, "__main__", "exec")
# Benchmark defining new types
N = 200
start = perf_counter()
for _ in range(N):
ns = {}
exec(code_obj, ns)
end = perf_counter()
define_time = ((end - start) / (N * N_classes)) * 1e6
print(f"- define: {define_time:.2f} μs")
C = ns["C0"]
# Benchmark creating new instances
N = 1000
M = 1000
start = perf_counter()
for _ in range(N):
[C(a=i, b=i, c=i, d=i, e=i) for i in range(M)]
end = perf_counter()
init_time = ((end - start) / (N * M)) * 1e6
print(f"- init: {init_time:.2f} μs")
# Benchmark equality
N = 1000
M = 1000
val = M - 1
needle = C(a=val, b=val, c=val, d=val, e=val)
haystack = [C(a=i, b=i, c=i, d=i, e=i) for i in range(M)]
start = perf_counter()
for _ in range(N):
haystack.index(needle)
end = perf_counter()
equality_time = ((end - start) / (N * M)) * 1e6
print(f"- equality: {equality_time:.2f} μs")
# Benchmark order
try:
needle < needle
except TypeError:
order_time = None
print("- order: N/A")
else:
start = perf_counter()
for _ in range(N):
for obj in haystack:
if obj >= needle:
break
end = perf_counter()
order_time = ((end - start) / (N * M)) * 1e6
print(f"- order: {order_time:.2f} μs")
return (name, define_time, init_time, equality_time, order_time)
def format_table(results):
columns = (
"",
"import (μs)",
"create (μs)",
"equality (μs)",
"order (μs)",
)
def f(n):
return "N/A" if n is None else f"{n:.2f}"
rows = []
for name, *times in results:
rows.append((f"**{name}**", *(f(t) for t in times)))
widths = tuple(max(max(map(len, x)), len(c)) for x, c in zip(zip(*rows), columns))
row_template = ("|" + (" %%-%ds |" * len(columns))) % widths
header = row_template % tuple(columns)
bar_underline = "+%s+" % "+".join("=" * (w + 2) for w in widths)
bar = "+%s+" % "+".join("-" * (w + 2) for w in widths)
parts = [bar, header, bar_underline]
for r in rows:
parts.append(row_template % r)
parts.append(bar)
return "\n".join(parts)
def main():
import argparse
parser = argparse.ArgumentParser(description="Benchmark msgspec Struct operations")
parser.add_argument(
"--output-table",
action="store_true",
help="whether to output a ReST table at the end",
)
args = parser.parse_args()
results = []
for name, source in sources.items():
results.append(bench(name, source))
if args.output_table:
print(format_table(results))
if __name__ == "__main__":
main()
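# Example invocation (a sketch; requires the benchmarked libraries -- attrs,
# pydantic and msgspec -- to be importable):
#   python bench_structs.py --output-table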
|
02bc798de784aae6af42f14186d2705b804ef150
|
f9f074c44b67a11d4630b5e1cc15e016e8d73cc8
|
/Research/kubeflow-on-azure-stack-lab/04-KFServing/tensorflow_web_infer.py
|
0f2eb6bd1f84c1adb6a219a8c2f8144053008e9e
|
[
"MIT"
] |
permissive
|
Azure-Samples/azure-intelligent-edge-patterns
|
361694680c7e48d3761c5416175788355b684dcd
|
1d2f42cbf9f21157c1e1abf044b26160dfed5b16
|
refs/heads/master
| 2023-05-26T13:15:47.085088
| 2023-02-28T17:25:53
| 2023-02-28T17:25:53
| 186,706,933
| 193
| 164
|
MIT
| 2023-02-28T17:25:55
| 2019-05-14T22:02:41
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,597
|
py
|
tensorflow_web_infer.py
|
from PIL import Image
import numpy as np
import tensorflow as tf
import requests
import json
sz=(224,224)
myimagefilename = 'grace_hopper.jpg'
# Set the host and port below according to what you deployed in your system,
# e.g. http://51.141.178.47:5001/score
scoring_uri = 'http://<INGRESS_HOST>:<INGRESS_PORT>/v1/models/custom-model:predict'
try:
labels_path = tf.keras.utils.get_file(
'ImageNetLabels.txt',
'https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')
imagenet_labels = np.array(open(labels_path).read().splitlines())
np.set_printoptions(threshold=np.inf)
print(f"imagenet_labels: {imagenet_labels}")
# Creating the json using PIL
pil_im = Image.open(myimagefilename,'r')
pil_out = pil_im.resize(sz)
pil_out.save('resized_image.jpg')
test_sample_via_pil = "{\"instances\":[{\"input_1\":" + str(np.array(pil_out).tolist()) + "}]}"
# print(test_sample)
## We can check that we get the image back if we wanted to
# imgX = Image.fromarray(np.array(pil_out), 'RGB')
# imgX.save('resized_image_from_array.png')
    # Creating the json using keras pre-processing
file2 = tf.keras.utils.get_file(
"bowtie2.jpg",
"https://storage.googleapis.com/download.tensorflow.org/example_images/grace_hopper.jpg"
)
img2 = tf.keras.preprocessing.image.load_img(file2, target_size=[224, 224])
x2 = tf.keras.preprocessing.image.img_to_array(img2)
x2 = tf.keras.applications.mobilenet.preprocess_input(
x2[tf.newaxis,...])
# we can save it as a .npy, for model_cli
np.save("mybowtie2.npy",x2)
myx2str = str(x2.tolist())[1:-1]
test_sample2 = "{\"instances\":[{\"input_1\":" + myx2str + "}]}"
print(test_sample2)
print(f"scoring_uri is {scoring_uri}")
# Set the content type
headers = {'Content-Type': 'application/json',
'Host': 'custom-model.kfserving-test.example.com'}
# Make the request
resp = requests.post(scoring_uri, test_sample2, headers=headers)
result_test_s = resp.text.strip("[]")
print(f"result_test_s: \"{result_test_s}\"")
result = json.loads(result_test_s)
result_test = result["predictions"]
print(f"result_test: {result_test}")
decoded_test = np.argsort(result_test)[0,::-1][:5]
print("Result for test image: ", decoded_test)
decoded_test_labeled = imagenet_labels[decoded_test]
print(" ", decoded_test_labeled)
except KeyError as e:
print(str(e))
|
4afd434d9d01171b97cbc8d93ce069f348b582fa
|
ffb0b623455f22af81a03eb52889bd1bfed50566
|
/src/bandersnatch/configuration.py
|
30b050d7ced8e5a37d447a060b00bdf23a16495c
|
[
"AFL-3.0"
] |
permissive
|
pypa/bandersnatch
|
c5ba356caae55e4edb80005da625b04e7fb70500
|
bf19ea547086c1b9dd997d1dc00081109b5cd626
|
refs/heads/main
| 2023-09-03T03:27:19.538217
| 2023-08-28T23:55:04
| 2023-08-28T23:55:04
| 133,377,409
| 405
| 157
|
AFL-3.0
| 2023-09-13T10:46:33
| 2018-05-14T14:52:22
|
Python
|
UTF-8
|
Python
| false
| false
| 8,196
|
py
|
configuration.py
|
"""
Module containing classes to access the bandersnatch configuration file
"""
import configparser
import importlib.resources
import logging
from pathlib import Path
from typing import Any, NamedTuple
from .simple import SimpleDigest, SimpleFormat, get_digest_value, get_format_value
logger = logging.getLogger("bandersnatch")
class SetConfigValues(NamedTuple):
json_save: bool
root_uri: str
diff_file_path: str
diff_append_epoch: bool
digest_name: str
storage_backend_name: str
cleanup: bool
release_files_save: bool
compare_method: str
download_mirror: str
download_mirror_no_fallback: bool
simple_format: SimpleFormat
class Singleton(type): # pragma: no cover
_instances: dict["Singleton", type] = {}
def __call__(cls, *args: Any, **kwargs: Any) -> type:
if cls not in cls._instances:
cls._instances[cls] = super().__call__(*args, **kwargs)
return cls._instances[cls]
class BandersnatchConfig(metaclass=Singleton):
# Ensure we only show the deprecations once
SHOWN_DEPRECATIONS = False
def __init__(self, config_file: str | None = None) -> None:
"""
Bandersnatch configuration class singleton
This class is a singleton that parses the configuration once at the
start time.
Parameters
==========
config_file: str, optional
Path to the configuration file to use
"""
self.found_deprecations: list[str] = []
self.default_config_file = str(
importlib.resources.files("bandersnatch") / "default.conf"
)
self.config_file = config_file
self.load_configuration()
# Keeping for future deprecations ... Commenting to save function call etc.
# self.check_for_deprecations()
def check_for_deprecations(self) -> None:
if self.SHOWN_DEPRECATIONS:
return
self.SHOWN_DEPRECATIONS = True
def load_configuration(self) -> None:
"""
Read the configuration from a configuration file
"""
config_file = self.default_config_file
if self.config_file:
config_file = self.config_file
self.config = configparser.ConfigParser(delimiters="=")
# mypy is unhappy with us assigning to a method - (monkeypatching?)
self.config.optionxform = lambda option: option # type: ignore
self.config.read(config_file)
def validate_config_values( # noqa: C901
config: configparser.ConfigParser,
) -> SetConfigValues:
try:
json_save = config.getboolean("mirror", "json")
except configparser.NoOptionError:
logger.error(
"Please update your config to include a json "
+ "boolean in the [mirror] section. Setting to False"
)
json_save = False
try:
root_uri = config.get("mirror", "root_uri")
except configparser.NoOptionError:
root_uri = ""
try:
diff_file_path = config.get("mirror", "diff-file")
except configparser.NoOptionError:
diff_file_path = ""
if "{{" in diff_file_path and "}}" in diff_file_path:
diff_file_path = diff_file_path.replace("{{", "").replace("}}", "")
diff_ref_section, _, diff_ref_key = diff_file_path.partition("_")
try:
diff_file_path = config.get(diff_ref_section, diff_ref_key)
except (configparser.NoOptionError, configparser.NoSectionError):
logger.error(
"Invalid section reference in `diff-file` key. "
"Please correct this error. Saving diff files in"
" base mirror directory."
)
diff_file_path = str(
Path(config.get("mirror", "directory")) / "mirrored-files"
)
try:
diff_append_epoch = config.getboolean("mirror", "diff-append-epoch")
except configparser.NoOptionError:
diff_append_epoch = False
try:
logger.debug("Checking config for storage backend...")
storage_backend_name = config.get("mirror", "storage-backend")
logger.debug("Found storage backend in config!")
except configparser.NoOptionError:
storage_backend_name = "filesystem"
logger.debug(
"Failed to find storage backend in config, falling back to default!"
)
logger.info(f"Selected storage backend: {storage_backend_name}")
try:
digest_name = get_digest_value(config.get("mirror", "digest_name"))
except configparser.NoOptionError:
digest_name = SimpleDigest.SHA256
logger.debug(f"Using digest {digest_name} by default ...")
except ValueError as e:
logger.error(
f"Supplied digest_name {config.get('mirror', 'digest_name')} is "
+ "not supported! Please update the digest_name in the [mirror] "
+ "section of your config to a supported digest value."
)
raise e
try:
cleanup = config.getboolean("mirror", "cleanup")
except configparser.NoOptionError:
logger.debug(
"bandersnatch is not cleaning up non PEP 503 normalized Simple "
+ "API directories"
)
cleanup = False
release_files_save = config.getboolean("mirror", "release-files", fallback=True)
if not release_files_save and not root_uri:
root_uri = "https://files.pythonhosted.org"
logger.error(
"Please update your config to include a root_uri in the [mirror] "
+ "section when disabling release file sync. Setting to "
+ root_uri
)
try:
logger.debug("Checking config for compare method...")
compare_method = config.get("mirror", "compare-method")
logger.debug("Found compare method in config!")
except configparser.NoOptionError:
compare_method = "hash"
logger.debug(
"Failed to find compare method in config, falling back to default!"
)
if compare_method not in ("hash", "stat"):
raise ValueError(
f"Supplied compare_method {compare_method} is not supported! Please "
+ "update compare_method to one of ('hash', 'stat') in the [mirror] "
+ "section."
)
logger.info(f"Selected compare method: {compare_method}")
try:
logger.debug("Checking config for alternative download mirror...")
download_mirror = config.get("mirror", "download-mirror")
logger.info(f"Selected alternative download mirror {download_mirror}")
except configparser.NoOptionError:
download_mirror = ""
logger.debug("No alternative download mirror found in config.")
if download_mirror:
try:
logger.debug(
"Checking config for only download from alternative download"
+ "mirror..."
)
download_mirror_no_fallback = config.getboolean(
"mirror", "download-mirror-no-fallback"
)
if download_mirror_no_fallback:
logger.info("Setting to download from mirror without fallback")
else:
logger.debug("Setting to fallback to original if download mirror fails")
except configparser.NoOptionError:
download_mirror_no_fallback = False
logger.debug("No download mirror fallback setting found in config.")
else:
download_mirror_no_fallback = False
logger.debug(
"Skip checking download-mirror-no-fallback because dependent option"
+ "is not set in config."
)
try:
simple_format = get_format_value(config.get("mirror", "simple-format"))
except configparser.NoOptionError:
logger.debug("Storing all Simple Formats by default ...")
simple_format = SimpleFormat.ALL
return SetConfigValues(
json_save,
root_uri,
diff_file_path,
diff_append_epoch,
digest_name,
storage_backend_name,
cleanup,
release_files_save,
compare_method,
download_mirror,
download_mirror_no_fallback,
simple_format,
)
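# Usage sketch (the config path below is illustrative, and the file must
# contain a valid [mirror] section):
#
#     config = BandersnatchConfig(config_file="/etc/bandersnatch.conf").config
#     values = validate_config_values(config)
#     print(values.storage_backend_name, values.compare_method)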
|
30735c2c2a33271e2a0ff60b3610b0a6bad2728e
|
57c0a57269dfc516c7f46468940efb62cb863af4
|
/langchain/chains/__init__.py
|
09563968e5f865bac6d07b898f829b49b086ef43
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
microsoft/MM-REACT
|
e67a843faadf4752f5e9d0e2dbf0c80068dbd288
|
b8f29af7f3c24cf3a4554bebfa2053064467fbdb
|
refs/heads/main
| 2023-08-31T03:01:42.246514
| 2023-05-12T20:43:11
| 2023-05-12T20:43:11
| 614,230,777
| 705
| 57
|
MIT
| 2023-08-30T00:04:13
| 2023-03-15T06:56:13
|
Python
|
UTF-8
|
Python
| false
| false
| 2,045
|
py
|
__init__.py
|
"""Chains are easily reusable components which can be linked together."""
from langchain.chains.api.base import APIChain
from langchain.chains.chat_vector_db.base import ChatVectorDBChain
from langchain.chains.combine_documents.base import AnalyzeDocumentChain
from langchain.chains.constitutional_ai.base import ConstitutionalChain
from langchain.chains.conversation.base import ConversationChain
from langchain.chains.graph_qa.base import GraphQAChain
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.llm import LLMChain
from langchain.chains.llm_bash.base import LLMBashChain
from langchain.chains.llm_checker.base import LLMCheckerChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.llm_requests import LLMRequestsChain
from langchain.chains.loading import load_chain
from langchain.chains.mapreduce import MapReduceChain
from langchain.chains.moderation import OpenAIModerationChain
from langchain.chains.pal.base import PALChain
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
from langchain.chains.sequential import SequentialChain, SimpleSequentialChain
from langchain.chains.sql_database.base import (
SQLDatabaseChain,
SQLDatabaseSequentialChain,
)
from langchain.chains.transform import TransformChain
from langchain.chains.vector_db_qa.base import VectorDBQA
__all__ = [
"ConversationChain",
"LLMChain",
"LLMBashChain",
"LLMCheckerChain",
"LLMMathChain",
"PALChain",
"QAWithSourcesChain",
"SQLDatabaseChain",
"SequentialChain",
"SimpleSequentialChain",
"VectorDBQA",
"VectorDBQAWithSourcesChain",
"APIChain",
"LLMRequestsChain",
"TransformChain",
"MapReduceChain",
"OpenAIModerationChain",
"SQLDatabaseSequentialChain",
"load_chain",
"AnalyzeDocumentChain",
"HypotheticalDocumentEmbedder",
"ChatVectorDBChain",
"GraphQAChain",
"ConstitutionalChain",
]
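# Minimal usage sketch for one of the exported chains, following the
# quickstart-style pattern of this langchain version. Assumes the optional
# openai dependency is installed and OPENAI_API_KEY is set; illustrative only.
if __name__ == "__main__":
    from langchain.llms import OpenAI
    from langchain.prompts import PromptTemplate

    prompt = PromptTemplate(
        input_variables=["product"],
        template="What is a good name for a company that makes {product}?",
    )
    chain = LLMChain(llm=OpenAI(temperature=0.9), prompt=prompt)
    print(chain.run("colorful socks"))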
|
5356f438d382cc4bc37bf344d7d5f371bc2b5cb9
|
6416b746ee71d897789eab1e450000831674dbd0
|
/src/otx/recipes/stages/_base_/schedules/schedule.py
|
46bbc94ddce880f566eb294262c523019a6ad830
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/training_extensions
|
c921f83ad52311af96ff45ae0b88d0aecddd855b
|
80454808b38727e358e8b880043eeac0f18152fb
|
refs/heads/develop
| 2023-08-31T06:29:07.229339
| 2023-08-31T01:57:26
| 2023-08-31T01:57:26
| 154,843,614
| 397
| 230
|
Apache-2.0
| 2023-09-14T06:17:01
| 2018-10-26T14:02:29
|
Python
|
UTF-8
|
Python
| false
| false
| 57
|
py
|
schedule.py
|
# base schedule setting
lr_config = dict(policy="fixed")
|
2c777a3f63b6d38d468416f94de929e85cf0e8a0
|
ecaba173879f92f24e3c951866fda23c0a4fc426
|
/tests/providers/ibmcloud/ibmcloud_virtual_machine_test.py
|
3c134a6f174d66322e84316dc5ae7d3129b7b5f3
|
[
"Classpath-exception-2.0",
"BSD-3-Clause",
"AGPL-3.0-only",
"MIT",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
GoogleCloudPlatform/PerfKitBenchmarker
|
2f4917fd796db4eb90822c557d8fa08a497fbd48
|
d0699f32998898757b036704fba39e5471641f01
|
refs/heads/master
| 2023-09-02T08:14:54.110308
| 2023-09-01T20:28:01
| 2023-09-01T20:28:38
| 21,950,910
| 1,923
| 567
|
Apache-2.0
| 2023-09-13T22:37:42
| 2014-07-17T17:23:26
|
Python
|
UTF-8
|
Python
| false
| false
| 4,835
|
py
|
ibmcloud_virtual_machine_test.py
|
# Copyright 2020 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for perfkitbenchmarker.tests.providers.ibmcloud.ibmcloud_virtual_machine."""
import unittest
from absl import flags
import mock
from perfkitbenchmarker import benchmark_spec
from perfkitbenchmarker import configs
from perfkitbenchmarker import linux_benchmarks
from perfkitbenchmarker.configs import benchmark_config_spec
from perfkitbenchmarker.providers.ibmcloud import ibm_api
from perfkitbenchmarker.providers.ibmcloud import ibmcloud_virtual_machine
from tests import pkb_common_test_case
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'unixbench'
BENCHMARK_CONFIG = """
unixbench:
description: Runs UnixBench.
vm_groups:
default:
vm_spec: *default_single_core
"""
URI = 'ibmuri123'
class TestIbmCloudVirtualMachine(pkb_common_test_case.TestOsMixin,
ibmcloud_virtual_machine.IbmCloudVirtualMachine
):
pass
class IbmCloudVirtualMachineTest(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(IbmCloudVirtualMachineTest, self).setUp()
self.mock_create_instance = self.enter_context(
mock.patch.object(ibm_api.IbmAPICommand, 'CreateInstance'))
self.mock_instance_status = self.enter_context(
mock.patch.object(ibm_api.IbmAPICommand, 'InstanceStatus'))
self.mock_get_resource = self.enter_context(
mock.patch.object(ibm_api.IbmAPICommand, 'GetResource'))
self.mock_create_vpc = self.enter_context(
mock.patch.object(ibm_api.IbmAPICommand, 'CreateVpc'))
self.mock_create_subnet = self.enter_context(
mock.patch.object(ibm_api.IbmAPICommand, 'CreateSubnet'))
self.mock_check_environment = self.enter_context(
mock.patch.object(ibm_api.IbmAPICommand, '_CheckEnvironment'))
with mock.patch.object(ibm_api.IbmAPICommand, '__init__',
lambda self: None):
self.cmd = ibm_api.IbmAPICommand()
self.vm = self._CreateTestIbmCloudVirtualMachine()
def _CreateBenchmarkSpecFromYaml(self, yaml_string,
benchmark_name=BENCHMARK_NAME):
config = configs.LoadConfig(yaml_string, {}, benchmark_name)
spec = self._CreateBenchmarkSpecFromConfigDict(config, benchmark_name)
spec.disable_interrupt_moderation = False
spec.disable_rss = False
spec.zone = 'us-south-1'
spec.cidr = '10.101.0.0/24'
spec.machine_type = 'Test_machine_type'
spec.gpu_count = '1'
spec.gpu_type = 'test-gpu-type'
spec.image = 'test-image'
spec.install_packages = 'None'
spec.assign_external_ip = True
spec.background_cpu_threads = 'None'
spec.background_network_mbits_per_sec = '1'
spec.background_network_ip_type = 'None'
spec.boot_startup_script = 'None'
spec.vm_metadata = {}
return spec
def _CreateBenchmarkSpecFromConfigDict(self, config_dict, benchmark_name):
config_spec = benchmark_config_spec.BenchmarkConfigSpec(
benchmark_name,
flag_values=FLAGS,
**config_dict)
benchmark_module = next((b for b in linux_benchmarks.BENCHMARKS
if b.BENCHMARK_NAME == benchmark_name))
return benchmark_spec.BenchmarkSpec(benchmark_module, config_spec, URI)
def _CreateTestIbmCloudVirtualMachine(self):
spec = self._CreateBenchmarkSpecFromYaml(BENCHMARK_CONFIG)
return TestIbmCloudVirtualMachine(spec)
def testSetupResources(self):
self.vm.zone = 'us-south-2'
self.prefix = 'perfkit'
FLAGS.run_uri = 'testuri'
self.mock_create_vpc.side_effect = ['vpc_id']
self.mock_create_subnet.side_effect = [{'id': 'subnet_id'}]
self.mock_get_resource.side_effect = [{'id': 'resource_id'}]
self.assertEqual('vpc_id', self.cmd.CreateVpc())
self.assertEqual('subnet_id', self.cmd.CreateSubnet()['id'])
self.assertEqual('resource_id', self.cmd.GetResource()['id'])
def testIbmCloudVirtualMachine(self):
self.mock_create_instance.side_effect = [{'id': 'vm_test_id'}]
self.mock_instance_status.side_effect = ['running']
self.assertEqual(20000, self.vm.volume_iops)
self.assertEqual('running', self.cmd.InstanceStatus())
self.assertEqual('vm_test_id', self.cmd.CreateInstance()['id'])
if __name__ == '__main__':
unittest.main()
|
047b60dcee8c22f3e0f4f81b4d923fd302bd3a76
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/tools/perf/generate_legacy_perf_dashboard_json_unittest.py
|
b92b23bcba8935e006802d6f68e4d4396c340b27
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 3,666
|
py
|
generate_legacy_perf_dashboard_json_unittest.py
|
#!/usr/bin/env vpython3
# Copyright 2016 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import unittest
import six
import generate_legacy_perf_dashboard_json
class LegacyResultsProcessorUnittest(unittest.TestCase):
def setUp(self):
"""Set up for all test method of each test method below."""
super(LegacyResultsProcessorUnittest, self).setUp()
if six.PY2:
self.data_directory = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'testdata')
else:
self.data_directory = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'testdata', 'python3')
def _ConstructDefaultProcessor(self):
"""Creates a LegacyResultsProcessor instance.
Returns:
An instance of LegacyResultsProcessor class
"""
return generate_legacy_perf_dashboard_json.LegacyResultsProcessor()
def _ProcessLog(self, log_processor, logfile): # pylint: disable=R0201
"""Reads in a input log file and processes it.
This changes the state of the log processor object; the output is stored
in the object and can be gotten using the PerformanceLogs() method.
Args:
log_processor: An PerformanceLogProcessor instance.
logfile: File name of an input performance results log file.
"""
for line in open(os.path.join(self.data_directory, logfile)):
log_processor.ProcessLine(line)
def _CheckFileExistsWithData(self, logs, graph):
"""Asserts that |graph| exists in the |logs| dict and is non-empty."""
self.assertTrue(graph in logs, 'File %s was not output.' % graph)
self.assertTrue(logs[graph], 'File %s did not contain data.' % graph)
def _ConstructParseAndCheckLogfiles(self, inputfiles, graphs):
"""Uses a log processor to process the given input files.
Args:
inputfiles: A list of input performance results log file names.
      graphs: List of graph names expected in the output.
Returns:
A dictionary mapping output file name to output file lines.
"""
parser = self._ConstructDefaultProcessor()
for inputfile in inputfiles:
self._ProcessLog(parser, inputfile)
logs = json.loads(parser.GenerateGraphJson())
for graph in graphs:
self._CheckFileExistsWithData(logs, graph)
return logs
def _ConstructParseAndCheckJSON(
self, inputfiles, logfiles, graphs):
"""Processes input with a log processor and checks against expectations.
Args:
inputfiles: A list of input performance result log file names.
logfiles: A list of expected output ".dat" file names.
      graphs: A list of expected graph names.
"""
logs = self._ConstructParseAndCheckLogfiles(inputfiles, graphs)
index = 0
for filename in logfiles:
graph_name = graphs[index]
actual = logs[graph_name]
path = os.path.join(self.data_directory, filename)
expected = json.load(open(path))
self.assertEqual(expected, actual, 'JSON data in %s did not match '
'expectations.' % filename)
index += 1
def testSummary(self):
graphs = ['commit_charge',
'ws_final_total', 'vm_final_browser', 'vm_final_total',
'ws_final_browser', 'processes', 'artificial_graph']
# Tests the output of "summary" files, which contain per-graph data.
input_files = ['graphing_processor.log']
output_files = ['%s-summary.dat' % graph for graph in graphs]
self._ConstructParseAndCheckJSON(input_files, output_files, graphs)
if __name__ == '__main__':
unittest.main()
|
266c7f9b7aa1ff1442edf5960caf134dcb00fb29
|
25daa9604b83ddc199764309c39da106a5313c22
|
/test/test_pipeline_arg_parsing.py
|
0987bdf102430e6932fb4863286b46cdfaddfbbe
|
[
"MIT"
] |
permissive
|
ThoughtWorksInc/CD4ML-Scenarios
|
83d3f162a2ddbb7e02662d03f769feb8978b4de9
|
a9200df2f926f7e398dd820b99a11515c9a3eacb
|
refs/heads/master
| 2023-05-12T07:11:39.677610
| 2022-05-31T14:14:57
| 2022-05-31T14:14:57
| 246,649,538
| 127
| 327
|
MIT
| 2023-05-01T23:38:18
| 2020-03-11T18:26:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,406
|
py
|
test_pipeline_arg_parsing.py
|
from scripts import pipeline as pipeline_script
def test_no_arguments_parse():
argument_parser = pipeline_script.make_argument_parser()
parsed_arguments = argument_parser.parse_args([])
assert parsed_arguments.problem_name == "houses"
assert parsed_arguments.ml_pipeline_params_name == "default"
assert parsed_arguments.feature_set_name == "default"
assert parsed_arguments.algorithm_name == "default"
assert parsed_arguments.algorithm_params_name == "default"
def test_problem_name_supplied():
argument_parser = pipeline_script.make_argument_parser()
parsed_arguments = argument_parser.parse_args(["groceries"])
assert parsed_arguments.problem_name == "groceries"
assert parsed_arguments.ml_pipeline_params_name == "default"
assert parsed_arguments.feature_set_name == "default"
assert parsed_arguments.algorithm_name == "default"
assert parsed_arguments.algorithm_params_name == "default"
def test_all_argument_supplied():
argument_parser = pipeline_script.make_argument_parser()
parsed_arguments = argument_parser.parse_args(["groceries", "a", "b", "c", "d"])
assert parsed_arguments.problem_name == "groceries"
assert parsed_arguments.ml_pipeline_params_name == "a"
assert parsed_arguments.feature_set_name == "b"
assert parsed_arguments.algorithm_name == "c"
assert parsed_arguments.algorithm_params_name == "d"
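# Hypothetical sketch of a parser that satisfies the assertions above -- not
# the project's actual scripts/pipeline.py. The key point is that all five
# arguments are optional positionals, so the empty and partial calls both pass.
import argparse

def make_argument_parser_sketch():
    parser = argparse.ArgumentParser(description="Run the CD4ML pipeline.")
    parser.add_argument("problem_name", nargs="?", default="houses")
    parser.add_argument("ml_pipeline_params_name", nargs="?", default="default")
    parser.add_argument("feature_set_name", nargs="?", default="default")
    parser.add_argument("algorithm_name", nargs="?", default="default")
    parser.add_argument("algorithm_params_name", nargs="?", default="default")
    return parser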
|
10b5210b77dbc727a6ac0efe3b9d3efc16281d81
|
069c2295076c482afadfe6351da5ae02be8e18e6
|
/tests/pagination/custom.py
|
ea04083576be529dc6f1a927349453c56fa2a7cc
|
[
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Python-2.0.1",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] |
permissive
|
django/django
|
5eb557f57053631cd4f566f451e43197309dbeeb
|
c74a6fad5475495756a5bdb18b2cab2b68d429bc
|
refs/heads/main
| 2023-09-01T03:43:44.033530
| 2023-08-31T08:27:32
| 2023-08-31T08:27:32
| 4,164,482
| 73,530
| 38,187
|
BSD-3-Clause
| 2023-09-14T20:03:48
| 2012-04-28T02:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 503
|
py
|
custom.py
|
from django.core.paginator import Page, Paginator
class ValidAdjacentNumsPage(Page):
def next_page_number(self):
if not self.has_next():
return None
return super().next_page_number()
def previous_page_number(self):
if not self.has_previous():
return None
return super().previous_page_number()
class ValidAdjacentNumsPaginator(Paginator):
def _get_page(self, *args, **kwargs):
return ValidAdjacentNumsPage(*args, **kwargs)
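# Minimal usage sketch: at the boundaries the overridden methods return None
# instead of raising (five items, two per page, so page 3 is the last page).
if __name__ == "__main__":
    paginator = ValidAdjacentNumsPaginator(list(range(5)), per_page=2)
    last_page = paginator.page(3)
    assert last_page.next_page_number() is None
    assert last_page.previous_page_number() == 2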
|
1628fe23fc43f7bfb2362f548686f0d2d34ac06f
|
67f6ca6dd3f8fb1d3104f931546c50445846083c
|
/src/main/python/smart/smartplots_run.py
|
12c3f19e054071192e4774a6316f5b76497da422
|
[
"BSD-3-Clause-LBNL"
] |
permissive
|
LBNL-UCB-STI/beam
|
7e63cf28854a0b78e5f123629f5ff84966d75deb
|
ca433c85c592285cf4ff6c28620b3538fe9cc9ba
|
refs/heads/develop
| 2023-09-01T03:51:59.353627
| 2023-08-31T15:04:26
| 2023-08-31T15:04:26
| 73,118,824
| 142
| 71
|
NOASSERTION
| 2023-09-11T14:53:58
| 2016-11-07T20:38:29
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,468
|
py
|
smartplots_run.py
|
from smartplots_setup import pltModeSplitByTrips
from smartplots_setup import pltEnergyPerCapita
from smartplots_setup import pltLdvRhOccupancy
from smartplots_setup import pltLdvPersonHourTraveled
from smartplots_setup import pltModeSplitInPMT
from smartplots_setup import pltModeSplitInVMT
from smartplots_setup import pltLdvTechnologySplitInVMT
from smartplots_setup import pltRHWaitTime
from smartplots_setup import pltRHEmptyPooled
from smartplots_setup import pltLdvRhOccupancyByVMT
from smartplots_setup import tableSummary
import pandas as pd
plt_setup_base_smart = {
'expansion_factor': (7.75/0.315) * 27.0 / 21.3,
'rotation': 13,
'fig_size': (7.5, 4.5),
'scenarios': ['Base', 'Base-Short', 'Base-Long', 'Sharing is Caring', 'Technology Takeover', "All About Me"],
'scenarios_xpos': [1, 3.5, 6.5, 9.5, 12.5, 15.5],
'technologies': ["Base", "BAU", "VTO", "BAU", "VTO", "BAU", "VTO", "BAU", "VTO", "BAU", "VTO"],
'technologies_xpos': [1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16],
'dimension': 11,
'rank_to_filterout': []
}
plt_setup_smart = {
'expansion_factor': (7.75/0.315) * 27.0 / 21.3,
'rotation': 11,
'fig_size': (5, 4.5),
'scenarios': ['Base', 'Sharing is Caring', 'Technology Takeover', "All About Me"],
'scenarios_xpos': [1, 3.5, 6.5, 9.5],
'technologies': ["Base", "BAU", "VTO", "BAU", "VTO", "BAU", "VTO"],
'technologies_xpos': [1, 3, 4, 6, 7, 9, 10],
'dimension': 7,
'rank_to_filterout': [2, 3, 4, 5]
}
output_folder = "/Users/haitam/workspace/pyscripts/data/smart/15thSep2019"
year = "2010"
iteration = "15"
prefix = "{}.{}".format(year, iteration)
metrics_file = "{}/{}.metrics-final.csv".format(output_folder, prefix)
df = pd.read_csv(metrics_file).fillna(0)
tableSummary(plt_setup_smart, df, output_folder, prefix)
pltModeSplitByTrips(plt_setup_smart, df, output_folder, prefix)
pltLdvRhOccupancy(plt_setup_smart, df, output_folder, prefix)
pltModeSplitInPMT(plt_setup_smart, df, output_folder, prefix)
pltLdvTechnologySplitInVMT(plt_setup_smart, df, output_folder, prefix)
pltModeSplitInVMT(plt_setup_smart, df, output_folder, prefix)
pltRHEmptyPooled(plt_setup_smart, df, output_folder, prefix)
pltLdvRhOccupancyByVMT(plt_setup_smart, df, output_folder, prefix)
pltEnergyPerCapita(plt_setup_base_smart, df, output_folder, prefix)
pltLdvPersonHourTraveled(plt_setup_base_smart, df, output_folder, prefix)
pltRHWaitTime(plt_setup_base_smart, df, output_folder, prefix)
|
99ad930f0ebc1df253ccfaa36d0bb124069503f2
|
9abd182d02355ddf0b79afd4a35f7127a4a66f7a
|
/scripts/pose/directpose/tvm_evaluation/pose_model.py
|
26abcf1a713a718bd3888ec09bf59910f36ffeb8
|
[
"Apache-2.0"
] |
permissive
|
dmlc/gluon-cv
|
e1303086419a5733661d0fcb9095c09d4f2382ad
|
567775619f3b97d47e7c360748912a4fd883ff52
|
refs/heads/master
| 2023-07-19T12:02:36.824294
| 2023-01-19T00:37:33
| 2023-01-19T00:37:33
| 122,896,249
| 6,064
| 1,458
|
Apache-2.0
| 2023-01-19T00:37:35
| 2018-02-26T01:33:21
|
Python
|
UTF-8
|
Python
| false
| false
| 9,833
|
py
|
pose_model.py
|
import tvm
from tvm.contrib import graph_runtime
from typing import List
import cv2
import numpy as np
try:
import torch
except ImportError:
torch = None
class PoseEstimationInferenceModel():
def __init__(
self,
model_prefix,
gpu_id,
image_width=1280,
image_height=720,
threshold=0.05,
use_torch=False
):
self.torch = use_torch
if self.torch:
assert torch is not None, "requires torch when `use_torch` is enabled"
self.__new_nd_array = tvm.nd.array
self.device_ctx = tvm.gpu(gpu_id)
self.image_width = image_width
self.image_height = image_height
self.__model = self.load_model(model_prefix, self.device_ctx)
self.threshold = threshold
self.mean = np.array([0.406 * 255, 0.456 * 255, 0.485 * 255], dtype=np.float32)
self.std = np.array([1, 1, 1], dtype=np.float32)
def load_model(self, model_prefix, device_ctx):
if not self.torch:
with open('{}.json'.format(model_prefix), 'r') as f:
model_json = f.read()
model_lib = tvm.runtime.load_module('{}.so'.format(model_prefix))
with open('{}.params'.format(model_prefix), "rb") as f:
model_params = bytearray(f.read())
module = graph_runtime.create(
model_json,
model_lib,
device_ctx
)
module.load_params(model_params)
return module
import torch
from gluoncv.torch import model_zoo
from gluoncv.torch.engine.config import get_cfg_defaults
torch.set_grad_enabled(False)
device = torch.device('cuda')
cfg = get_cfg_defaults(name='directpose')
# cfg.merge_from_file('./configurations/ms_dla_34_4x_syncbn.yaml')
# net = model_zoo.dla34_fpn_directpose(cfg).to(device).eval()
# model = torch.load('model_final.pth')['model']
cfg.merge_from_file('./configurations/ms_aa_resnet50_4x_syncbn.yaml')
net = model_zoo.directpose_resnet_lpf_fpn(cfg).to(device).eval()
model = torch.load('model_final_resnet.pth')['model']
# _ = net(torch.zeros((1,3, self.image_height, self.image_width)).cuda())
net.load_state_dict(model, strict=False)
return net
def process(
self,
payloads: List = [],
**kwargs
) -> List[object]:
"""
:param payloads: a list of {
"image": RGB channel raw image or pre-processed tensor
"transform_info": None means "image" is RGB channel raw image, otherwise "image" is pre-processed tensor and this if the transformation info
}
:return:
for each payload in payloads:
preprocessed_payload = __preprocess(payload)
results = run PersonDetection against preprocessed_payload
postprocessed_results = __postprocess(results)
the final results will be a list of {
"bboxes": N x 4 numpy array in format (x0, y0, x1, y1),
"class_ids": N x 1 numpy array contaning class ids for each bounding box, id = 0 means person,
"scores": N x 1 numpy array containing confidence score for each bounding box
}
"""
results = []
for image in payloads:
if image["transform_info"] is None:
img, transform_info = self.__preprocess(image["image"])
else: # image already preprocessed
img = image["image"]
transform_info = image["transform_info"]
if not self.torch:
self.__model.set_input("input0", self.__new_nd_array(img, ctx=self.device_ctx))
self.__model.run()
class_IDs, nms_ret, bounding_boxs, scores, keypoints = self.__model.get_output(0), self.__model.get_output(1), \
self.__model.get_output(2), self.__model.get_output(3), self.__model.get_output(4)
# dd = class_IDs.asnumpy()
# print(dd.shape, dd)
# raise
                np_bbox, np_ids, np_scores, np_kpts = self.__postprocess(bounding_boxs, class_IDs, scores, keypoints, transform_info, nms_ret)
else:
# img = np.load('input.npy')
class_IDs, idxs, bounding_boxs, scores, keypoints = self.__model(torch.as_tensor(img).cuda())
# dd = bounding_boxs.cpu().numpy()
# print(dd.shape, dd)
# raise
                np_bbox, np_ids, np_scores, np_kpts = self.__postprocess(bounding_boxs, class_IDs, scores, keypoints, transform_info, idxs)
results.append({
"bboxes": np_bbox,
"class_ids": np_ids,
"scores": np_scores,
"keypoints": np_kpts
})
# self.visualize(np_bbox, np_scores, image["image"], transform_info)
#print(np_bbox.shape, np_ids.shape, np_scores.shape, np_kpts.shape)
return results
def __preprocess(
self,
payload: object = None
):
"""
:param payload: RGB channel
:return: pre-processed image, and transform info (p_b_w, p_b_h, pad_size_h, pad_size_w)
"""
self.original_h, self.original_w, _ = payload.shape
if float(self.original_h) / self.original_w >= float(self.image_height) / float(self.image_width):
resize_ratio = (float(self.original_h) * self.image_width) / (float(self.original_w) * self.image_height)
pad_size = (self.original_h, int(self.original_w * resize_ratio))
else:
resize_ratio = (float(self.original_w) * self.image_height) / (float(self.original_h) * self.image_width)
pad_size = (int(self.original_h * resize_ratio), self.original_w)
p_b_h = (pad_size[0] - self.original_h) // 2
p_b_w = (pad_size[1] - self.original_w) // 2
p_a_h = pad_size[0] - self.original_h - p_b_h
p_a_w = pad_size[1] - self.original_w - p_b_w
pad_width = ((p_b_h, p_a_h), (p_b_w, p_a_w), (0, 0))
transform_info = (p_b_w, p_b_h, pad_size[0], pad_size[1])
image = payload.astype(np.float32)
        # normalize the image
image = (image - self.mean) / self.std
# image padding
image = np.pad(image, pad_width)
# image resize (for opencv, width come first)
image = cv2.resize(image, (self.image_width, self.image_height), interpolation=cv2.INTER_LINEAR)
        # normalize the image
# image = (image - self.mean) / self.std
# convert to CxHXW
image = np.transpose(image, (2, 0, 1))
return image[None, ...], transform_info
def __postprocess(
self,
bounding_boxs,
class_IDs,
scores,
keypoints,
transform_info,
nms_ret=None
):
"""
post processing the results[] gotten from process
"""
p_b_w, p_b_h, padded_h, padded_w = transform_info
assert nms_ret is not None
if self.torch:
idxs = nms_ret
np_bbx = bounding_boxs[idxs].cpu().numpy()
np_scores = scores[idxs].cpu().numpy()
np_ids = class_IDs[idxs].cpu().numpy()
np_keypoints = keypoints[idxs].cpu().numpy()
else:
np_bbx = bounding_boxs.asnumpy()
np_ids = class_IDs.asnumpy()
np_scores = scores.asnumpy()
np_keypoints = keypoints.asnumpy()
idxs = nms_ret.asnumpy()[:, -1].astype(int)
idxs = idxs[np.where(idxs >= 0)[0]]
np_bbx = np_bbx[idxs, :]
np_ids = np_ids[idxs]
np_scores = np_scores[idxs]
np_keypoints = np_keypoints[idxs, :, :]
# print(np_scores.shape, np_bbx.shape, np_keypoints.shape, np_ids.shape)
idx = np.where(np_scores > self.threshold)[0]
np_bbx = np_bbx[idx, :]
np_scores = np_scores[idx, np.newaxis]
np_ids = np_ids[idx]
np_keypoints = np_keypoints[idx, :, :]
# idx = np.where(np_scores > self.threshold)[0]
# np_bbox = np_bbx[idx, :]
# np_scores = np_scores[idx]
# np_ids = np_ids[idx]
# np_keypoints = np_keypoints[idx, :, :]
np_bbx[:, 0] *= padded_w / self.image_width
np_bbx[:, 1] *= padded_h / self.image_height
np_bbx[:, 2] *= padded_w / self.image_width
np_bbx[:, 3] *= padded_h / self.image_height
np_bbx[:, 0] -= p_b_w
np_bbx[:, 1] -= p_b_h
np_bbx[:, 2] -= p_b_w
np_bbx[:, 3] -= p_b_h
np_bbx[:, 0] = np.clip(np_bbx[:, 0], a_min=0, a_max=self.original_w)
np_bbx[:, 2] = np.clip(np_bbx[:, 2], a_min=0, a_max=self.original_w)
np_bbx[:, 1] = np.clip(np_bbx[:, 1], a_min=0, a_max=self.original_h)
np_bbx[:, 3] = np.clip(np_bbx[:, 3], a_min=0, a_max=self.original_h)
np_keypoints = np_keypoints.reshape(-1, 17, 3)
np_keypoints[:, :, 0] *= padded_w / self.image_width
np_keypoints[:, :, 1] *= padded_h / self.image_height
np_keypoints[:, :, 0] -= p_b_w
np_keypoints[:, :, 1] -= p_b_h
np_keypoints[:, :, 0] = np_keypoints[:, :, 0].clip(0, self.original_w)
np_keypoints[:, :, 1] = np_keypoints[:, :, 1].clip(0, self.original_h)
np_keypoints[:, :, 2] = np_keypoints[:, :, 2].clip(0, 1)
return np_bbx, np_ids, np_scores, np_keypoints
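# Standalone sketch of the letterbox-padding arithmetic in __preprocess, using
# a hypothetical 1280x1080 frame and the default 1280x720 network input size
# (numbers chosen so the floating-point ratios are exact).
if __name__ == "__main__":
    original_h, original_w = 1080, 1280  # taller than the 720/1280 aspect ratio
    image_height, image_width = 720, 1280
    resize_ratio = (float(original_h) * image_width) / (float(original_w) * image_height)  # 1.5
    pad_size = (original_h, int(original_w * resize_ratio))  # (1080, 1920)
    p_b_w = (pad_size[1] - original_w) // 2  # 320 columns padded on each side
    p_b_h = (pad_size[0] - original_h) // 2  # 0 rows
    print("transform_info =", (p_b_w, p_b_h, pad_size[0], pad_size[1]))  # (320, 0, 1080, 1920)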
|
b8de260edce783c65227ce6adc6dfee80569574a
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/22_专题/日程安排-扫描线+差分/2015_每个线段的平均高度-差分+扫描线.py
|
e20d36ddf096f035774bce2ed754fdae95fff731
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,822
|
py
|
2015_每个线段的平均高度-差分+扫描线.py
|
from typing import List
from collections import defaultdict
# Average the building heights over each overlapping interval and return an
# array of [start, end, average height] segments covering those spans;
# adjacent segments with equal average heights must be merged.
# See also: 1943. 描述绘画结果-扫描.py (LeetCode 1943, "Describe the Painting")
# 1. Use a map to record the difference (delta) information at each boundary
# 2. Sort the boundaries and sweep through them, updating pre/preSum/preCount
class Solution:
def averageHeightOfBuildings(self, buildings: List[List[int]]) -> List[List[int]]:
        # (total height, count)
deltaDict = defaultdict(lambda: [0, 0])
for start, end, delta in buildings:
deltaDict[start][0] += delta
deltaDict[end][0] -= delta
deltaDict[start][1] += 1
deltaDict[end][1] -= 1
res = []
        # segment start, running height sum, running building count
pre, preSum, preCount = 0, 0, 0
for cur in sorted(deltaDict):
delta, deltaCount = deltaDict[cur]
if preSum > 0:
cand = [pre, cur, preSum // preCount]
                # merge with the previous segment
if res and res[-1][1] == pre and res[-1][2] == cand[2]:
res[-1][1] = cur
else:
res.append(cand)
pre = cur
preSum += delta
preCount += deltaCount
return res
print(Solution().averageHeightOfBuildings(buildings=[[1, 4, 2], [3, 9, 4]]))
# Output: [[1,3,2],[3,4,3],[4,9,4]]
# Explanation:
# From 1 to 3, there is only the first building with an average height of 2 / 1 = 2.
# From 3 to 4, both the first and the second building are there with an average height of (2+4) / 2 = 3.
# From 4 to 9, there is only the second building with an average height of 4 / 1 = 4.
|
f793bcc8dc3af0c6e0ad33e561d9799c949cb916
|
eaba398a0ca5414c10dd1890e662fdcd87e157b6
|
/jirafs/commands/assign.py
|
2ffa71e32abed48cc1e29af97cfb37e9b73b4137
|
[
"MIT"
] |
permissive
|
coddingtonbear/jirafs
|
a78f47e59836d9a6024bc287ea2a1247fb297e62
|
778cba9812f99eeaf726a77c1bca5ae2650a35e9
|
refs/heads/development
| 2023-06-16T00:06:33.262635
| 2022-09-20T04:06:26
| 2022-09-20T04:06:26
| 21,588,191
| 125
| 17
|
MIT
| 2023-06-02T05:48:53
| 2014-07-07T21:54:20
|
Python
|
UTF-8
|
Python
| false
| false
| 676
|
py
|
assign.py
|
from jirafs.plugin import CommandPlugin
class Command(CommandPlugin):
"""Assign the current task to a user"""
MIN_VERSION = "2.0.0"
MAX_VERSION = "3.0.0"
def main(self, args, folder, **kwargs):
username = args.username
if not username:
username = folder.get_config().get(
folder.jira_base,
"username",
)
folder.jira.assign_issue(folder.issue, username)
folder.log(
"Successfully assigned %s to %s.",
args=(folder.issue.key, username),
)
def add_arguments(self, parser):
parser.add_argument("username", nargs="?", type=str)
|
2640f7e95a63eeb9b9c4700954479b0cac0ba9fa
|
3ef70fe63acaa665e2b163f30f1abd0a592231c1
|
/stackoverflow/venv/lib/python3.6/site-packages/twisted/plugins/twisted_core.py
|
3fb52f0c7868bd57389740e64f27867d2616e241
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
wistbean/learn_python3_spider
|
14914b63691ac032955ba1adc29ad64976d80e15
|
40861791ec4ed3bbd14b07875af25cc740f76920
|
refs/heads/master
| 2023-08-16T05:42:27.208302
| 2023-03-30T17:03:58
| 2023-03-30T17:03:58
| 179,152,420
| 14,403
| 3,556
|
MIT
| 2022-05-20T14:08:34
| 2019-04-02T20:19:54
|
Python
|
UTF-8
|
Python
| false
| false
| 589
|
py
|
twisted_core.py
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import absolute_import, division
from twisted.internet.endpoints import (
_SystemdParser, _TCP6ServerParser, _StandardIOParser,
_TLSClientEndpointParser)
from twisted.protocols.haproxy._parser import (
HAProxyServerParser as _HAProxyServerParser
)
systemdEndpointParser = _SystemdParser()
tcp6ServerEndpointParser = _TCP6ServerParser()
stdioEndpointParser = _StandardIOParser()
tlsClientEndpointParser = _TLSClientEndpointParser()
_haProxyServerEndpointParser = _HAProxyServerParser()
|
ebad710cf091b727469b963ca7dc704915032db5
|
6146e33102797407ede06ce2daa56c28fdfa2812
|
/python/GafferImageTest/OpenColorIOConfigPlugTest.py
|
0c85e5fe5d5edaa91d8710aa37143047429bc496
|
[
"BSD-3-Clause"
] |
permissive
|
GafferHQ/gaffer
|
e1eb78ba8682bfbb7b17586d6e7b47988c3b7d64
|
59cab96598c59b90bee6d3fc1806492a5c03b4f1
|
refs/heads/main
| 2023-09-01T17:36:45.227956
| 2023-08-30T09:10:56
| 2023-08-30T09:10:56
| 9,043,124
| 707
| 144
|
BSD-3-Clause
| 2023-09-14T09:05:37
| 2013-03-27T00:04:53
|
Python
|
UTF-8
|
Python
| false
| false
| 6,254
|
py
|
OpenColorIOConfigPlugTest.py
|
##########################################################################
#
# Copyright (c) 2023, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import Gaffer
import GafferDispatch
import GafferDispatchTest
import GafferImage
import GafferImageTest
class OpenColorIOConfigPlugTest( GafferImageTest.ImageTestCase ) :
def testDefaultConfigPlug( self ) :
script = Gaffer.ScriptNode()
self.assertEqual( GafferImage.OpenColorIOAlgo.getConfig( script.context() ), "" )
self.assertEqual( GafferImage.OpenColorIOAlgo.variables( script.context() ), [] )
self.assertIsNone( GafferImage.OpenColorIOConfigPlug.acquireDefaultConfigPlug( script, createIfNecessary = False ) )
plug = GafferImage.OpenColorIOConfigPlug.acquireDefaultConfigPlug( script )
self.assertIsInstance( plug, GafferImage.OpenColorIOConfigPlug )
self.assertTrue( plug.isSame( GafferImage.OpenColorIOConfigPlug.acquireDefaultConfigPlug( script ) ) )
self.assertEqual( plug.getName(), "openColorIO" )
self.assertEqual( GafferImage.OpenColorIOAlgo.getConfig( script.context() ), "" )
self.assertEqual( GafferImage.OpenColorIOAlgo.variables( script.context() ), [] )
plug["config"].setValue( "test.ocio" )
self.assertEqual( GafferImage.OpenColorIOAlgo.getConfig( script.context() ), "test.ocio" )
plug["variables"].addChild( Gaffer.NameValuePlug( "testA", "testValueA", defaultEnabled = True, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
plug["variables"].addChild( Gaffer.NameValuePlug( "testB", "testValueB", defaultEnabled = True, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
self.assertEqual( set( GafferImage.OpenColorIOAlgo.variables( script.context() ) ), { "testA", "testB" } )
self.assertEqual( GafferImage.OpenColorIOAlgo.getVariable( script.context(), "testA" ), "testValueA" )
self.assertEqual( GafferImage.OpenColorIOAlgo.getVariable( script.context(), "testB" ), "testValueB" )
plug["variables"][0]["enabled"].setValue( False )
self.assertEqual( GafferImage.OpenColorIOAlgo.variables( script.context() ), [ "testB" ] )
self.assertEqual( GafferImage.OpenColorIOAlgo.getVariable( script.context(), "testB" ), "testValueB" )
plug["variables"][1]["value"].setValue( "testValueB2" )
self.assertEqual( GafferImage.OpenColorIOAlgo.variables( script.context() ), [ "testB" ] )
self.assertEqual( GafferImage.OpenColorIOAlgo.getVariable( script.context(), "testB" ), "testValueB2" )
plug["variables"][1]["name"].setValue( "testB2" )
self.assertEqual( GafferImage.OpenColorIOAlgo.variables( script.context() ), [ "testB2" ] )
self.assertEqual( GafferImage.OpenColorIOAlgo.getVariable( script.context(), "testB2" ), "testValueB2" )
plug["variables"][0]["enabled"].setValue( True )
self.assertEqual( set( GafferImage.OpenColorIOAlgo.variables( script.context() ) ), { "testA", "testB2" } )
self.assertEqual( GafferImage.OpenColorIOAlgo.getVariable( script.context(), "testA" ), "testValueA" )
self.assertEqual( GafferImage.OpenColorIOAlgo.getVariable( script.context(), "testB2" ), "testValueB2" )
script2 = Gaffer.ScriptNode()
script2.execute( script.serialise() )
self.assertEqual( GafferImage.OpenColorIOAlgo.getConfig( script2.context() ), "test.ocio" )
self.assertEqual( set( GafferImage.OpenColorIOAlgo.variables( script2.context() ) ), { "testA", "testB2" } )
self.assertEqual( GafferImage.OpenColorIOAlgo.getVariable( script2.context(), "testA" ), "testValueA" )
self.assertEqual( GafferImage.OpenColorIOAlgo.getVariable( script2.context(), "testB2" ), "testValueB2" )
def testConfigAppliesDuringExecution( self ) :
script = Gaffer.ScriptNode()
plug = GafferImage.OpenColorIOConfigPlug.acquireDefaultConfigPlug( script )
plug["config"].setValue( "test.ocio" )
plug["variables"].addChild( Gaffer.NameValuePlug( "testA", "testValueA", defaultEnabled = True, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
script["writer"] = GafferDispatchTest.TextWriter()
script["writer"]["fileName"].setValue( self.temporaryDirectory() / "test.txt" )
script["writer"]["text"].setValue( "${ocio:config}, ${ocio:stringVar:testA}" )
dispatcher = GafferDispatch.LocalDispatcher()
dispatcher["jobsDirectory"].setValue( self.temporaryDirectory() / "testDispatch" )
dispatcher["executeInBackground"].setValue( True )
dispatcher.dispatch( [ script["writer"] ] )
dispatcher.jobPool().waitForAll()
with open( script["writer"]["fileName"].getValue() ) as f :
self.assertEqual( f.readlines(), [ "test.ocio, testValueA" ] )
if __name__ == "__main__":
unittest.main()
|
a1401c11688379032d81da9036e698402a75beea
|
c675ff5fcd3b13fa39352bb8cac11d75262659a8
|
/reactivex/operators/_switchlatest.py
|
2d54a8a3123c65e2d74c7cb1853768cc95d717df
|
[
"MIT"
] |
permissive
|
ReactiveX/RxPY
|
469eb714996c205989e99899a6f1ab1ae2f42dd0
|
af1663d35810fdcd4c25a3ed2e8f0d71b55c341d
|
refs/heads/master
| 2023-08-14T19:27:40.086304
| 2023-01-08T10:02:08
| 2023-03-04T15:33:19
| 8,946,089
| 4,764
| 467
|
MIT
| 2023-09-05T02:53:16
| 2013-03-22T06:16:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,906
|
py
|
_switchlatest.py
|
from asyncio import Future
from typing import Any, Callable, Optional, TypeVar, Union
from reactivex import Observable, abc, from_future
from reactivex.disposable import (
CompositeDisposable,
SerialDisposable,
SingleAssignmentDisposable,
)
_T = TypeVar("_T")
def switch_latest_() -> Callable[
[Observable[Union[Observable[_T], "Future[_T]"]]], Observable[_T]
]:
def switch_latest(
source: Observable[Union[Observable[_T], "Future[_T]"]]
) -> Observable[_T]:
"""Partially applied switch_latest operator.
Transforms an observable sequence of observable sequences into
an observable sequence producing values only from the most
recent observable sequence.
Returns:
An observable sequence that at any point in time produces
the elements of the most recent inner observable sequence
that has been received.
"""
def subscribe(
observer: abc.ObserverBase[_T],
scheduler: Optional[abc.SchedulerBase] = None,
) -> abc.DisposableBase:
inner_subscription = SerialDisposable()
has_latest = [False]
is_stopped = [False]
latest = [0]
def on_next(inner_source: Union[Observable[_T], "Future[_T]"]) -> None:
nonlocal source
d = SingleAssignmentDisposable()
with source.lock:
latest[0] += 1
_id = latest[0]
has_latest[0] = True
inner_subscription.disposable = d
# Check if Future or Observable
if isinstance(inner_source, Future):
obs = from_future(inner_source)
else:
obs = inner_source
def on_next(x: Any) -> None:
if latest[0] == _id:
observer.on_next(x)
def on_error(e: Exception) -> None:
if latest[0] == _id:
observer.on_error(e)
def on_completed() -> None:
if latest[0] == _id:
has_latest[0] = False
if is_stopped[0]:
observer.on_completed()
d.disposable = obs.subscribe(
on_next, on_error, on_completed, scheduler=scheduler
)
def on_completed() -> None:
is_stopped[0] = True
if not has_latest[0]:
observer.on_completed()
subscription = source.subscribe(
on_next, observer.on_error, on_completed, scheduler=scheduler
)
return CompositeDisposable(subscription, inner_subscription)
return Observable(subscribe)
return switch_latest
__all__ = ["switch_latest_"]
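# Minimal usage sketch with Subjects, so the switching is deterministic: once a
# new inner sequence arrives, items from the superseded inner are dropped.
if __name__ == "__main__":
    from reactivex.subject import Subject

    outer = Subject()
    outer.pipe(switch_latest_()).subscribe(print)

    first, second = Subject(), Subject()
    outer.on_next(first)
    first.on_next(1)       # printed: `first` is the latest inner sequence
    outer.on_next(second)  # switch: `first` is unsubscribed
    first.on_next(2)       # dropped
    second.on_next(3)      # printed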
|
09c6b1708a63b9d94d3a4516d4f71dfa9e30cb8a
|
67cc5db4593e2cdd109e589e13fb07074bcff5d9
|
/tests/npbench/misc/azimint_naive_test.py
|
522b545f4caed687776da703cb115a879e49cd95
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
spcl/dace
|
39849b1488e8f59f880fc0e2572687556c51847d
|
c5ca99ad37e7ceef6da71026c3c8bb579f64117f
|
refs/heads/master
| 2023-08-31T10:45:09.480018
| 2023-08-30T06:05:10
| 2023-08-30T06:05:10
| 172,703,996
| 402
| 114
|
BSD-3-Clause
| 2023-09-14T15:18:29
| 2019-02-26T12:05:50
|
Python
|
UTF-8
|
Python
| false
| false
| 3,744
|
py
|
azimint_naive_test.py
|
# Copyright 2019-2022 ETH Zurich and the DaCe authors. All rights reserved.
# Original application code: NPBench - https://github.com/spcl/npbench
import dace.dtypes
import numpy as np
import dace
import pytest
import argparse
from dace.transformation.auto.auto_optimize import auto_optimize
from dace.fpga_testing import fpga_test
from dace.transformation.interstate import FPGATransformSDFG, InlineSDFG
N, npt = (dace.symbol(s, dtype=dace.int64) for s in ('N', 'npt'))
def relerror(val, ref):
if np.linalg.norm(ref) == 0:
return np.linalg.norm(val - ref)
return np.linalg.norm(val - ref) / np.linalg.norm(ref)
@dace.program
def dace_azimint_naive(data: dace.float64[N], radius: dace.float64[N]):
rmax = np.amax(radius)
res = np.zeros((npt, ), dtype=np.float64)
for i in range(npt):
r1 = rmax * i / npt
r2 = rmax * (i + 1) / npt
mask_r12 = np.logical_and((r1 <= radius), (radius < r2))
on_values = 0
tmp = np.float64(0)
for j in dace.map[0:N]:
if mask_r12[j]:
tmp += data[j]
on_values += 1
res[i] = tmp / on_values
return res
def numpy_azimint_naive(data, radius, npt):
rmax = radius.max()
res = np.zeros(npt, dtype=np.float64)
for i in range(npt):
r1 = rmax * i / npt
r2 = rmax * (i + 1) / npt
mask_r12 = np.logical_and((r1 <= radius), (radius < r2))
values_r12 = data[mask_r12]
res[i] = values_r12.mean()
return res
def initialize(N):
from numpy.random import default_rng
rng = default_rng(42)
data, radius = rng.random((N, )), rng.random((N, ))
return data, radius
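# Quick standalone sanity check of the NumPy reference above (no dace needed):
#   data, radius = initialize(1000)
#   res = numpy_azimint_naive(data, radius, 10)   # ten radial bins
#   assert res.shape == (10,)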
def run_azimint_naive(device_type: dace.dtypes.DeviceType):
'''
Runs azimint-naive for the given device
:return: the SDFG
'''
# Initialize data (npbench S size)
N, npt = (40000, 100)
data, radius = initialize(N)
if device_type in {dace.dtypes.DeviceType.CPU, dace.dtypes.DeviceType.GPU}:
# Parse the SDFG and apply autopot
sdfg = dace_azimint_naive.to_sdfg()
sdfg = auto_optimize(sdfg, device_type)
val = sdfg(data=data, radius=radius, N=N, npt=npt)
elif device_type == dace.dtypes.DeviceType.FPGA:
# Parse SDFG and apply FPGA friendly optimization
sdfg = dace_azimint_naive.to_sdfg(simplify=True)
applied = sdfg.apply_transformations([FPGATransformSDFG])
assert applied == 1
from dace.libraries.standard import Reduce
Reduce.default_implementation = "FPGAPartialReduction"
sdfg.expand_library_nodes()
sdfg.apply_transformations_repeated([InlineSDFG], print_report=True)
sdfg.specialize(dict(N=N, npt=npt))
val = sdfg(data=data, radius=radius)
# Compute ground truth and Validate result
ref = numpy_azimint_naive(data, radius, npt)
assert (np.allclose(val, ref) or relerror(val, ref) < 1e-10)
return sdfg
def test_cpu():
run_azimint_naive(dace.dtypes.DeviceType.CPU)
@pytest.mark.gpu
def test_gpu():
run_azimint_naive(dace.dtypes.DeviceType.GPU)
@pytest.mark.skip(reason="Validation error")
@fpga_test(assert_ii_1=False)
def test_fpga():
run_azimint_naive(dace.dtypes.DeviceType.FPGA)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--target", default='cpu', choices=['cpu', 'gpu', 'fpga'], help='Target platform')
args = vars(parser.parse_args())
target = args["target"]
if target == "cpu":
run_azimint_naive(dace.dtypes.DeviceType.CPU)
elif target == "gpu":
run_azimint_naive(dace.dtypes.DeviceType.GPU)
elif target == "fpga":
run_azimint_naive(dace.dtypes.DeviceType.FPGA)
|