Dataset schema (one record per source file; ranges give the minimum and maximum observed values):

| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4–721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 5–91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 – 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| github_id | int64 | 426 – 681M |
| star_events_count | int64 | 101 – 243k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns], nullable | 2012-06-28 18:51:49 – 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns], nullable | 2008-02-11 22:55:26 – 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 – 10.2M |
| extension | string | 115 classes |
| filename | string | length 3–113 |
| content | string | length 6–10.2M |
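The records below follow this schema. As a point of reference, here is a minimal sketch of streaming records with this layout through the Hugging Face `datasets` library; the dataset path is a placeholder, not the real identifier.

```python
# Sketch only: "org/python-source-files" is a hypothetical dataset path.
import itertools
from datasets import load_dataset

ds = load_dataset("org/python-source-files", split="train", streaming=True)
for record in itertools.islice(ds, 3):
    # Each record carries repository metadata plus the raw file content.
    print(record["repo_name"], record["path"], record["length_bytes"])
    print(record["content"][:200])
```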
---

repo_name: mlcommons/ck | path: /tests/test_cm.py | filename: test_cm.py
blob_id: d14efd1e611a0951c3e3802373c1aa6e91b551bb | directory_id: 00f950030a550cfd7e972794ee993d94a92d75be | content_id: 41fb402c22badf74abf259c3a1b4255bf15f1e95
snapshot_id: 6261de6a99934cdd84c413de01d2ee4df6f9217a | revision_id: e4306117546ea01f688afc7540c0ae2e1c007470 | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-09-01T23:26:28.195951 | revision_date: 2023-09-01T17:26:00 | committer_date: 2023-09-01T17:26:00
github_id: 26,230,485 | star_events_count: 122 | fork_events_count: 33 | gha_event_created_at: 2023-09-13T12:01:35 | gha_created_at: 2014-11-05T17:14:43
language: Python | gha_language: Python | src_encoding: UTF-8 | extension: py | is_vendor: false | is_generated: false | length_bytes: 451
content:
try:
    import cmind as cm
    r = cm.access(['test', 'script'])
    if 'return' not in r:
        raise Exception('CM access function should always return key \'return\'!')
    exit(0)
except ImportError as e:
    from sys import stderr
    from subprocess import call
    print('WARNING: CM module for python is not installed & jupyter notebooks will not be supported', file=stderr)
    retcode = call(['cm', 'test', 'script'])
    exit(retcode)
---

repo_name: rgc99/irrigation_unlimited | path: /tests/test_reload.py | filename: test_reload.py
blob_id: a44794656dda8ff2083c435b95bd92a00e3e99e6 | directory_id: 9e417620383442e018a677179fa09cebb2ff8c6f | content_id: 693a306e2137d332cf282ad9070f828739bdf651
snapshot_id: 12375871953af7fb5ccedd231560d946c51be2d2 | revision_id: 21345ec6775b3d36eced42aa75178de451fc22b1 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-08-23T09:22:56.898275 | revision_date: 2023-08-19T02:00:11 | committer_date: 2023-08-19T02:00:11
github_id: 332,911,333 | star_events_count: 241 | fork_events_count: 48 | gha_event_created_at: 2023-07-30T23:21:42 | gha_created_at: 2021-01-25T23:17:04
language: Python | gha_language: Python | src_encoding: UTF-8 | extension: py | is_vendor: false | is_generated: false | length_bytes: 7,749
content:
"""Test integration_unlimited reload service calls."""
from datetime import timedelta
import pytest
import homeassistant.core as ha
from homeassistant.util import dt
from custom_components.irrigation_unlimited.const import (
SERVICE_DISABLE,
SERVICE_SUSPEND,
SERVICE_TIME_ADJUST,
)
from tests.iu_test_support import IUExam
IUExam.quiet_mode()
# pylint: disable=unused-argument
async def test_service_reload_basic(
hass: ha.HomeAssistant,
skip_dependencies,
skip_history,
):
"""Test reload service call."""
async with IUExam(hass, "mock_config.yaml") as exam:
sta = hass.states.get("binary_sensor.irrigation_unlimited_c1_m")
assert sta.attributes["friendly_name"] == "Controller 1"
assert sta.attributes["zone_count"] == 1
sta = hass.states.get("binary_sensor.irrigation_unlimited_c1_z1")
assert sta.attributes["friendly_name"] == "Zone 1"
assert sta.attributes["schedule_count"] == 1
await exam.reload("service_reload.yaml")
sta = hass.states.get("binary_sensor.irrigation_unlimited_c1_m")
assert sta.attributes["friendly_name"] == "The First Controller"
assert sta.attributes["zone_count"] == 1
sta = hass.states.get("binary_sensor.irrigation_unlimited_c1_z1")
assert sta.attributes["friendly_name"] == "The First Zone"
assert sta.attributes["schedule_count"] == 2
await exam.begin_test(1)
await exam.finish_test()
exam.check_summary()
async def test_service_reload_survival(
hass: ha.HomeAssistant, skip_dependencies, skip_history
):
"""Test reload preserves current state"""
async with IUExam(hass, "mock_config.yaml") as exam:
adate = dt.now().replace(microsecond=0)
await exam.call(
SERVICE_TIME_ADJUST,
{
"entity_id": "binary_sensor.irrigation_unlimited_c1_m",
"sequence_id": 1,
"zones": 1,
"percentage": "15",
},
)
await exam.call(
SERVICE_DISABLE,
{
"entity_id": "binary_sensor.irrigation_unlimited_c1_m",
"sequence_id": 1,
"zones": 1,
},
)
await exam.call(
SERVICE_SUSPEND,
{
"entity_id": "binary_sensor.irrigation_unlimited_c1_m",
"sequence_id": 1,
"zones": 1,
"until": adate + timedelta(hours=6),
},
)
await exam.call(
SERVICE_TIME_ADJUST,
{
"entity_id": "binary_sensor.irrigation_unlimited_c1_m",
"sequence_id": 1,
"percentage": "25",
},
)
await exam.call(
SERVICE_DISABLE,
{
"entity_id": "binary_sensor.irrigation_unlimited_c1_m",
"sequence_id": 1,
},
)
await exam.call(
SERVICE_SUSPEND,
{
"entity_id": "binary_sensor.irrigation_unlimited_c1_m",
"sequence_id": 1,
"until": adate + timedelta(hours=12),
},
)
await exam.call(
SERVICE_TIME_ADJUST,
{
"entity_id": "binary_sensor.irrigation_unlimited_c1_z1",
"percentage": "50",
},
)
await exam.call(
SERVICE_DISABLE,
{
"entity_id": "binary_sensor.irrigation_unlimited_c1_z1",
},
)
await exam.call(
SERVICE_SUSPEND,
{
"entity_id": "binary_sensor.irrigation_unlimited_c1_z1",
"until": adate + timedelta(hours=18),
},
)
await exam.call(
SERVICE_DISABLE,
{
"entity_id": "binary_sensor.irrigation_unlimited_c1_m",
},
)
await exam.call(
SERVICE_SUSPEND,
{
"entity_id": "binary_sensor.irrigation_unlimited_c1_m",
"until": adate + timedelta(hours=24),
},
)
await exam.reload("service_reload_survival.yaml")
assert exam.coordinator.controllers[0].enabled is False
assert exam.coordinator.controllers[0].suspended == adate + timedelta(hours=24)
assert exam.coordinator.controllers[0].sequences[0].enabled is False
assert exam.coordinator.controllers[0].sequences[
0
].suspended == adate + timedelta(hours=12)
assert str(exam.coordinator.controllers[0].sequences[0].adjustment) == "%25.0"
assert exam.coordinator.controllers[0].sequences[0].zones[0].enabled is False
assert exam.coordinator.controllers[0].sequences[0].zones[
0
].suspended == adate + timedelta(hours=6)
assert (
str(exam.coordinator.controllers[0].sequences[0].zones[0].adjustment)
== "%15.0"
)
assert exam.coordinator.controllers[0].zones[0].enabled is False
assert exam.coordinator.controllers[0].zones[0].suspended == adate + timedelta(
hours=18
)
assert str(exam.coordinator.controllers[0].zones[0].adjustment) == "%50.0"
await exam.run_test(1)
exam.check_summary()
async def test_service_reload_while_on(
hass: ha.HomeAssistant, skip_dependencies, skip_history
):
"""Test reload while zone is on"""
async with IUExam(hass, "mock_config.yaml") as exam:
# Reload while entities are on.
await exam.reload("service_reload_1.yaml")
await exam.begin_test(1)
await exam.run_until("2021-01-04 06:10:00")
assert exam.coordinator.controllers[0].is_on is True
assert exam.coordinator.controllers[0].zones[0].is_on is True
await exam.reload("service_reload_1.yaml")
assert exam.coordinator.controllers[0].is_on is False
assert exam.coordinator.controllers[0].zones[0].is_on is False
await exam.run_all()
exam.check_summary()
async def test_service_reload_error(
hass: ha.HomeAssistant,
skip_dependencies,
skip_history,
):
"""Test reload service call on a bad config file."""
async with IUExam(hass, "mock_config.yaml") as exam:
with pytest.raises(KeyError, match="controllers"):
await exam.reload("service_reload_error.yaml")
async def test_service_reload_extend_shrink(
hass: ha.HomeAssistant,
skip_dependencies,
skip_history,
):
"""Test reload service call expanding and reducing entities."""
async with IUExam(hass, "mock_config.yaml") as exam:
await exam.reload("service_reload_2.yaml")
await exam.run_all()
exam.check_summary()
await exam.reload("service_reload_3.yaml")
await exam.run_all()
exam.check_summary()
await exam.reload("service_reload_1.yaml")
await exam.run_all()
exam.check_summary()
async def test_service_reload_shrink_while_on(
hass: ha.HomeAssistant,
skip_dependencies,
skip_history,
):
"""Test reload service call reducing entities while on."""
async with IUExam(hass, "mock_config.yaml") as exam:
# Reload while entities are on.
await exam.reload("service_reload_while_on.yaml")
await exam.begin_test(1)
await exam.run_until("2021-01-04 06:16:00")
await exam.reload("service_reload_1.yaml")
# The reload mid stream has blown away our test and results. So
# don't attempt to finish or check results, there are none.
# await exam.finish_test()
# check_summary(full_path)
---

repo_name: gammapy/gammapy | path: /gammapy/modeling/sherpa.py | filename: sherpa.py
blob_id: 5d83b7d25bc8b095451aeb7aab5cb704256db4eb | directory_id: c1b8b6080f29c8037100080298b897618a826475 | content_id: 98be5c036ade861568fba036edbcce1370516417
snapshot_id: a5d7acbdde848e92e124fefbce9716faa296f572 | revision_id: 60f03adb8fc7851b9f3ca039512c03a669e3fe10 | branch_name: refs/heads/main
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause
visit_date: 2023-08-16T21:19:06.624561 | revision_date: 2023-08-04T12:13:08 | committer_date: 2023-08-04T12:13:08
github_id: 10,073,640 | star_events_count: 204 | fork_events_count: 184 | gha_event_created_at: 2023-09-14T15:26:05 | gha_created_at: 2013-05-15T07:50:40
language: Python | gha_language: Python | src_encoding: UTF-8 | extension: py | is_vendor: false | is_generated: false | length_bytes: 2,091
content:
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np

from .likelihood import Likelihood

__all__ = ["optimize_sherpa", "covariance_sherpa"]


def get_sherpa_optimizer(name):
    from sherpa.optmethods import GridSearch, LevMar, MonCar, NelderMead

    return {
        "levmar": LevMar,
        "simplex": NelderMead,
        "moncar": MonCar,
        "gridsearch": GridSearch,
    }[name]()


class SherpaLikelihood(Likelihood):
    """Likelihood function interface for Sherpa."""

    def fcn(self, factors):
        self.parameters.set_parameter_factors(factors)
        total_stat = self.function()

        if self.store_trace:
            self.store_trace_iteration(total_stat)

        return total_stat, 0


def optimize_sherpa(parameters, function, store_trace=False, **kwargs):
    """Sherpa optimization wrapper method.

    Parameters
    ----------
    parameters : `~gammapy.modeling.Parameters`
        Parameter list with starting values.
    function : callable
        Likelihood function
    **kwargs : dict
        Options passed to the optimizer instance.

    Returns
    -------
    result : (factors, info, optimizer)
        Tuple containing the best fit factors, some info and the optimizer instance.
    """
    method = kwargs.pop("method", "simplex")
    optimizer = get_sherpa_optimizer(method)
    optimizer.config.update(kwargs)

    pars = [par.factor for par in parameters.free_parameters]
    parmins = [par.factor_min for par in parameters.free_parameters]
    parmaxes = [par.factor_max for par in parameters.free_parameters]

    statfunc = SherpaLikelihood(function, parameters, store_trace)

    with np.errstate(invalid="ignore"):
        result = optimizer.fit(
            statfunc=statfunc.fcn, pars=pars, parmins=parmins, parmaxes=parmaxes
        )

    factors = result[1]
    info = {
        "success": result[0],
        "message": result[3],
        "nfev": result[4]["nfev"],
        "trace": statfunc.trace,
    }

    return factors, info, optimizer


def covariance_sherpa():
    raise NotImplementedError
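For orientation, here is a small, self-contained sketch (not part of gammapy) of the Sherpa `fit` protocol that `optimize_sherpa` wraps: the optimizer minimizes a callable that maps an array of parameter factors to a `(total_stat, extra)` pair, given start values and bounds. The quadratic statistic and its bounds below are made up.

```python
# Standalone sketch of the Sherpa optimizer call used above; assumes only
# `sherpa` is installed. The statistic is a made-up quadratic.
from sherpa.optmethods import NelderMead

def stat(factors):
    # Minimum at factors = (1.0, 2.0); the second element mirrors the
    # (total_stat, 0) convention used by SherpaLikelihood.fcn above.
    total = (factors[0] - 1.0) ** 2 + (factors[1] - 2.0) ** 2
    return total, 0

optimizer = NelderMead()
result = optimizer.fit(
    statfunc=stat, pars=[0.0, 0.0], parmins=[-10.0, -10.0], parmaxes=[10.0, 10.0]
)
print(result[0])  # success flag
print(result[1])  # best-fit factors, approximately [1.0, 2.0]
```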
---

repo_name: dropbox/pyannotate | path: /pyannotate_runtime/tests/test_collect_types.py | filename: test_collect_types.py
blob_id: 61a2136b173e32c2d1a3ff4ae91ce3cae6117351 | directory_id: 6bf3efa384abc11398ab9c6cc902c6415bf7e478 | content_id: 3b628f44c60c3169f95e2af19af8acf388194f61
snapshot_id: d9ab4919672e69853c6aa47f6b8d4ba85696de98 | revision_id: a7a46f394f0ba91a1b5fbf657e2393af542969ae | branch_name: refs/heads/master
detected_licenses: ["LicenseRef-scancode-generic-cla", "Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2022-08-14T03:55:19.082430 | revision_date: 2021-10-12T20:53:49 | committer_date: 2021-10-12T20:53:49
github_id: 110,597,280 | star_events_count: 1,482 | fork_events_count: 73 | gha_event_created_at: 2023-02-01T22:35:54 | gha_created_at: 2017-11-13T20:18:08
language: Python | gha_language: Python | src_encoding: UTF-8 | extension: py | is_vendor: false | is_generated: false | length_bytes: 20,862
content:
"""Tests for collect_types"""
from __future__ import (
absolute_import,
division,
print_function,
)
import contextlib
import json
import os
import sched
import sys
import time
import unittest
from collections import namedtuple
from threading import Thread
from six import PY2
from typing import (
Any,
Dict,
Iterator,
List,
Optional,
Tuple,
Union,
)
try:
from typing import Text
except ImportError:
# In Python 3.5.1 stdlib, typing.py does not define Text
Text = str # type: ignore
from pyannotate_runtime import collect_types
# A bunch of random functions and classes to test out type collection
# Disable a whole bunch of lint warnings for simplicity
# pylint:disable=invalid-name
# pylint:disable=blacklisted-name
# pylint:disable=missing-docstring
FooNamedTuple = namedtuple('FooNamedTuple', 'foo bar')
def print_int(i):
# type: (Any) -> Any
print(i)
def noop_dec(a):
# type: (Any) -> Any
return a
def discard(a):
# type: (Any) -> None
pass
@noop_dec
class FoosParent(object):
pass
class FooObject(FoosParent):
class FooNested(object):
pass
class FooReturn(FoosParent):
pass
class WorkerClass(object):
def __init__(self, special_num, foo):
# type: (Any, Any) -> None
self._special_num = special_num
self._foo = foo
@noop_dec
def do_work(self, i, haz):
# type: (Any, Any) -> Any
print_int(i)
return EOFError()
@classmethod
def do_work_clsmthd(cls, i, haz=None):
# type: (Any, Any) -> Any
print_int(i)
return EOFError()
class EventfulHappenings(object):
def __init__(self):
# type: () -> None
self.handlers = [] # type: Any
def add_handler(self, handler):
# type: (Any) -> Any
self.handlers.append(handler)
def something_happened(self, a, b):
# type: (Any, Any) -> Any
for h in self.handlers:
h(a, b)
return 1999
# A class that is old style under python 2
class OldStyleClass:
def foo(self, x):
# type: (Any) -> Any
return x
def i_care_about_whats_happening(y, z):
# type: (Any, Any) -> Any
print_int(y)
print(z)
return FooReturn()
def takes_different_lists(l):
# type: (Any) -> Any
pass
def takes_int_lists(l):
# type: (Any) -> Any
pass
def takes_int_float_lists(l):
# type: (Any) -> Any
pass
def takes_int_to_str_dict(d):
# type: (Any) -> Any
pass
def takes_int_to_multiple_val_dict(d):
# type: (Any) -> Any
pass
def recursive_dict(d):
# type: (Any) -> Any
pass
def empty_then_not_dict(d):
# type: (Any) -> Any
return d
def empty_then_not_list(l):
# type: (Any) -> Any
pass
def tuple_verify(t):
# type: (Any) -> Any
return t
def problematic_dup(uni, bol):
# type: (Text, bool) -> Tuple[Dict[Text, Union[List, int, Text]],bytes]
return {u"foo": [], u"bart": u'ads', u"bax": 23}, b'str'
def two_dict_comprehensions():
# type: () -> Dict[int, Dict[Tuple[int, int], int]]
d = {1: {1: 2}}
return {
i: {
(i, k): l
for k, l in j.items()
}
for i, j in d.items()
}
class TestBaseClass(unittest.TestCase):
def setUp(self):
# type: () -> None
super(TestBaseClass, self).setUp()
# Stats in the same format as the generated JSON.
self.stats = [] # type: List[collect_types.FunctionData]
def tearDown(self):
# type: () -> None
collect_types.stop_types_collection()
def load_stats(self):
# type: () -> None
self.stats = json.loads(collect_types.dumps_stats())
@contextlib.contextmanager
def collecting_types(self):
# type: () -> Iterator[None]
collect_types.collected_args = {}
collect_types.collected_signatures = {}
collect_types.num_samples = {}
collect_types.sampling_counters = {}
collect_types.call_pending = set()
collect_types.start()
yield None
collect_types.stop()
self.load_stats()
def assert_type_comments(self, func_name, comments):
# type: (str, List[str]) -> None
"""Assert that we generated expected comment for the func_name function in self.stats"""
stat_items = [item for item in self.stats if item.get('func_name') == func_name]
if not comments and not stat_items:
# If we expect no comments, it's okay if nothing was collected.
return
assert len(stat_items) == 1
item = stat_items[0]
if set(item['type_comments']) != set(comments):
print('Actual:')
for comment in sorted(item['type_comments']):
print(' ' + comment)
print('Expected:')
for comment in sorted(comments):
print(' ' + comment)
assert set(item['type_comments']) == set(comments)
assert len(item['type_comments']) == len(comments)
assert os.path.join(collect_types.TOP_DIR, item['path']) == __file__
class TestCollectTypes(TestBaseClass):
def setUp(self):
# type: () -> None
super(TestCollectTypes, self).setUp()
collect_types.init_types_collection()
# following type annotations are intentionally use Any,
# because we are testing runtime type collection
def foo(self, int_arg, list_arg):
# type: (Any, Any) -> None
"""foo"""
self.bar(int_arg, list_arg)
def bar(self, int_arg, list_arg):
# type: (Any, Any) -> Any
"""bar"""
return len(self.baz(list_arg)) + int_arg
def baz(self, list_arg):
# type: (Any) -> Any
"""baz"""
return set([int(s) for s in list_arg])
def test_type_collection_on_main_thread(self):
# type: () -> None
with self.collecting_types():
self.foo(2, ['1', '2'])
self.assert_type_comments('TestCollectTypes.foo', ['(int, List[str]) -> None'])
self.assert_type_comments('TestCollectTypes.bar', ['(int, List[str]) -> int'])
self.assert_type_comments('TestCollectTypes.baz', ['(List[str]) -> Set[int]'])
def bar_another_thread(self, int_arg, list_arg):
# type: (Any, Any) -> Any
"""bar"""
return len(self.baz_another_thread(list_arg)) + int_arg
def baz_another_thread(self, list_arg):
# type: (Any) -> Any
"""baz"""
return set([int(s) for s in list_arg])
def test_type_collection_on_another_thread(self):
# type: () -> None
with self.collecting_types():
t = Thread(target=self.bar_another_thread, args=(100, ['1', '2', '3'],))
t.start()
t.join()
self.assert_type_comments('TestCollectTypes.baz_another_thread',
['(List[str]) -> Set[int]'])
def test_run_a_bunch_of_tests(self):
# type: () -> None
with self.collecting_types():
to = FooObject()
wc = WorkerClass(42, to)
s = sched.scheduler(time.time, time.sleep)
event_source = EventfulHappenings()
s.enter(.001, 1, wc.do_work, ([52, 'foo,', 32], FooNamedTuple('ab', 97)))
s.enter(.002, 1, wc.do_work, ([52, 32], FooNamedTuple('bc', 98)))
s.enter(.003, 1, wc.do_work_clsmthd, (52, FooNamedTuple('de', 99)))
s.enter(.004, 1, event_source.add_handler, (i_care_about_whats_happening,))
s.enter(.005, 1, event_source.add_handler, (lambda a, b: print_int(a),))
s.enter(.006, 1, event_source.something_happened, (1, 'tada'))
s.run()
takes_different_lists([42, 'as', 323, 'a'])
takes_int_lists([42, 323, 3231])
takes_int_float_lists([42, 323.2132, 3231])
takes_int_to_str_dict({2: 'a', 4: 'd'})
takes_int_to_multiple_val_dict({3: 'a', 4: None, 5: 232})
recursive_dict({3: {3: 'd'}, 4: {3: 'd'}})
empty_then_not_dict({})
empty_then_not_dict({3: {3: 'd'}, 4: {3: 'd'}})
empty_then_not_list([])
empty_then_not_list([1, 2])
empty_then_not_list([1, 2])
tuple_verify((1, '4'))
tuple_verify((1, '4'))
problematic_dup(u'ha', False)
problematic_dup(u'ha', False)
OldStyleClass().foo(10)
discard(FooObject.FooNested())
# TODO(svorobev): add checks for the rest of the functions
# print_int,
self.assert_type_comments(
'WorkerClass.__init__',
['(int, pyannotate_runtime.tests.test_collect_types.FooObject) -> None'])
self.assert_type_comments(
'do_work_clsmthd',
['(int, pyannotate_runtime.tests.test_collect_types.FooNamedTuple) -> EOFError'])
self.assert_type_comments('OldStyleClass.foo', ['(int) -> int'])
# Need __qualname__ to get this right
if sys.version_info >= (3, 3):
self.assert_type_comments(
'discard',
['(pyannotate_runtime.tests.test_collect_types:FooObject.FooNested) -> None'])
# TODO: that could be better
self.assert_type_comments('takes_different_lists', ['(List[Union[int, str]]) -> None'])
# TODO: that should work
# self.assert_type_comment('empty_then_not_dict',
# '(Dict[int, Dict[int, str]]) -> Dict[int, Dict[int, str]]')
self.assert_type_comments('empty_then_not_list', ['(List[int]) -> None',
'(List) -> None'])
if PY2:
self.assert_type_comments(
'problematic_dup',
['(unicode, bool) -> Tuple[Dict[unicode, Union[List, int, unicode]], str]'])
else:
self.assert_type_comments(
'problematic_dup',
['(str, bool) -> Tuple[Dict[str, Union[List, int, str]], bytes]'])
def test_two_signatures(self):
# type: () -> None
def identity(x):
# type: (Any) -> Any
return x
with self.collecting_types():
identity(1)
identity('x')
self.assert_type_comments('identity', ['(int) -> int', '(str) -> str'])
def test_many_signatures(self):
# type: () -> None
def identity2(x):
# type: (Any) -> Any
return x
with self.collecting_types():
for x in 1, 'x', 2, 'y', slice(1), 1.1, None, False, bytearray(), (), [], set():
for _ in range(50):
identity2(x)
# We collect at most 8 distinct signatures.
self.assert_type_comments('identity2', ['(int) -> int',
'(str) -> str',
'(slice) -> slice',
'(float) -> float',
'(None) -> None',
'(bool) -> bool',
'(bytearray) -> bytearray',
'(Tuple[]) -> Tuple[]'])
def test_default_args(self):
# type: () -> None
def func_default(x=0, y=None):
# type: (Any, Any) -> Any
return x
with self.collecting_types():
func_default()
func_default('')
func_default(1.1, True)
self.assert_type_comments('func_default', ['(int, None) -> int',
'(str, None) -> str',
'(float, bool) -> float'])
def test_keyword_args(self):
# type: () -> None
def func_kw(x, y):
# type: (Any, Any) -> Any
return x
with self.collecting_types():
func_kw(y=1, x='')
func_kw(**{'x': 1.1, 'y': None})
self.assert_type_comments('func_kw', ['(str, int) -> str',
'(float, None) -> float'])
def test_no_return(self):
# type: () -> None
def func_always_fail(x):
# type: (Any) -> Any
raise ValueError
def func_sometimes_fail(x):
# type: (Any) -> Any
if x == 0:
raise RuntimeError
return x
with self.collecting_types():
try:
func_always_fail(1)
except Exception:
pass
try:
func_always_fail('')
except Exception:
pass
try:
func_always_fail(1)
except Exception:
pass
try:
func_sometimes_fail(0)
except Exception:
pass
func_sometimes_fail('')
try:
func_sometimes_fail(0)
except Exception:
pass
self.assert_type_comments('func_always_fail', ['(int) -> pyannotate_runtime.collect_types.NoReturnType',
'(str) -> pyannotate_runtime.collect_types.NoReturnType'])
self.assert_type_comments('func_sometimes_fail', ['(int) -> pyannotate_runtime.collect_types.NoReturnType',
'(str) -> str'])
def test_only_return(self):
# type: () -> None
def only_return(x):
# type: (int) -> str
collect_types.start()
return ''
only_return(1)
collect_types.stop()
self.load_stats()
# No entry is stored if we only have a return event with no matching call.
self.assert_type_comments('only_return', [])
def test_callee_star_args(self):
# type: () -> None
def callee_star_args(x, *y):
# type: (Any, *Any) -> Any
return 0
with self.collecting_types():
callee_star_args(0)
callee_star_args(1, '')
callee_star_args(slice(1), 1.1, True)
callee_star_args(*(False, 1.1, ''))
self.assert_type_comments('callee_star_args', ['(int) -> int',
'(int, *str) -> int',
'(slice, *Union[bool, float]) -> int',
'(bool, *Union[float, str]) -> int'])
def test_caller_star_args(self):
# type: () -> None
def caller_star_args(x, y=None):
# type: (Any, Any) -> Any
return 0
with self.collecting_types():
caller_star_args(*(1,))
caller_star_args(*('', 1.1))
self.assert_type_comments('caller_star_args', ['(int, None) -> int',
'(str, float) -> int'])
def test_star_star_args(self):
# type: () -> None
def star_star_args(x, **kw):
# type: (Any, **Any) -> Any
return 0
with self.collecting_types():
star_star_args(1, y='', z=True)
star_star_args(**{'x': True, 'a': 1.1})
self.assert_type_comments('star_star_args', ['(int) -> int',
'(bool) -> int'])
def test_fully_qualified_type_name_with_sub_package(self):
# type: () -> None
def identity_qualified(x):
# type: (Any) -> Any
return x
with self.collecting_types():
identity_qualified(collect_types.TentativeType())
self.assert_type_comments(
'identity_qualified',
['(pyannotate_runtime.collect_types.TentativeType) -> '
'pyannotate_runtime.collect_types.TentativeType'])
def test_recursive_function(self):
# type: () -> None
def recurse(x):
# type: (Any) -> Any
if len(x) == 0:
return 1.1
else:
recurse(x[1:])
return x[0]
with self.collecting_types():
recurse((1, '', True))
self.assert_type_comments(
'recurse',
['(Tuple[]) -> float',
'(Tuple[bool]) -> pyannotate_runtime.collect_types.UnknownType',
'(Tuple[str, bool]) -> pyannotate_runtime.collect_types.UnknownType',
'(Tuple[int, str, bool]) -> pyannotate_runtime.collect_types.UnknownType'])
def test_recursive_function_2(self):
# type: () -> None
def recurse(x):
# type: (Any) -> Any
if x == 0:
recurse('')
recurse(1.1)
return False
else:
return x
with self.collecting_types():
# The return event for the initial call is mismatched because of
# the recursive calls, so we'll have to drop the return type.
recurse(0)
self.assert_type_comments(
'recurse',
['(str) -> str',
'(float) -> float',
'(int) -> pyannotate_runtime.collect_types.UnknownType'])
def test_ignoring_c_calls(self):
# type: () -> None
def func(x):
# type: (Any) -> Any
a = [1]
# Each of these generates a c_call/c_return event pair.
y = len(a), len(a), len(a), len(a), len(a), len(a), len(a), len(a), len(a), len(a)
y = len(a), len(a), len(a), len(a), len(a), len(a), len(a), len(a), len(a), len(a)
str(y)
return x
with self.collecting_types():
func(1)
func('')
self.assert_type_comments('func', ['(int) -> int',
'(str) -> str'])
def test_no_crash_on_nested_dict_comps(self):
# type: () -> None
with self.collecting_types():
two_dict_comprehensions()
self.assert_type_comments('two_dict_comprehensions',
['() -> Dict[int, Dict[Tuple[int, int], int]]'])
def test_skip_lambda(self):
# type: () -> None
with self.collecting_types():
(lambda: None)()
(lambda x: x)(0)
(lambda x, y: x+y)(0, 0)
assert self.stats == []
def test_unknown_module_types(self):
# type: () -> None
def func_with_unknown_module_types(c):
# type: (Any) -> Any
return c
with self.collecting_types():
ns = {
'__name__': '<unknown>'
} # type: Dict[str, Any]
exec('class C(object): pass', ns)
func_with_unknown_module_types(ns['C']())
self.assert_type_comments('func_with_unknown_module_types', ['(C) -> C'])
def test_yield_basic(self):
# type: () -> None
def gen(n, a):
for i in range(n):
yield a
with self.collecting_types():
list(gen(10, 'x'))
self.assert_type_comments('gen', ['(int, str) -> Iterator[str]'])
def test_yield_various(self):
# type: () -> None
def gen(n, a, b):
for i in range(n):
yield a
yield b
with self.collecting_types():
list(gen(10, 'x', 1))
list(gen(0, 0, 0))
# TODO: This should really return Iterator[Union[int, str]]
self.assert_type_comments('gen', ['(int, str, int) -> Iterator[int]',
'(int, str, int) -> Iterator[str]'])
def test_yield_empty(self):
# type: () -> None
def gen():
if False:
yield
with self.collecting_types():
list(gen())
self.assert_type_comments('gen', ['() -> Iterator'])
def foo(arg):
# type: (Any) -> Any
return [arg]
class TestInitWithFilter(TestBaseClass):
def always_foo(self, filename):
# type: (Optional[str]) -> Optional[str]
return 'foo.py'
def always_none(self, filename):
# type: (Optional[str]) -> Optional[str]
return None
def test_init_with_filter(self):
# type: () -> None
collect_types.init_types_collection(self.always_foo)
with self.collecting_types():
foo(42)
assert len(self.stats) == 1
assert self.stats[0]['path'] == 'foo.py'
def test_init_with_none_filter(self):
# type: () -> None
collect_types.init_types_collection(self.always_none)
with self.collecting_types():
foo(42)
assert self.stats == []
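The tests above drive pyannotate's runtime collector through a test harness; outside of tests, the same public calls used there (`init_types_collection`, `start`, `stop`, `dumps_stats`) are typically invoked directly. A minimal sketch, with a made-up sample function:

```python
# Sketch only; `add` is a made-up function used to generate call samples.
from pyannotate_runtime import collect_types

def add(a, b):
    return a + b

collect_types.init_types_collection()
collect_types.start()
add(1, 2)
add('x', 'y')
collect_types.stop()
print(collect_types.dumps_stats())  # JSON with the call signatures observed above
```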
---

repo_name: viblo/pymunk | path: /pymunk/tests/test_shape.py | filename: test_shape.py
blob_id: 7e0e5af3eb73226ef627668e39d71d2e173b7938 | directory_id: d4a88b3b102e20e727cae8fbd4167dcb4b57d1ec | content_id: a328db1f196fd651befe88aeae0d6d2dd385f85b
snapshot_id: ca64888e45706db431788368ff8464edf2912d5f | revision_id: 20ac14f665fb38b4ef1bef5acea36a3d612dd0d5 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-08-27T16:37:14.740653 | revision_date: 2023-08-16T19:26:16 | committer_date: 2023-08-16T19:26:16
github_id: 13,273,472 | star_events_count: 855 | fork_events_count: 255 | gha_event_created_at: 2023-01-13T10:13:47 | gha_created_at: 2013-10-02T14:36:46
language: Python | gha_language: Python | src_encoding: UTF-8 | extension: py | is_vendor: false | is_generated: false | length_bytes: 11,556
content:
import pickle
import unittest
from typing import Any
import pymunk as p
class UnitTestShape(unittest.TestCase):
def testId(self) -> None:
c = p.Circle(None, 4)
self.assertGreater(c._id, 0)
def testPointQuery(self) -> None:
b = p.Body(10, 10)
c = p.Circle(b, 5)
c.cache_bb()
info = c.point_query((0, 0))
self.assertEqual(info.shape, c)
self.assertEqual(info.point, (0, 0))
self.assertEqual(info.distance, -5)
self.assertEqual(info.gradient, (0, 1))
info = c.point_query((11, 0))
self.assertEqual(info.shape, c)
self.assertEqual(info.point, (5, 0))
self.assertEqual(info.distance, 6)
self.assertEqual(info.gradient, (1, 0))
def testSegmentQuery(self) -> None:
s = p.Space()
b = p.Body(10, 10)
c = p.Circle(b, 5)
c.cache_bb()
info = c.segment_query((10, -50), (10, 50))
self.assertEqual(info.shape, None)
self.assertEqual(info.point, (10, 50))
self.assertEqual(info.normal, (0, 0))
self.assertEqual(info.alpha, 1.0)
info = c.segment_query((10, -50), (10, 50), 6)
self.assertEqual(info.shape, c)
info = c.segment_query((0, -50), (0, 50))
self.assertEqual(info.shape, c)
self.assertAlmostEqual(info.point.x, 0)
self.assertAlmostEqual(info.point.y, -5)
self.assertAlmostEqual(info.normal.x, 0)
self.assertAlmostEqual(info.normal.y, -1)
self.assertEqual(info.alpha, 0.45)
def testMass(self) -> None:
c = p.Circle(None, 1)
self.assertEqual(c.mass, 0)
c.mass = 2
self.assertEqual(c.mass, 2)
def testDensity(self) -> None:
c = p.Circle(None, 1)
self.assertEqual(c.density, 0)
c.density = 2
self.assertEqual(c.density, 2)
def testMoment(self) -> None:
c = p.Circle(None, 5)
self.assertEqual(c.moment, 0)
c.density = 2
self.assertAlmostEqual(c.moment, 1963.4954084936207)
c.density = 0
c.mass = 2
self.assertAlmostEqual(c.moment, 25)
def testArea(self) -> None:
c = p.Circle(None, 5)
self.assertEqual(c.area, 78.53981633974483)
def testCenterOfGravity(self) -> None:
c = p.Circle(None, 5)
self.assertEqual(c.center_of_gravity, (0, 0))
c = p.Circle(None, 5, (10, 5))
self.assertEqual(c.center_of_gravity.x, 10)
self.assertEqual(c.center_of_gravity.y, 5)
def testNoBody(self) -> None:
c = p.Circle(None, 1)
self.assertEqual(c.body, None)
def testRemoveBody(self) -> None:
b = p.Body(1, 1)
c = p.Circle(b, 1)
c.body = None
self.assertEqual(c.body, None)
self.assertEqual(len(b.shapes), 0)
def testSwitchBody(self) -> None:
b1 = p.Body(1, 1)
b2 = p.Body(1, 1)
c = p.Circle(b1, 1)
self.assertEqual(c.body, b1)
self.assertTrue(c in b1.shapes)
self.assertTrue(c not in b2.shapes)
c.body = b2
self.assertEqual(c.body, b2)
self.assertTrue(c not in b1.shapes)
self.assertTrue(c in b2.shapes)
def testSensor(self) -> None:
b1 = p.Body(1, 1)
c = p.Circle(b1, 1)
self.assertFalse(c.sensor)
c.sensor = True
self.assertTrue(c.sensor)
def testElasticity(self) -> None:
b1 = p.Body(1, 1)
c = p.Circle(b1, 1)
self.assertEqual(c.elasticity, 0)
c.elasticity = 1
self.assertEqual(c.elasticity, 1)
def testFriction(self) -> None:
b1 = p.Body(1, 1)
c = p.Circle(b1, 1)
self.assertEqual(c.friction, 0)
c.friction = 1
self.assertEqual(c.friction, 1)
def testSurfaceVelocity(self) -> None:
b1 = p.Body(1, 1)
c = p.Circle(b1, 1)
self.assertEqual(c.surface_velocity, (0, 0))
c.surface_velocity = (1, 2)
self.assertEqual(c.surface_velocity, (1, 2))
def testCollisionType(self) -> None:
b1 = p.Body(1, 1)
c = p.Circle(b1, 1)
self.assertEqual(c.collision_type, 0)
c.collision_type = 1
self.assertEqual(c.collision_type, 1)
def testFilter(self) -> None:
b1 = p.Body(1, 1)
c = p.Circle(b1, 1)
self.assertEqual(c.filter, p.ShapeFilter(0, 0xFFFFFFFF, 0xFFFFFFFF))
c.filter = p.ShapeFilter(1, 0xFFFFFFF2, 0xFFFFFFF3)
self.assertEqual(c.filter, p.ShapeFilter(1, 0xFFFFFFF2, 0xFFFFFFF3))
def testSpace(self) -> None:
b1 = p.Body(1, 1)
c = p.Circle(b1, 1)
self.assertEqual(c.space, None)
s = p.Space()
s.add(b1, c)
self.assertEqual(c.space, s)
def testShapesCollide(self) -> None:
b1 = p.Body(1, 1)
s1 = p.Circle(b1, 10)
b2 = p.Body(1, 1)
b2.position = 30, 30
s2 = p.Circle(b2, 10)
c = s1.shapes_collide(s2)
self.assertEqual(c.normal, (1, 0))
self.assertEqual(len(c.points), 1)
point = c.points[0]
self.assertEqual(point.point_a, (10, 0))
self.assertEqual(point.point_b, (-10, 0))
self.assertEqual(point.distance, -20)
def testPickle(self) -> None:
b = p.Body(1, 2)
c = p.Circle(b, 3, (4, 5))
c.sensor = True
c.collision_type = 6
c.filter = p.ShapeFilter()
c.elasticity = 7
c.friction = 8
c.surface_velocity = (9, 10)
s = pickle.dumps(c)
c2 = pickle.loads(s)
self.assertEqual(c.sensor, c2.sensor)
self.assertEqual(c.collision_type, c2.collision_type)
self.assertEqual(c.filter, c2.filter)
self.assertEqual(c.elasticity, c2.elasticity)
self.assertEqual(c.friction, c2.friction)
self.assertEqual(c.surface_velocity, c2.surface_velocity)
self.assertEqual(c.density, c2.density)
self.assertEqual(c.mass, c2.mass)
self.assertEqual(c.body.mass, c2.body.mass)
c = p.Circle(None, 1)
c.density = 3
s = pickle.dumps(c)
c2 = pickle.loads(s)
self.assertEqual(c.mass, c2.mass)
self.assertEqual(c.density, c2.density)
c2 = c.copy()
class UnitTestCircle(unittest.TestCase):
def testCircleBB(self) -> None:
b = p.Body(10, 10)
c = p.Circle(b, 5)
c.cache_bb()
self.assertEqual(c.bb, p.BB(-5.0, -5.0, 5.0, 5.0))
def testCircleNoBody(self) -> None:
c = p.Circle(None, 5)
bb = c.update(p.Transform(1, 2, 3, 4, 5, 6))
self.assertEqual(c.bb, bb)
self.assertEqual(c.bb, p.BB(0, 1, 10, 11))
def testOffset(self) -> None:
c = p.Circle(None, 5, (1, 2))
self.assertEqual(c.offset, (1, 2))
def testOffsetUnsafe(self) -> None:
c = p.Circle(None, 5, (1, 2))
c.unsafe_set_offset((3, 4))
self.assertEqual(c.offset, (3, 4))
def testRadius(self) -> None:
c = p.Circle(None, 5)
self.assertEqual(c.radius, 5)
def testRadiusUnsafe(self) -> None:
c = p.Circle(None, 5)
c.unsafe_set_radius(3)
self.assertEqual(c.radius, 3)
def testPickle(self) -> None:
c = p.Circle(None, 3, (4, 5))
s = pickle.dumps(c)
c2 = pickle.loads(s)
self.assertEqual(c.radius, c2.radius)
self.assertEqual(c.offset, c2.offset)
class UnitTestSegment(unittest.TestCase):
def testBB(self) -> None:
s = p.Space()
b = p.Body(10, 10)
c = p.Segment(b, (2, 2), (2, 3), 2)
c.cache_bb()
self.assertEqual(c.bb, p.BB(0, 0, 4.0, 5.0))
def testProperties(self) -> None:
c = p.Segment(None, (2, 2), (2, 3), 4)
self.assertEqual(c.a, (2, 2))
self.assertEqual(c.b, (2, 3))
self.assertEqual(c.normal, (1, 0))
self.assertEqual(c.radius, 4)
def testPropertiesUnsafe(self) -> None:
c = p.Segment(None, (2, 2), (2, 3), 4)
c.unsafe_set_endpoints((3, 4), (5, 6))
self.assertEqual(c.a, (3, 4))
self.assertEqual(c.b, (5, 6))
c.unsafe_set_radius(5)
self.assertEqual(c.radius, 5)
def testSetNeighbors(self) -> None:
c = p.Segment(None, (2, 2), (2, 3), 1)
c.set_neighbors((2, 2), (2, 3))
def testSegmentSegmentCollision(self) -> None:
s = p.Space()
b1 = p.Body(10, 10)
c1 = p.Segment(b1, (-1, -1), (1, 1), 1)
b2 = p.Body(10, 10)
c2 = p.Segment(b2, (1, -1), (-1, 1), 1)
s.add(b1, b2, c1, c2)
self.num_of_begins = 0
def begin(arb: p.Arbiter, space: p.Space, data: Any) -> bool:
self.num_of_begins += 1
return True
s.add_default_collision_handler().begin = begin
s.step(0.1)
self.assertEqual(1, self.num_of_begins)
def testPickle(self) -> None:
c = p.Segment(None, (1, 2), (3, 4), 5)
s = pickle.dumps(c)
c2 = pickle.loads(s)
self.assertEqual(c.a, c2.a)
self.assertEqual(c.b, c2.b)
self.assertEqual(c.radius, c2.radius)
class UnitTestPoly(unittest.TestCase):
def testInit(self) -> None:
c = p.Poly(None, [(0, 0), (10, 10), (20, 0), (-10, 10)], None, 0)
b = p.Body(1, 2)
c = p.Poly(b, [(0, 0), (10, 10), (20, 0), (-10, 10)], p.Transform.identity(), 6)
def testVertices(self) -> None:
vs = [(-10, 10), (0, 0), (20, 0), (10, 10)]
c = p.Poly(None, vs, None, 0)
self.assertEqual(c.get_vertices(), vs)
c = p.Poly(None, vs, p.Transform(1, 2, 3, 4, 5, 6), 0)
vs2 = [(5.0, 6.0), (25.0, 26.0), (45.0, 66.0), (25.0, 46.0)]
self.assertEqual(c.get_vertices(), vs2)
def testVerticesUnsafe(self) -> None:
vs = [(-10, 10), (0, 0), (20, 0), (10, 10)]
c = p.Poly(None, vs, None, 0)
vs2 = [(-3, 3), (0, 0), (3, 0)]
c.unsafe_set_vertices(vs2)
self.assertEqual(c.get_vertices(), vs2)
vs3 = [(-4, 4), (0, 0), (4, 0)]
c.unsafe_set_vertices(vs3, p.Transform.identity())
self.assertEqual(c.get_vertices(), vs3)
def testBB(self) -> None:
c = p.Poly(None, [(2, 2), (4, 3), (3, 5)])
bb = c.update(p.Transform.identity())
self.assertEqual(bb, c.bb)
self.assertEqual(c.bb, p.BB(2, 2, 4, 5))
b = p.Body(1, 2)
c = p.Poly(b, [(2, 2), (4, 3), (3, 5)])
c.cache_bb()
self.assertEqual(c.bb, p.BB(2, 2, 4, 5))
s = p.Space()
b = p.Body(1, 2)
c = p.Poly(b, [(2, 2), (4, 3), (3, 5)])
s.add(b, c)
self.assertEqual(c.bb, p.BB(2, 2, 4, 5))
def testRadius(self) -> None:
c = p.Poly(None, [(2, 2), (4, 3), (3, 5)], radius=10)
self.assertEqual(c.radius, 10)
def testRadiusUnsafe(self) -> None:
c = p.Poly(None, [(2, 2), (4, 3), (3, 5)], radius=10)
c.unsafe_set_radius(20)
self.assertEqual(c.radius, 20)
def testCreateBox(self) -> None:
c = p.Poly.create_box(None, (4, 2), 3)
self.assertEqual(c.get_vertices(), [(2, -1), (2, 1), (-2, 1), (-2, -1)])
c = p.Poly.create_box_bb(None, p.BB(1, 2, 3, 4), 3)
self.assertEqual(c.get_vertices(), [(3, 2), (3, 4), (1, 4), (1, 2)])
def testPickle(self) -> None:
c = p.Poly(None, [(1, 2), (3, 4), (5, 6)], radius=5)
s = pickle.dumps(c)
c2 = pickle.loads(s)
self.assertEqual(c.get_vertices(), c2.get_vertices())
self.assertEqual(c.radius, c2.radius)
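The unit tests above exercise `Shape` properties in isolation; for orientation, a minimal end-to-end pymunk simulation (not taken from the test file, values chosen arbitrarily) looks roughly like this:

```python
# Sketch only: a body with a circle shape falling under gravity.
import pymunk

space = pymunk.Space()
space.gravity = (0, -900)

body = pymunk.Body(mass=1, moment=10)
body.position = (0, 100)
shape = pymunk.Circle(body, radius=5)
space.add(body, shape)

for _ in range(10):
    space.step(0.02)

print(body.position)  # the body has fallen below its starting height
```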
---

repo_name: oracle/weblogic-deploy-tooling | path: /core/src/main/targetconfigs/k8s/k8s_operator_filter.py | filename: k8s_operator_filter.py
blob_id: 8c32376ea1fe4490b927b66bc7749ee6d683b619 | directory_id: 50927fa2c786a18436526345e4aca1490aa031dc | content_id: 41c32cc8bcc32d1122cfdd507ed49d2b5569759b
snapshot_id: c3646c297ac482fed921fb599182d557cf77d532 | revision_id: 9fd74ae578a5b1353662facb0405e5672ecc5191 | branch_name: refs/heads/main
detected_licenses: ["UPL-1.0", "LicenseRef-scancode-other-copyleft", "MIT", "GPL-2.0-only", "Classpath-exception-2.0", "Apache-2.0", "CDDL-1.1"] | license_type: permissive | gha_license_id: UPL-1.0
visit_date: 2023-09-01T08:40:12.305524 | revision_date: 2023-08-26T13:26:37 | committer_date: 2023-08-26T13:26:37
github_id: 120,652,037 | star_events_count: 148 | fork_events_count: 108 | gha_event_created_at: 2023-09-14T21:03:06 | gha_created_at: 2018-02-07T18:08:30
language: Python | gha_language: Python | src_encoding: UTF-8 | extension: py | is_vendor: false | is_generated: false | length_bytes: 581
content:
# Copyright (c) 2020, 2022, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
#
# ------------
# Description:
# ------------
# This filter can be extended to prepare a model for deployment to a Kubernetes environment,
# using the createDomain or prepareModel tools.
# For information about extending the filter, see the WebLogic Deploy Tooling documentation:
# https://oracle.github.io/weblogic-deploy-tooling/userguide/tools-config/model_filters/


def filter_model(model):
    pass
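Since `filter_model` above is a stub meant to be extended, a hypothetical extension could look like the sketch below. The model sections and attribute names are illustrative placeholders for the WDT model dictionary, not taken from this repository.

```python
# Hypothetical extension of filter_model: filters receive the parsed model as a
# dictionary; 'topology', 'Server', and 'ListenAddress' are illustrative names.
def filter_model(model):
    topology = model.setdefault('topology', {})
    # Example tweak: point every server's listen address at a placeholder
    # Kubernetes service name.
    for server in topology.get('Server', {}).values():
        server['ListenAddress'] = 'sample-domain-cluster-service'  # placeholder
```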
---

repo_name: y0m0/MIT.6.00.1x | path: /Lecture4/lectureCode_Lec4-circle.py | filename: lectureCode_Lec4-circle.py
blob_id: d7736453b86bd42c4fa6cbee867483c988f340bd | directory_id: 5cb38042bdf1126609c1ad0a19edad47e3bdbc55 | content_id: 20ecf6a42a9af48d76d092593d7a7b92409638ad
snapshot_id: 0e14c05ed0ffefefce49d504de853d0db39ad02d | revision_id: c10fa1615879f35f822d73d6fa209566ce66b47f | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2021-01-01T05:25:25.403150 | revision_date: 2020-12-05T14:05:06 | committer_date: 2020-12-05T14:05:06
github_id: 57,986,412 | star_events_count: 180 | fork_events_count: 95 | gha_event_created_at: 2022-08-08T12:07:09 | gha_created_at: 2016-05-03T17:03:36
language: Python | gha_language: Python | src_encoding: UTF-8 | extension: py | is_vendor: false | is_generated: false | length_bytes: 149
content:
# circle.py
# From Lecture 4, Modules

pi = 3.14159

def area(radius):
    return pi*(radius**2)

def circumference(radius):
    return 2*pi*radius
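Since this lecture snippet is about modules, a hypothetical caller (assuming the code above is saved as `circle.py` on the import path) would use it like this:

```python
# Hypothetical caller of the lecture module above.
import circle

print(circle.pi)                # 3.14159
print(circle.area(3))           # 28.27431
print(circle.circumference(3))  # 18.84954
```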
---

repo_name: bentoml/BentoML | path: /src/bentoml/_internal/server/grpc_app.py | filename: grpc_app.py
blob_id: e7946e43bf9711f61bc4270b5519abfd8e7a1800 | directory_id: 65078b8087c2040cf0188e2550ea298d20518f62 | content_id: 4735bc13d80795b6ca9d2a395ed605596f3adc04
snapshot_id: 20ab6f8351b1c5cd116d6d60a28098246a1581b3 | revision_id: 4a14f073d8a3e700aff29483b17ea053058c0c63 | branch_name: refs/heads/main
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-09-05T16:03:08.909692 | revision_date: 2023-09-04T18:54:33 | committer_date: 2023-09-04T18:54:33
github_id: 178,976,529 | star_events_count: 5,712 | fork_events_count: 732 | gha_event_created_at: 2023-09-14T20:07:54 | gha_created_at: 2019-04-02T01:39:27
language: Python | gha_language: Python | src_encoding: UTF-8 | extension: py | is_vendor: false | is_generated: false | length_bytes: 13,583
content:
from __future__ import annotations
import asyncio
import inspect
import logging
import os
import sys
import typing as t
from concurrent.futures import ThreadPoolExecutor
from functools import cached_property
from functools import partial
from typing import TYPE_CHECKING
from simple_di import Provide
from simple_di import inject
from ...grpc.utils import LATEST_PROTOCOL_VERSION
from ...grpc.utils import import_generated_stubs
from ...grpc.utils import import_grpc
from ...grpc.utils import load_from_file
from ..configuration.containers import BentoMLContainer
from ..context import ServiceContext as Context
from ..utils import LazyLoader
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
import grpc
from grpc import aio
from grpc_health.v1 import health
from grpc_health.v1 import health_pb2 as pb_health
from grpc_health.v1 import health_pb2_grpc as services_health
from ...grpc.types import Interceptors
from ..service import Service
from ..types import LifecycleHook
else:
grpc, aio = import_grpc()
health_exception_msg = "'grpcio-health-checking' is required for using health checking endpoints. Install with 'pip install grpcio-health-checking'."
pb_health = LazyLoader(
"pb_health",
globals(),
"grpc_health.v1.health_pb2",
exc_msg=health_exception_msg,
)
services_health = LazyLoader(
"services_health",
globals(),
"grpc_health.v1.health_pb2_grpc",
exc_msg=health_exception_msg,
)
health = LazyLoader(
"health",
globals(),
"grpc_health.v1.health",
exc_msg="'grpcio-health-checking' is required for using health checking endpoints. Install with 'pip install grpcio-health-checking'.",
)
# NOTE: we are using the internal aio._server.Server (which is initialized with aio.server)
class Server(aio._server.Server):
"""An async implementation of a gRPC server."""
@inject
def __init__(
self,
bento_service: Service,
bind_address: str,
max_message_length: int
| None = Provide[BentoMLContainer.grpc.max_message_length],
maximum_concurrent_rpcs: int
| None = Provide[BentoMLContainer.grpc.maximum_concurrent_rpcs],
enable_reflection: bool = False,
enable_channelz: bool = False,
max_concurrent_streams: int | None = None,
migration_thread_pool_workers: int = 1,
ssl_certfile: str | None = None,
ssl_keyfile: str | None = None,
ssl_ca_certs: str | None = None,
graceful_shutdown_timeout: float | None = None,
compression: grpc.Compression | None = None,
protocol_version: str = LATEST_PROTOCOL_VERSION,
):
pb, _ = import_generated_stubs(protocol_version)
self.bento_service = bento_service
self.servicer = bento_service.get_grpc_servicer(protocol_version)
# options
self.max_message_length = max_message_length
self.max_concurrent_streams = max_concurrent_streams
self.bind_address = bind_address
self.enable_reflection = enable_reflection
self.enable_channelz = enable_channelz
self.graceful_shutdown_timeout = graceful_shutdown_timeout
self.ssl_certfile = ssl_certfile
self.ssl_keyfile = ssl_keyfile
self.ssl_ca_certs = ssl_ca_certs
self.protocol_version = protocol_version
# Create a health check servicer. We use the non-blocking implementation
# to avoid thread starvation.
self.health_servicer = health.aio.HealthServicer()
self.mount_servicers = self.bento_service.mount_servicers
self.service_names = tuple(
service.full_name for service in pb.DESCRIPTOR.services_by_name.values()
) + (health.SERVICE_NAME,)
super().__init__(
# Note that the max_workers are used inside ThreadPoolExecutor.
# This ThreadPoolExecutor are used by aio.Server() to execute non-AsyncIO RPC handlers.
# Setting it to 1 makes it thread-safe for sync APIs.
thread_pool=ThreadPoolExecutor(max_workers=migration_thread_pool_workers),
generic_handlers=() if self.handlers is None else self.handlers,
interceptors=list(map(lambda x: x(), self.interceptors)),
options=self.options,
# maximum_concurrent_rpcs defines the maximum number of concurrent RPCs this server
# will service before returning RESOURCE_EXHAUSTED status.
# Set to None will indicate no limit.
maximum_concurrent_rpcs=maximum_concurrent_rpcs,
compression=compression,
)
@inject
async def wait_for_runner_ready(
self,
*,
check_interval: int = Provide[
BentoMLContainer.api_server_config.runner_probe.period
],
):
if BentoMLContainer.api_server_config.runner_probe.enabled.get():
logger.info("Waiting for runners to be ready...")
logger.debug("Current runners: %r", self.bento_service.runners)
while True:
try:
runner_statuses = (
runner.runner_handle_is_ready()
for runner in self.bento_service.runners
)
runners_ready = all(await asyncio.gather(*runner_statuses))
if runners_ready:
break
except ConnectionError as e:
logger.debug("[%s] Retrying ...", e)
await asyncio.sleep(check_interval)
logger.info("All runners ready.")
@property
def options(self) -> grpc.aio.ChannelArgumentType:
options: grpc.aio.ChannelArgumentType = []
if sys.platform != "win32":
# https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/grpc_types.h#L294
# Eventhough GRPC_ARG_ALLOW_REUSEPORT is set to 1 by default, we want still
# want to explicitly set it to 1 so that we can spawn multiple gRPC servers in
# production settings.
options.append(("grpc.so_reuseport", 1))
if self.max_concurrent_streams:
options.append(("grpc.max_concurrent_streams", self.max_concurrent_streams))
if self.enable_channelz:
options.append(("grpc.enable_channelz", 1))
if self.max_message_length:
options.extend(
(
# grpc.max_message_length this is a deprecated options, for backward compatibility
("grpc.max_message_length", self.max_message_length),
("grpc.max_receive_message_length", self.max_message_length),
("grpc.max_send_message_length", self.max_message_length),
)
)
return tuple(options)
@property
def interceptors(self) -> Interceptors:
# Note that order of interceptors is important here.
from ...grpc.interceptors.opentelemetry import (
AsyncOpenTelemetryServerInterceptor,
)
interceptors: Interceptors = [AsyncOpenTelemetryServerInterceptor]
if BentoMLContainer.api_server_config.metrics.enabled.get():
from ...grpc.interceptors.prometheus import PrometheusServerInterceptor
interceptors.append(PrometheusServerInterceptor)
if BentoMLContainer.api_server_config.logging.access.enabled.get():
from ...grpc.interceptors.access import AccessLogServerInterceptor
access_logger = logging.getLogger("bentoml.access")
if access_logger.getEffectiveLevel() <= logging.INFO:
interceptors.append(AccessLogServerInterceptor)
# add users-defined interceptors.
interceptors.extend(self.bento_service.interceptors)
return interceptors
@property
def handlers(self) -> t.Sequence[grpc.GenericRpcHandler] | None:
# Note that currently BentoML doesn't provide any specific
# handlers for gRPC. If users have any specific handlers,
# BentoML will pass it through to grpc.aio.Server
return self.bento_service.grpc_handlers
@cached_property
def loop(self) -> asyncio.AbstractEventLoop:
return asyncio.get_event_loop()
def run(self) -> None:
try:
self.loop.run_until_complete(self.serve())
finally:
try:
self.loop.call_soon_threadsafe(
lambda: asyncio.ensure_future(self.shutdown())
)
except Exception as e: # pylint: disable=broad-except
raise RuntimeError(f"Server failed unexpectedly: {e}") from None
@cached_property
def context(self) -> Context:
return Context()
def configure_port(self, addr: str):
if self.ssl_certfile:
client_auth = False
ca_cert = None
assert (
self.ssl_keyfile
), "'ssl_keyfile' is required when 'ssl_certfile' is provided."
if self.ssl_ca_certs is not None:
client_auth = True
ca_cert = load_from_file(self.ssl_ca_certs)
server_credentials = grpc.ssl_server_credentials(
(
(
load_from_file(self.ssl_keyfile),
load_from_file(self.ssl_certfile),
),
),
root_certificates=ca_cert,
require_client_auth=client_auth,
)
self.add_secure_port(addr, server_credentials)
else:
self.add_insecure_port(addr)
async def serve(self) -> None:
self.configure_port(self.bind_address)
await self.startup()
await self.wait_for_termination()
@property
def on_startup(self) -> list[LifecycleHook]:
on_startup = [
*self.bento_service.startup_hooks,
self.bento_service.on_grpc_server_startup,
]
if BentoMLContainer.development_mode.get():
for runner in self.bento_service.runners:
on_startup.append(partial(runner.init_local, quiet=True))
else:
for runner in self.bento_service.runners:
if runner.embedded:
on_startup.append(partial(runner.init_local, quiet=True))
else:
on_startup.append(runner.init_client)
on_startup.append(self.wait_for_runner_ready)
return on_startup
async def startup(self) -> None:
from ...exceptions import MissingDependencyException
_, services = import_generated_stubs(self.protocol_version)
# Running on_startup callback.
for handler in self.on_startup:
out = handler()
if inspect.isawaitable(out):
await out
# register bento servicer
services.add_BentoServiceServicer_to_server(self.servicer, self)
services_health.add_HealthServicer_to_server(self.health_servicer, self)
service_names = self.service_names
# register custom servicer
for (
user_servicer,
add_servicer_fn,
user_service_names,
) in self.mount_servicers:
add_servicer_fn(user_servicer(), self)
service_names += tuple(user_service_names)
if self.enable_channelz:
try:
from grpc_channelz.v1 import channelz
except ImportError:
raise MissingDependencyException(
"'--debug' is passed, which requires 'grpcio-channelz' to be installed. Install with 'pip install bentoml[grpc-channelz]'."
) from None
if "GRPC_TRACE" not in os.environ:
logger.debug(
"channelz is enabled, while GRPC_TRACE is not set. No channel tracing will be recorded."
)
channelz.add_channelz_servicer(self)
if self.enable_reflection:
try:
# reflection is required for health checking to work.
from grpc_reflection.v1alpha import reflection
except ImportError:
raise MissingDependencyException(
"reflection is enabled, which requires 'grpcio-reflection' to be installed. Install with 'pip install bentoml[grpc-reflection]'."
) from None
service_names += (reflection.SERVICE_NAME,)
reflection.enable_server_reflection(service_names, self)
# mark all services as healthy
for service in service_names:
await self.health_servicer.set(
service, pb_health.HealthCheckResponse.SERVING # type: ignore (no types available)
)
await self.start()
@property
def on_shutdown(self) -> list[LifecycleHook]:
on_shutdown = [
*self.bento_service.shutdown_hooks,
self.bento_service.on_grpc_server_shutdown,
]
for runner in self.bento_service.runners:
on_shutdown.append(runner.destroy)
return on_shutdown
async def shutdown(self):
# Running on_shutdown callback.
for handler in self.on_shutdown:
out = handler()
if inspect.isawaitable(out):
await out
await self.stop(grace=self.graceful_shutdown_timeout)
await self.health_servicer.enter_graceful_shutdown()
self.loop.stop()
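For context, the health servicer registered in `startup` above can be probed with the standard `grpc_health` client. A minimal sketch, not part of BentoML, with a placeholder server address:

```python
# Sketch only: "localhost:3000" is a placeholder for wherever the server binds.
import grpc
from grpc_health.v1 import health_pb2, health_pb2_grpc

with grpc.insecure_channel("localhost:3000") as channel:
    stub = health_pb2_grpc.HealthStub(channel)
    response = stub.Check(health_pb2.HealthCheckRequest(service=""))
    # Equals health_pb2.HealthCheckResponse.SERVING once the server has marked
    # its services healthy during startup.
    print(response.status)
```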
---

repo_name: tensorflow/agents | path: /tf_agents/train/learner.py | filename: learner.py
blob_id: 69104f100e65355e3ed60a73b4438f1ae15caa85 | directory_id: 2212a32833776a5d5d2164d8efd11bd18bd3f768 | content_id: 72e795ad1b55cda295e8f47510e8a2d9f83147c9
snapshot_id: f39805fb98ef9af712dcaff3ba49e1ac6d42804b | revision_id: eca1093d3a047e538f17f6ab92ab4d8144284f23 | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-14T04:56:30.774797 | revision_date: 2023-08-02T17:43:44 | committer_date: 2023-08-02T17:44:09
github_id: 157,936,206 | star_events_count: 2,755 | fork_events_count: 848 | gha_event_created_at: 2023-07-26T02:35:32 | gha_created_at: 2018-11-17T00:29:12
language: Python | gha_language: Python | src_encoding: UTF-8 | extension: py | is_vendor: false | is_generated: false | length_bytes: 18,488
content:
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Learner implementation for Agents. Refer to the examples dir."""
import os
from typing import Any, Optional, Tuple
from absl import logging
import gin
import tensorflow.compat.v2 as tf
from tf_agents.agents import tf_agent
from tf_agents.specs import tensor_spec
from tf_agents.train import interval_trigger
from tf_agents.typing import types
from tf_agents.utils import common
TRAIN_DIR = 'train'
POLICY_SAVED_MODEL_DIR = 'policies'
COLLECT_POLICY_SAVED_MODEL_DIR = 'collect_policy'
GREEDY_POLICY_SAVED_MODEL_DIR = 'greedy_policy'
RAW_POLICY_SAVED_MODEL_DIR = 'policy'
POLICY_CHECKPOINT_DIR = 'checkpoints'
REPLAY_BUFFER_CHECKPOINT_DIR = 'replay_buffer_checkpoints'
ExperienceAndSampleInfo = Tuple[types.NestedTensor, Tuple[Any, ...]]
@gin.configurable
class Learner(tf.Module):
"""Manages all the learning details needed when training an agent.
These include:
* Using distribution strategies correctly
* Summaries
* Checkpoints
* Minimizing entering/exiting TF context:
Especially in the case of TPUs scheduling a single TPU program to
perform multiple train steps is critical for performance.
* Generalizes the train call to be done correctly across CPU, GPU, or TPU
executions managed by DistributionStrategies. This uses `strategy.run` and
then makes sure to do a reduce operation over the `LossInfo` returned by
the agent.
"""
def __init__(
self,
root_dir,
train_step,
agent,
experience_dataset_fn=None,
after_train_strategy_step_fn=None,
triggers=None,
checkpoint_interval=100000,
summary_interval=1000,
max_checkpoints_to_keep=3,
use_kwargs_in_agent_train=False,
strategy=None,
run_optimizer_variable_init=True,
use_reverb_v2=False,
direct_sampling=False,
experience_dataset_options=None,
strategy_run_options=None,
summary_root_dir=None,
):
"""Initializes a Learner instance.
Args:
root_dir: Main directory path where checkpoints, saved_models, and
summaries (if summary_dir is not specified) will be written to.
train_step: a scalar tf.int64 `tf.Variable` which will keep track of the
number of train steps. This is used for artifacts created like
summaries, or outputs in the root_dir.
agent: `tf_agent.TFAgent` instance to train with.
experience_dataset_fn: a function that will create an instance of a
tf.data.Dataset used to sample experience for training. Required for
using the Learner as is. Optional for subclass learners which take a new
iterator each time when `learner.run` is called.
after_train_strategy_step_fn: (Optional) callable of the form `fn(sample,
loss)` which can be used for example to update priorities in a replay
buffer where sample is pulled from the `experience_iterator` and loss is
a `LossInfo` named tuple returned from the agent. This is called after
every train step. It runs using `strategy.run(...)`.
triggers: List of callables of the form `trigger(train_step)`. After every
`run` call every trigger is called with the current `train_step` value
as an np scalar.
checkpoint_interval: Number of train steps in between checkpoints. Note
these are placed into triggers and so a check to generate a checkpoint
only occurs after every `run` call. Set to -1 to disable (this is not
recommended, because it means that if the pipeline gets preempted, all
previous progress is lost). This only takes care of the checkpointing
the training process. Policies must be explicitly exported through
triggers.
summary_interval: Number of train steps in between summaries. Note these
are placed into triggers and so a check to generate a checkpoint only
occurs after every `run` call.
max_checkpoints_to_keep: Maximum number of checkpoints to keep around.
These are used to recover from pre-emptions when training.
use_kwargs_in_agent_train: If True the experience from the replay buffer
is passed into the agent as kwargs. This requires samples from the RB to
be of the form `dict(experience=experience, kwarg1=kwarg1, ...)`. This
is useful if you have an agent with a custom argspec.
strategy: (Optional) `tf.distribute.Strategy` to use during training.
run_optimizer_variable_init: Specifies if the variables of the optimizer
are initialized before checkpointing. This should be almost always
`True` (default) to ensure that the state of the optimizer is
checkpointed properly. The initialization of the optimizer variables
happens by building the Tensorflow graph. This is done by calling a
`get_concrete_function` on the agent's `train` method which requires
passing some input. Since, no real data is available at this point we
use the batched form of `training_data_spec` to achieve this (standard
technique). The problem arises when the agent expects some agent
specific batching of the input. In this case, there is no _general_ way
at this point in the learner to batch the impacted specs properly. To
avoid breaking the code in these specific cases, we recommend turning
off initialization of the optimizer variables by setting the value of
this field to `False`.
use_reverb_v2: If True then we expect the dataset samples to return a
named_tuple with a data and an info field. If False we expect a
tuple(data, info).
direct_sampling: Do not use replay_buffer, but sample from offline dataset
directly.
experience_dataset_options: (Optional) `tf.distribute.InputOptions` passed
to `strategy.distribute_datasets_from_function`, used to control options
on how this dataset is distributed.
strategy_run_options: (Optional) `tf.distribute.RunOptions` passed to
`strategy.run`. This is passed to every strategy.run invocation by the
learner.
summary_root_dir: (Optional) Root directory path where summaries will be
written to.
"""
if checkpoint_interval < 0:
logging.warning(
'Warning: checkpointing the training process is manually disabled.'
'This means training progress will NOT be automatically restored '
'if the job gets preempted.'
)
self._train_dir = os.path.join(root_dir, TRAIN_DIR)
summary_root_dir = (
root_dir if summary_root_dir is None else summary_root_dir
)
self._summary_dir = os.path.join(summary_root_dir, TRAIN_DIR)
self._use_reverb_v2 = use_reverb_v2
self._direct_sampling = direct_sampling
if summary_interval:
self.train_summary_writer = tf.compat.v2.summary.create_file_writer(
self._summary_dir, flush_millis=10000
)
else:
self.train_summary_writer = tf.summary.create_noop_writer()
self.train_step = train_step
self._agent = agent
self.use_kwargs_in_agent_train = use_kwargs_in_agent_train
self.strategy = strategy or tf.distribute.get_strategy()
dataset = None
if experience_dataset_fn:
with self.strategy.scope():
dataset = self.strategy.distribute_datasets_from_function(
lambda _: experience_dataset_fn(),
options=experience_dataset_options,
)
self._experience_iterator = iter(dataset)
self.after_train_strategy_step_fn = after_train_strategy_step_fn
self.triggers = triggers or []
# Prevent autograph from going into the agent.
self._agent.train = tf.autograph.experimental.do_not_convert(agent.train)
self._strategy_run_options = strategy_run_options
checkpoint_dir = os.path.join(self._train_dir, POLICY_CHECKPOINT_DIR)
with self.strategy.scope():
agent.initialize()
if run_optimizer_variable_init:
# Force a concrete function creation inside of the strategy scope to
# ensure that all variables, including optimizer slot variables, are
# created. This has to happen before the checkpointer is created.
if dataset is not None:
if use_reverb_v2:
batched_specs = dataset.element_spec.data
else:
# Assumes (experience, sample_info) = next(iterator)
batched_specs, _ = dataset.element_spec
else:
batched_specs = tensor_spec.add_outer_dims_nest(
self._agent.training_data_spec,
(None, self._agent.train_sequence_length),
)
if self.use_kwargs_in_agent_train:
batched_specs = dict(experience=batched_specs)
@common.function
def _create_variables(specs):
# TODO(b/170516529): Each replica has to be in the same graph.
# This can be ensured by placing the `strategy.run(...)` call inside
# the `tf.function`.
if self.use_kwargs_in_agent_train:
return self.strategy.run(
self._agent.train,
kwargs=specs,
options=self._strategy_run_options,
)
return self.strategy.run(
self._agent.train,
args=(specs,),
options=self._strategy_run_options,
)
_create_variables.get_concrete_function(batched_specs)
else:
# TODO(b/186052656) Update clients.
        logging.warning('run_optimizer_variable_init = False is Deprecated')
self._checkpointer = common.Checkpointer(
checkpoint_dir,
max_to_keep=max_checkpoints_to_keep,
agent=self._agent,
train_step=self.train_step,
)
self._checkpointer.initialize_or_restore() # pytype: disable=attribute-error
for trigger in self.triggers:
if hasattr(trigger, 'set_start'):
trigger.set_start(self.train_step.numpy())
self.triggers.append(self._get_checkpoint_trigger(checkpoint_interval))
self.summary_interval = tf.constant(summary_interval, dtype=tf.int64)
@property
def train_step_numpy(self):
"""The current train_step.
Returns:
The current `train_step`. Note this will return a scalar numpy array which
holds the `train_step` value when this was called.
"""
return self.train_step.numpy()
def _get_checkpoint_trigger(self, checkpoint_interval):
if checkpoint_interval <= 0:
return lambda _, force_trigger=False: None
save_fn = lambda: self._checkpointer.save(self.train_step)
return interval_trigger.IntervalTrigger(
checkpoint_interval, save_fn, start=self.train_step.numpy()
)
def run(self, iterations=1, iterator=None, parallel_iterations=10):
"""Runs `iterations` iterations of training.
Args:
iterations: Number of train iterations to perform per call to run. The
iterations will be evaluated in a tf.while loop created by autograph.
Final aggregated losses will be returned.
iterator: The iterator to the dataset to use for training. If not
specified, `self._experience_iterator` is used.
parallel_iterations: Maximum number of train iterations to allow running
in parallel. This value is forwarded directly to the training tf.while
loop.
Returns:
The total loss computed before running the final step.
"""
assert iterations >= 1, (
        'Iterations must be greater than or equal to 1, was %d' % iterations
)
def _summary_record_if():
if self.summary_interval:
return tf.math.equal(
self.train_step % tf.constant(self.summary_interval), 0
)
else:
return tf.constant(False)
with self.train_summary_writer.as_default(), common.soft_device_placement(), tf.compat.v2.summary.record_if(
_summary_record_if
), self.strategy.scope():
iterator = iterator or self._experience_iterator
loss_info = self._train(
tf.constant(iterations), iterator, parallel_iterations
)
train_step_val = self.train_step.numpy()
for trigger in self.triggers:
trigger(train_step_val)
return loss_info
# Use tf.config.experimental_run_functions_eagerly(True) if you want to
# disable use of tf.function.
@common.function(autograph=True)
def _train(self, iterations, iterator, parallel_iterations):
# Call run explicitly once to get loss info shape for autograph. Because the
# for loop below will get converted to a `tf.while_loop` by autograph we
# need the shape of loss info to be well defined.
loss_info = self.single_train_step(iterator)
for _ in tf.range(iterations - 1):
tf.autograph.experimental.set_loop_options(
parallel_iterations=parallel_iterations
)
loss_info = self.single_train_step(iterator)
def _reduce_loss(loss):
rank = None
if isinstance(loss, tf.distribute.DistributedValues):
# If loss is distributed get the rank from the first replica.
rank = loss.values[0].shape.rank
elif tf.is_tensor(loss):
rank = loss.shape.rank
axis = None
if rank:
axis = tuple(range(0, rank))
return self.strategy.reduce(tf.distribute.ReduceOp.SUM, loss, axis=axis)
# We assume all data can be reduced in the loss_info. This means no
# string dtypes are currently allowed as LossInfo Fields.
reduced_loss_info = tf.nest.map_structure(_reduce_loss, loss_info)
return reduced_loss_info
def single_train_step(self, iterator):
sample = next(iterator)
if self._direct_sampling:
experience, sample_info = sample, None
elif self._use_reverb_v2:
experience, sample_info = sample.data, sample.info
else:
experience, sample_info = sample
if self.use_kwargs_in_agent_train:
loss_info = self.strategy.run(
self._agent.train,
kwargs=experience,
options=self._strategy_run_options,
)
else:
loss_info = self.strategy.run(
self._agent.train,
args=(experience,),
options=self._strategy_run_options,
)
if self.after_train_strategy_step_fn:
if self.use_kwargs_in_agent_train:
self.strategy.run(
self.after_train_strategy_step_fn,
kwargs=dict(
experience=(experience, sample_info), loss_info=loss_info
),
options=self._strategy_run_options,
)
else:
self.strategy.run(
self.after_train_strategy_step_fn,
args=((experience, sample_info), loss_info),
options=self._strategy_run_options,
)
return loss_info
def loss(
self,
experience_and_sample_info: Optional[ExperienceAndSampleInfo] = None,
reduce_op: tf.distribute.ReduceOp = tf.distribute.ReduceOp.SUM,
) -> tf_agent.LossInfo:
"""Computes loss for the experience.
Since this calls agent.loss() it does not update gradients or
increment the train step counter. Networks are called with `training=False`
so statistics like batch norm are not updated.
Args:
experience_and_sample_info: A batch of experience and sample info. If not
specified, `next(self._experience_iterator)` is used.
reduce_op: a `tf.distribute.ReduceOp` value specifying how loss values
should be aggregated across replicas.
Returns:
The total loss computed.
"""
def _summary_record_if():
return tf.math.equal(
self.train_step % tf.constant(self.summary_interval), 0
)
with self.train_summary_writer.as_default(), common.soft_device_placement(), tf.compat.v2.summary.record_if(
_summary_record_if
), self.strategy.scope():
if experience_and_sample_info is None:
sample = next(self._experience_iterator)
if self._direct_sampling:
experience_and_sample_info = (sample, None)
elif self._use_reverb_v2:
experience_and_sample_info = (sample.data, sample.info)
else:
experience_and_sample_info = sample
loss_info = self._loss(experience_and_sample_info, reduce_op)
return loss_info
# Use tf.config.experimental_run_functions_eagerly(True) if you want to
# disable use of tf.function.
@common.function(autograph=True)
def _loss(
self,
experience_and_sample_info: ExperienceAndSampleInfo,
reduce_op: tf.distribute.ReduceOp,
) -> tf_agent.LossInfo:
(experience, sample_info) = experience_and_sample_info
if self.use_kwargs_in_agent_train:
loss_info = self.strategy.run(self._agent.loss, kwargs=experience)
else:
loss_info = self.strategy.run(self._agent.loss, args=(experience,))
if self.after_train_strategy_step_fn:
if self.use_kwargs_in_agent_train:
self.strategy.run(
self.after_train_strategy_step_fn,
kwargs=dict(
experience=(experience, sample_info), loss_info=loss_info
),
options=self._strategy_run_options,
)
else:
self.strategy.run(
self.after_train_strategy_step_fn,
args=((experience, sample_info), loss_info),
options=self._strategy_run_options,
)
def _reduce_loss(loss):
rank = None
if isinstance(loss, tf.distribute.DistributedValues):
rank = loss.values[0].shape.rank
elif tf.is_tensor(loss):
rank = loss.shape.rank
axis = None
if rank:
axis = tuple(range(0, rank))
return self.strategy.reduce(reduce_op, loss, axis=axis)
# We assume all data can be reduced in the loss_info. This means no
# string dtypes are currently allowed as LossInfo Fields.
reduced_loss_info = tf.nest.map_structure(_reduce_loss, loss_info)
return reduced_loss_info
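# Hedged usage sketch (illustrative, not part of the original module): wiring a
# Learner to a replay-buffer dataset and training for a few iterations.
# `my_agent` and `my_replay_buffer` stand in for objects constructed elsewhere;
# only constructor arguments documented in the docstring above are assumed.
#   train_step = tf.Variable(0, dtype=tf.int64)
#   example_learner = Learner(
#       root_dir='/tmp/learner_example',
#       train_step=train_step,
#       agent=my_agent,
#       experience_dataset_fn=lambda: my_replay_buffer.as_dataset(
#           sample_batch_size=64, num_steps=2),
#       checkpoint_interval=10000,
#   )
#   for _ in range(10):
#     loss_info = example_learner.run(iterations=100)  # final aggregated LossInfo
#   eval_loss = example_learner.loss()  # computes loss without a gradient update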
|
49fb26cf8c686f73b16cda458d5045601b85ed16
|
518bf342bc4138982af3e2724e75f1d9ca3ba56c
|
/solutions/0755. Pour Water/0755.py
|
ded1e66ef7d92c063fca0809346f9f5fd7c0abdb
|
[
"MIT"
] |
permissive
|
walkccc/LeetCode
|
dae85af7cc689882a84ee5011f0a13a19ad97f18
|
a27be41c174565d365cbfe785f0633f634a01b2a
|
refs/heads/main
| 2023-08-28T01:32:43.384999
| 2023-08-20T19:00:45
| 2023-08-20T19:00:45
| 172,231,974
| 692
| 302
|
MIT
| 2023-08-13T14:48:42
| 2019-02-23T15:46:23
|
C++
|
UTF-8
|
Python
| false
| false
| 402
|
py
|
0755.py
|
class Solution:
def pourWater(self, heights: List[int], volume: int, k: int) -> List[int]:
i = k
while volume > 0:
volume -= 1
while i > 0 and heights[i] >= heights[i - 1]:
i -= 1
while i + 1 < len(heights) and heights[i] >= heights[i + 1]:
i += 1
while i > k and heights[i] == heights[i - 1]:
i -= 1
heights[i] += 1
return heights
|
ac357f5d1c20f36ef7ccbd8158a61086fe3eac7b
|
0e4860fecfdd34a3255003cc8c8df086c14083dd
|
/python/source_code/source_code_of_lp3thw/ex7old.py
|
9e715ac5434ed13355bd1cba6c9bcf42a6bdeaf3
|
[] |
no_license
|
anzhihe/learning
|
503ab9a58f280227011da5eaa4b14b46c678e6f3
|
66f7f801e1395207778484e1543ea26309d4b354
|
refs/heads/master
| 2023-08-08T11:42:11.983677
| 2023-07-29T09:19:47
| 2023-07-29T09:19:47
| 188,768,643
| 1,443
| 617
| null | 2023-08-24T02:10:34
| 2019-05-27T04:04:10
|
Python
|
UTF-8
|
Python
| false
| false
| 892
|
py
|
ex7old.py
|
print ( "Mary had a little lamb.")
print ("Its fleece was white as %s."%'snow')
print ("And everywhere that Mary went.")
print ("."*10)#what'd that do?
end1="C"
end2="h"
end3="e"
end4="e"
end5="s"
end6="e"
end7="B"
end8="u"
end9="r"
end10="g"
end11="e"
end12="r"
end0= " "# a new string I added --- a "space"
#watch that comma at the end. try removing it to see what happens. In Py3, changing the comma's position (whether inside or outside the parentheses) makes no difference and cannot make a space appear between the English words; but
print (end1 + end2 + end3 + end4 + end5 + end6,) ,print (end7 + end8 + end9 + end10 + end11 +end12)
print ("------ We do have a bottom line ------")
# The output shows that adding a space string between the two pieces of text reproduces the example from the original py file.
print (end1 + end2 + end3 + end4 + end5 + end6+end0+end7 + end8 + end9 + end10 + end11 +end12)
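# Hedged note (not part of the original exercise): in Python 3, passing the two
# halves as separate arguments to a single print call also inserts the space,
# because print's default separator is one space:
#   print(end1 + end2 + end3 + end4 + end5 + end6, end7 + end8 + end9 + end10 + end11 + end12)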
|
6cc9452dfb335714dc792cb250657a2b24d0207e
|
69d8d91954f6623f3674d52d734d589f72383628
|
/horizon/templatetags/horizon.py
|
4895e4f5876ea149e737ff86cedfaf271f78ab5f
|
[
"Apache-2.0"
] |
permissive
|
openstack/horizon
|
d031cebe126c06ad9717bbc52790b3d890e8661e
|
7896fd8c77a6766a1156a520946efaf792b76ca5
|
refs/heads/master
| 2023-09-04T06:57:58.069907
| 2023-09-01T20:17:10
| 2023-09-01T20:17:10
| 2,665,166
| 1,060
| 1,175
|
Apache-2.0
| 2023-08-07T02:33:44
| 2011-10-28T13:12:05
|
Python
|
UTF-8
|
Python
| false
| false
| 7,819
|
py
|
horizon.py
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
from django.conf import settings
from django import template
from django.template import Node
from django.utils.encoding import force_str
from django.utils import translation
from django.utils.translation import gettext_lazy as _
from horizon.base import Horizon
from horizon import conf
register = template.Library()
class MinifiedNode(Node):
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
return ' '.join(
force_str(self.nodelist.render(context).strip()).split()
).replace(' > ', '>').replace(' <', '<')
@register.filter
def has_permissions(user, component):
"""Checks if the given user meets the permissions requirements."""
return user.has_perms(getattr(component, 'permissions', set()))
@register.filter
def has_permissions_on_list(components, user):
return [component for component
in components if has_permissions(user, component)]
@register.inclusion_tag('horizon/_sidebar.html', takes_context=True)
def horizon_nav(context):
if 'request' not in context:
return {}
current_dashboard = context['request'].horizon.get('dashboard', None)
current_panel_group = None
current_panel = context['request'].horizon.get('panel', None)
dashboards = []
for dash in Horizon.get_dashboards():
panel_groups = dash.get_panel_groups()
non_empty_groups = []
for group in panel_groups.values():
allowed_panels = []
for panel in group:
if (callable(panel.nav) and panel.nav(context) and
panel.can_access(context)):
allowed_panels.append(panel)
elif (not callable(panel.nav) and panel.nav and
panel.can_access(context)):
allowed_panels.append(panel)
if panel == current_panel:
current_panel_group = group.slug
if allowed_panels:
non_empty_groups.append((group, allowed_panels))
if (callable(dash.nav) and dash.nav(context) and
dash.can_access(context)):
dashboards.append((dash, OrderedDict(non_empty_groups)))
elif (not callable(dash.nav) and dash.nav and
dash.can_access(context)):
dashboards.append((dash, OrderedDict(non_empty_groups)))
return {'components': dashboards,
'user': context['request'].user,
'current': current_dashboard,
'current_panel_group': current_panel_group,
'current_panel': current_panel.slug if current_panel else '',
'request': context['request']}
@register.inclusion_tag('horizon/_nav_list.html', takes_context=True)
def horizon_main_nav(context):
"""Generates top-level dashboard navigation entries."""
if 'request' not in context:
return {}
current_dashboard = context['request'].horizon.get('dashboard', None)
dashboards = []
for dash in Horizon.get_dashboards():
if dash.can_access(context):
if callable(dash.nav) and dash.nav(context):
dashboards.append(dash)
elif dash.nav:
dashboards.append(dash)
return {'components': dashboards,
'user': context['request'].user,
'current': current_dashboard,
'request': context['request']}
@register.inclusion_tag('horizon/_subnav_list.html', takes_context=True)
def horizon_dashboard_nav(context):
"""Generates sub-navigation entries for the current dashboard."""
if 'request' not in context:
return {}
dashboard = context['request'].horizon['dashboard']
panel_groups = dashboard.get_panel_groups()
non_empty_groups = []
for group in panel_groups.values():
allowed_panels = []
for panel in group:
if (callable(panel.nav) and panel.nav(context) and
panel.can_access(context)):
allowed_panels.append(panel)
elif (not callable(panel.nav) and panel.nav and
panel.can_access(context)):
allowed_panels.append(panel)
if allowed_panels:
if group.name is None:
non_empty_groups.append((dashboard.name, allowed_panels))
else:
non_empty_groups.append((group.name, allowed_panels))
return {'components': OrderedDict(non_empty_groups),
'user': context['request'].user,
'current': context['request'].horizon['panel'].slug,
'request': context['request']}
@register.filter
def quota(val, units=None):
if val == float("inf"):
return _("(No Limit)")
if units is not None:
return "%s %s %s" % (val, force_str(units),
force_str(_("Available")))
return "%s %s" % (val, force_str(_("Available")))
@register.filter
def quotainf(val, units=None):
if val == float("inf"):
return '-1'
if units is not None:
return "%s %s" % (val, units)
return val
@register.simple_tag
def quotapercent(used, limit):
if used >= limit or limit == 0:
return 100
if limit == float("inf"):
return '[%s, true]' % used
return round((float(used) / float(limit)) * 100)
class JSTemplateNode(template.Node):
"""Helper node for the ``jstemplate`` template tag."""
def __init__(self, nodelist):
self.nodelist = nodelist
    def render(self, context):
output = self.nodelist.render(context)
output = output.replace('[[[', '{{{').replace(']]]', '}}}')
output = output.replace('[[', '{{').replace(']]', '}}')
output = output.replace('[%', '{%').replace('%]', '%}')
return output
@register.tag
def jstemplate(parser, token):
"""Templatetag to handle any of the Mustache-based templates.
Replaces ``[[[`` and ``]]]`` with ``{{{`` and ``}}}``,
``[[`` and ``]]`` with ``{{`` and ``}}`` and
``[%`` and ``%]`` with ``{%`` and ``%}`` to avoid conflicts
with Django's template engine when using any of the Mustache-based
templating libraries.
"""
nodelist = parser.parse(('endjstemplate',))
parser.delete_first_token()
return JSTemplateNode(nodelist)
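# Hedged illustration (not part of the original module): the substitution that
# JSTemplateNode.render applies to the block's output. A template written as
#   {% jstemplate %}[[[ raw ]]] [[ name ]] [% if done %]ok[% endif %]{% endjstemplate %}
# renders as
#   {{{ raw }}} {{ name }} {% if done %}ok{% endif %}
# so the Mustache-style delimiters never collide with Django's own syntax.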
@register.simple_tag
def load_config():
return conf
@register.simple_tag
def datepicker_locale():
locale_mapping = settings.DATEPICKER_LOCALES
return locale_mapping.get(translation.get_language(), 'en')
@register.simple_tag
def template_cache_age():
return settings.NG_TEMPLATE_CACHE_AGE
@register.tag
def minifyspace(parser, token):
"""Removes whitespace including tab and newline characters.
Do not use this if you are using a <pre> tag.
Example usage::
{% minifyspace %}
<p>
<a title="foo"
href="foo/">
Foo
</a>
</p>
{% endminifyspace %}
This example would return this HTML::
<p><a title="foo" href="foo/">Foo</a></p>
"""
nodelist = parser.parse(('endminifyspace',))
parser.delete_first_token()
return MinifiedNode(nodelist)
|
f5f10207937fd30737d9cc62458641c902d0b0b3
|
e7efae2b83216d9621bd93390959d652de779c3d
|
/datadog_checks_base/datadog_checks/base/utils/tagging.py
|
838cc8f8a5d75051ca7ab5b425f2846e7d27f388
|
[
"BSD-3-Clause",
"MIT",
"BSD-3-Clause-Modification",
"Unlicense",
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
DataDog/integrations-core
|
ee1886cc7655972b2791e6ab8a1c62ab35afdb47
|
406072e4294edff5b46b513f0cdf7c2c00fac9d2
|
refs/heads/master
| 2023-08-31T04:08:06.243593
| 2023-08-30T18:22:10
| 2023-08-30T18:22:10
| 47,203,045
| 852
| 1,548
|
BSD-3-Clause
| 2023-09-14T16:39:54
| 2015-12-01T16:41:45
|
Python
|
UTF-8
|
Python
| false
| false
| 394
|
py
|
tagging.py
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
try:
import tagger
except ImportError:
from ..stubs import tagger # noqa: F401
GENERIC_TAGS = {
'cluster_name',
'clustername',
'cluster',
'clusterid',
'cluster_id',
'env',
'host_name',
'hostname',
'host',
'service',
'version',
}
|
217b6517a6168bbf004fcfc41c04509bee1bcc97
|
11cd362cdd78c2fc48042ed203614b201ac94aa6
|
/desktop/libs/liboozie/src/liboozie/credentials.py
|
811d61f804a8be727d3f4df00ca577c369c867b7
|
[
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"MIT",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] |
permissive
|
cloudera/hue
|
b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908
|
dccb9467675c67b9c3399fc76c5de6d31bfb8255
|
refs/heads/master
| 2023-08-31T06:49:25.724501
| 2023-08-28T20:45:00
| 2023-08-28T20:45:00
| 732,593
| 5,655
| 2,244
|
Apache-2.0
| 2023-09-14T03:05:41
| 2010-06-21T19:46:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,646
|
py
|
credentials.py
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import object
import logging
import sys
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
LOG = logging.getLogger()
class Credentials(object):
NAME_TO_CLASS_MAPPING = {
"hcat": "org.apache.oozie.action.hadoop.HCatCredentials",
"hive2": "org.apache.oozie.action.hadoop.Hive2Credentials",
"hbase": "org.apache.oozie.action.hadoop.HbaseCredentials",
}
def __init__(self, credentials=None):
if credentials is None:
self.credentials = {}
else:
self.credentials = credentials
def fetch(self, oozie_api):
configuration = oozie_api.get_configuration()
self.credentials = self._parse_oozie(configuration)
def _parse_oozie(self, configuration_dic):
return dict([cred.strip().split('=') for cred in configuration_dic.get('oozie.credentials.credentialclasses', '').strip().split(',') if cred])
@property
def class_to_name_credentials(self):
return dict((v,k) for k, v in self.credentials.items())
def get_properties(self, hive_properties=None):
credentials = {}
from beeswax import hive_site, conf
if not hasattr(conf.HIVE_SERVER_HOST, 'get') or not conf.HIVE_SERVER_HOST.get():
LOG.warning('Could not get all the Oozie credentials: beeswax app is blacklisted.')
else:
if hive_properties is None:
hive_properties = hive_site.get_metastore()
if hive_properties:
hive_properties['hive2.server.principal'] = hive_site.get_hiveserver2_kerberos_principal(conf.HIVE_SERVER_HOST.get())
if not hive_properties:
hive_properties = {}
LOG.warning('Could not get all the Oozie credentials: hive-site.xml required on the Hue host.')
credentials[self.hive_name] = {
'xml_name': self.hive_name,
'properties': [
('hcat.metastore.uri', hive_properties.get('thrift_uri')),
('hcat.metastore.principal', hive_properties.get('kerberos_principal')),
]
}
credentials[self.hiveserver2_name] = {
'xml_name': self.hiveserver2_name,
'properties': [
('hive2.jdbc.url', hive_site.hiveserver2_jdbc_url()),
('hive2.server.principal', hive_properties.get('hive2.server.principal')),
]
}
credentials[self.hbase_name] = {
'xml_name': self.hbase_name,
'properties': []
}
return credentials
@property
def hive_name(self):
return self.class_to_name_credentials.get('org.apache.oozie.action.hadoop.HCatCredentials')
@property
def hiveserver2_name(self):
return self.class_to_name_credentials.get('org.apache.oozie.action.hadoop.Hive2Credentials')
@property
def hbase_name(self):
return self.class_to_name_credentials.get('org.apache.oozie.action.hadoop.HbaseCredentials')
|
485ffaa14d0a41dfdea8f8671e45deb8731bd43a
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stdlib/bz2.pyi
|
cea317e2803717f2cf8a6c08a91e3cb43956877f
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 4,849
|
pyi
|
bz2.pyi
|
import _compression
import sys
from _compression import BaseStream
from _typeshed import ReadableBuffer, Self, StrOrBytesPath, WriteableBuffer
from collections.abc import Iterable
from typing import IO, Any, Protocol, TextIO, overload
from typing_extensions import Literal, SupportsIndex, TypeAlias, final
__all__ = ["BZ2File", "BZ2Compressor", "BZ2Decompressor", "open", "compress", "decompress"]
# The following attributes and methods are optional:
# def fileno(self) -> int: ...
# def close(self) -> object: ...
class _ReadableFileobj(_compression._Reader, Protocol): ...
class _WritableFileobj(Protocol):
def write(self, __b: bytes) -> object: ...
# The following attributes and methods are optional:
# def fileno(self) -> int: ...
# def close(self) -> object: ...
def compress(data: bytes, compresslevel: int = ...) -> bytes: ...
def decompress(data: bytes) -> bytes: ...
_ReadBinaryMode: TypeAlias = Literal["", "r", "rb"]
_WriteBinaryMode: TypeAlias = Literal["w", "wb", "x", "xb", "a", "ab"]
_ReadTextMode: TypeAlias = Literal["rt"]
_WriteTextMode: TypeAlias = Literal["wt", "xt", "at"]
@overload
def open(
filename: _ReadableFileobj,
mode: _ReadBinaryMode = ...,
compresslevel: int = ...,
encoding: None = ...,
errors: None = ...,
newline: None = ...,
) -> BZ2File: ...
@overload
def open(
filename: _ReadableFileobj,
mode: _ReadTextMode,
compresslevel: int = ...,
encoding: str | None = ...,
errors: str | None = ...,
newline: str | None = ...,
) -> TextIO: ...
@overload
def open(
filename: _WritableFileobj,
mode: _WriteBinaryMode,
compresslevel: int = ...,
encoding: None = ...,
errors: None = ...,
newline: None = ...,
) -> BZ2File: ...
@overload
def open(
filename: _WritableFileobj,
mode: _WriteTextMode,
compresslevel: int = ...,
encoding: str | None = ...,
errors: str | None = ...,
newline: str | None = ...,
) -> TextIO: ...
@overload
def open(
filename: StrOrBytesPath,
mode: _ReadBinaryMode | _WriteBinaryMode = ...,
compresslevel: int = ...,
encoding: None = ...,
errors: None = ...,
newline: None = ...,
) -> BZ2File: ...
@overload
def open(
filename: StrOrBytesPath,
mode: _ReadTextMode | _WriteTextMode,
compresslevel: int = ...,
encoding: str | None = ...,
errors: str | None = ...,
newline: str | None = ...,
) -> TextIO: ...
@overload
def open(
filename: StrOrBytesPath | _ReadableFileobj | _WritableFileobj,
mode: str,
compresslevel: int = ...,
encoding: str | None = ...,
errors: str | None = ...,
newline: str | None = ...,
) -> BZ2File | TextIO: ...
class BZ2File(BaseStream, IO[bytes]):
def __enter__(self: Self) -> Self: ...
if sys.version_info >= (3, 9):
@overload
def __init__(self, filename: _WritableFileobj, mode: _WriteBinaryMode, *, compresslevel: int = ...) -> None: ...
@overload
def __init__(self, filename: _ReadableFileobj, mode: _ReadBinaryMode = ..., *, compresslevel: int = ...) -> None: ...
@overload
def __init__(
self, filename: StrOrBytesPath, mode: _ReadBinaryMode | _WriteBinaryMode = ..., *, compresslevel: int = ...
) -> None: ...
else:
@overload
def __init__(
self, filename: _WritableFileobj, mode: _WriteBinaryMode, buffering: Any | None = ..., compresslevel: int = ...
) -> None: ...
@overload
def __init__(
self, filename: _ReadableFileobj, mode: _ReadBinaryMode = ..., buffering: Any | None = ..., compresslevel: int = ...
) -> None: ...
@overload
def __init__(
self,
filename: StrOrBytesPath,
mode: _ReadBinaryMode | _WriteBinaryMode = ...,
buffering: Any | None = ...,
compresslevel: int = ...,
) -> None: ...
def read(self, size: int | None = ...) -> bytes: ...
def read1(self, size: int = ...) -> bytes: ...
def readline(self, size: SupportsIndex = ...) -> bytes: ... # type: ignore[override]
def readinto(self, b: WriteableBuffer) -> int: ...
def readlines(self, size: SupportsIndex = ...) -> list[bytes]: ...
def seek(self, offset: int, whence: int = ...) -> int: ...
def write(self, data: ReadableBuffer) -> int: ...
def writelines(self, seq: Iterable[ReadableBuffer]) -> None: ...
@final
class BZ2Compressor:
def __init__(self, compresslevel: int = ...) -> None: ...
def compress(self, __data: bytes) -> bytes: ...
def flush(self) -> bytes: ...
@final
class BZ2Decompressor:
def decompress(self, data: bytes, max_length: int = ...) -> bytes: ...
@property
def eof(self) -> bool: ...
@property
def needs_input(self) -> bool: ...
@property
def unused_data(self) -> bytes: ...
|
373924947fc0425c70de76bf8f71b797e81f8401
|
4d44674625100e62be2bb5033339fb641bd454ac
|
/snippet/example/python/ping_ip.py
|
be343b59aceadc2aed0767868d27ac8da3e8e4bd
|
[
"MIT"
] |
permissive
|
xgfone/snippet
|
8b9004a649d2575b493a376c4b4f3d4a7c56a4b0
|
b0b734dd35478b7ef3e6193623981f4f29b6748c
|
refs/heads/master
| 2022-03-18T12:41:09.033144
| 2022-02-20T15:26:35
| 2022-02-20T15:26:35
| 41,615,643
| 158
| 61
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,555
|
py
|
ping_ip.py
|
#!/usr/bin/env python3
#
# For Python 3.3+
#
import sys
import queue
import threading
import ipaddress
import subprocess
OPT = "-n 1 -w 1000" if sys.platform in ("win32", "cygwin") else "-c 1 -W 1"
def ping_ip(ip, results):
ok = True
try:
subprocess.check_output("ping {0} {1}".format(OPT, ip), shell=True)
except Exception:
ok = False
results.put(ip)
print("Test {:15} --> {}".format(ip, "OK" if ok else "X"))
def _ping_ips(ips):
tasks = []
results = queue.Queue()
for ip in ips:
t = threading.Thread(target=ping_ip, args=(ip, results), daemon=True)
t.start()
tasks.append(t)
for t in tasks:
t.join()
ips = []
while True:
try:
ips.append(results.get_nowait())
results.task_done()
except Exception:
break
return ips
def ping_ips(ips, size=1000):
results = []
while ips:
results.extend(_ping_ips(ips[:size]))
ips = ips[size:]
return results
def parse_ips(args):
ips = set()
for v in args:
for ip in v.split(","):
ip = ip.strip()
if not ip:
continue
if "/" not in ip:
ips.add(ip)
continue
for ip in ipaddress.IPv4Network(ip, strict=False).hosts():
ips.add(str(ip))
return sorted(ips)
if __name__ == "__main__":
ips = parse_ips(sys.argv[1:])
failed_ips = ping_ips(ips)
print("\nFailed IPs:\n {}".format(failed_ips))
|
e44b5cc61b8f1316e7e39504e69b3d259b1fb826
|
61673ab9a42f7151de7337608c442fa6247f13bb
|
/turtle/hexagone/main.py
|
79ce05588cb9650916614442edd18f018a6a02b6
|
[
"MIT"
] |
permissive
|
furas/python-examples
|
22d101670ecd667a29376d7c7d7d86f8ec71f6cf
|
95cb53b664f312e0830f010c0c96be94d4a4db90
|
refs/heads/master
| 2022-08-23T23:55:08.313936
| 2022-08-01T14:48:33
| 2022-08-01T14:48:33
| 45,575,296
| 176
| 91
|
MIT
| 2021-02-17T23:33:37
| 2015-11-04T23:54:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,175
|
py
|
main.py
|
import turtle
from math import pi, sin, cos
def hexagone(point, longueur,c):
l = longueur
x, y = point
turtle.up()
turtle.goto(point)
turtle.color(c[0]) #black
turtle.down()
turtle.begin_fill()
turtle.goto(l * cos(4 / 3 * pi )+x, l * sin(4 / 3 * pi)+y)
turtle.goto(l * cos(5 / 3 * pi)+x, l * sin(5 / 3 * pi)+y)
turtle.goto(l * cos(0)+x, l * sin(0)+y)
turtle.goto(point)
turtle.end_fill()
turtle.color(c[1]) #blue
turtle.begin_fill()
turtle.goto(l * cos(0)+x, l * sin(0)+y)
turtle.goto(l * cos(pi / 3)+x, l * sin(pi / 3)+y)
turtle.goto(l * cos(pi * 2 / 3)+x, l * sin(pi * 2 / 3)+y)
turtle.goto(point)
turtle.end_fill()
turtle.color(c[2]) #red
turtle.begin_fill()
turtle.goto(l * cos(pi * 2 / 3)+x, l * sin(pi * 2 / 3)+y)
turtle.goto(-l+x, 0+y)
turtle.goto(l * cos(4 / 3 * pi)+x, l * sin(4 / 3 * pi)+y)
turtle.goto(point)
turtle.end_fill()
turtle.up()
return True
hexagone((0,0), 50, ("black",("blue"),("red")))
hexagone((100,0), 50, ("black",("blue"),("red")))
hexagone((0,100), 50, ("black",("blue"),("red")))
hexagone((100,100), 50, ("black",("blue"),("red")))
turtle.done()
|
7869b0f9a4ce72ddab851fe7338dc5229f94cf8e
|
2d0bada349646b801a69c542407279cc7bc25013
|
/src/vai_quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/python/core/quantization/keras/vitis/vai_q_tensorflow/tools/create_optimize.py
|
2150722bf640941fd612f5878443efd2011679da
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-3-Clause-Open-MPI",
"LicenseRef-scancode-free-unknown",
"Libtool-exception",
"GCC-exception-3.1",
"LicenseRef-scancode-mit-old-style",
"OFL-1.1",
"JSON",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"ICU",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-issl-2018",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-or-later",
"Zlib",
"BSD-Source-Code",
"ClArtistic",
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"NCSA",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"CC-BY-4.0",
"FSFULLR",
"Minpack",
"Unlicense",
"BSL-1.0",
"NAIST-2003",
"Apache-2.0",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-public-domain",
"Libpng",
"Spencer-94",
"BSD-2-Clause",
"Intel",
"GPL-1.0-or-later",
"MPL-2.0"
] |
permissive
|
Xilinx/Vitis-AI
|
31e664f7adff0958bb7d149883ab9c231efb3541
|
f74ddc6ed086ba949b791626638717e21505dba2
|
refs/heads/master
| 2023-08-31T02:44:51.029166
| 2023-07-27T06:50:28
| 2023-07-27T06:50:28
| 215,649,623
| 1,283
| 683
|
Apache-2.0
| 2023-08-17T09:24:55
| 2019-10-16T21:41:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,353
|
py
|
create_optimize.py
|
import os
import copy
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
import vai_q_tensorflow as decent_q
# from tensorflow.contrib import decent_q
os.environ["DECENT_DEBUG"] = "0"
src_graph = "./quantize_results/quantize_eval_model.pb"
dst_graph_dir = "./"
dst_graph = "optimized_quantize_model.pb"
input_nodes = ["input_tensor"]
output_nodes = ["softmax_tensor"]
# output_nodes = ["image"]
input_shapes=[[-1, 224, 224, 3]]
def main():
name_to_node = {}
with gfile.FastGFile(src_graph,'rb') as f:
src_graph_def = tf.GraphDef()
src_graph_def.ParseFromString(f.read())
config = decent_q.QuantizeConfig(input_nodes=input_nodes,
output_nodes=output_nodes,
input_shapes=input_shapes,
weight_bit=8,
activation_bit=8,
method=1,
simulate_dpu=0,
output_dir=dst_graph_dir)
folded_graph_def = decent_q.CreateOptimizedGraphDef(src_graph_def,
config)
tf.io.write_graph(folded_graph_def, dst_graph_dir, dst_graph, as_text=False)
if __name__ == '__main__':
main()
|
19f45faaabc43c1896d0d4562cfa1482c99940f1
|
9e988f0dce0ee4b847808210bf5b703b1619fed5
|
/lib/bx/filter.py
|
b130271f472af039caf4a5cd5f710c4fb598f190
|
[
"MIT"
] |
permissive
|
bxlab/bx-python
|
1c4aa875e77ee97dd3c30b89c28a6c4acd0b821b
|
7758bc4492626ffdbaa90c8fc5dd7620b1e2f3f8
|
refs/heads/main
| 2023-08-08T15:17:47.383099
| 2023-07-27T12:09:24
| 2023-07-27T12:09:24
| 58,659,170
| 141
| 55
|
MIT
| 2023-07-26T10:28:07
| 2016-05-12T16:39:43
|
Python
|
UTF-8
|
Python
| false
| false
| 2,085
|
py
|
filter.py
|
"""
Classes for implementing `Pipeline`s composed of `Filter`s (intended to be
subclassed).
"""
class Filter:
def __init__(self, **kwargs):
raise Exception("AbstractClass")
def run(self, reader, writer):
for block in reader:
block = self(block)
if block:
writer(block)
def step(self, reader, writer):
block = next(reader)
if not block:
raise StopIteration
block = self(block)
if block:
writer(block)
def __call__(self, block):
raise Exception("AbstractMethod")
class Pipeline(Filter):
def __init__(self, **kwargs):
self.pipeline = list()
def __call__(self, block):
for function in self.pipeline:
if not block:
return block
try:
f = function.__call__
except AttributeError:
raise TypeError("'" + function.__class__.__name__ + "' is not callable.")
block = f(block)
return block
def append(self, function):
try:
function.__call__
except AttributeError:
raise TypeError("'" + function.__class__.__name__ + "' is not callable.")
return self.pipeline.append(function)
def remove(self, function):
return self.pipeline.remove(function)
def extend(self, pipeline):
for item in pipeline:
self.append(item)
# Container interface
def __len__(self):
return len(self.pipeline)
def __getitem__(self, key):
return self.pipeline[key]
def __setitem__(self, key, value):
try:
value.__call__
except AttributeError:
raise TypeError("'" + value.__class__.__name__ + "' is not callable.")
return self.pipeline.__setitem__(key, value)
def __delitem__(self, key):
return self.pipeline.__delitem__(key)
def __iter__(self):
return self.pipeline.__iter__()
def __contains__(self, item):
return self.pipeline.__contains__(item)
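# Hedged usage sketch (not part of the original module): a minimal `Filter`
# subclass chained in a `Pipeline`. Blocks are assumed to be plain dicts here;
# the names `UppercaseName` and `_pipeline_example` are illustrative only.
class UppercaseName(Filter):
    def __init__(self, **kwargs):
        # Override the abstract __init__ so the filter can be instantiated.
        pass
    def __call__(self, block):
        block["name"] = block["name"].upper()
        return block
def _pipeline_example():
    pipeline = Pipeline()
    pipeline.append(UppercaseName())
    blocks = iter([{"name": "chr1"}, {"name": "chr2"}])
    results = []
    # `run` pulls blocks from the reader, applies every filter in order, and
    # hands each surviving block to the writer callable.
    pipeline.run(blocks, results.append)
    return results  # [{'name': 'CHR1'}, {'name': 'CHR2'}]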
|
f2e3617607d125f0c7c05bf3b86bca65f94e79d5
|
01b9d7d2da922589906d755e4c5d63799281d4c8
|
/tests/test_run_plugin.py
|
dcbc032210c12445ab9af8b7a516d9252df7cb06
|
[
"MIT"
] |
permissive
|
Akuli/porcupine
|
a2c3c0b3263a8b16649d5007f7c1d96cbec7dfc0
|
2f705481ef7bd9ed4516743ad3fd6469c5f2b276
|
refs/heads/main
| 2023-08-25T00:09:19.294259
| 2023-08-05T10:28:03
| 2023-08-05T10:28:03
| 81,675,927
| 131
| 59
|
MIT
| 2023-08-11T21:46:05
| 2017-02-11T19:17:09
|
Python
|
UTF-8
|
Python
| false
| false
| 18,369
|
py
|
test_run_plugin.py
|
import os
import shutil
import sys
import time
from tkinter import ttk
import pytest
from porcupine import get_main_window, get_tab_manager, utils
from porcupine.plugins.run import common, dialog, history, no_terminal, terminal
@pytest.fixture(autouse=True)
def isolated_history():
# We don't overwrite the user's file because porcupine.dirs is monkeypatched
path = history._get_path()
assert not path.exists()
yield
try:
path.unlink()
except FileNotFoundError:
pass
@pytest.fixture
def fake_runner(tmp_path, monkeypatch):
if sys.platform == "win32":
path = tmp_path / "fake_runner.py"
input_statement = "input()"
else:
path = tmp_path / "fake_runner.sh"
input_statement = "read junk"
shutil.copy(terminal.run_script, path)
old_content = path.read_text()
assert old_content.count(input_statement) == 1
path.write_text(old_content.replace(input_statement, ""))
monkeypatch.setattr("porcupine.plugins.run.terminal.run_script", path)
@pytest.mark.skipif(
os.environ.get("GITHUB_ACTIONS") == "true", reason="no external terminal on github actions"
)
def test_external_terminal(filetab, tmp_path, fake_runner, wait_until):
filetab.textwidget.insert("end", "open('file', 'w').write('hello')")
filetab.save_as(tmp_path / "hello.py")
get_main_window().event_generate("<<Run:Repeat0>>")
wait_until(lambda: (tmp_path / "file").exists() and (tmp_path / "file").read_text() == "hello")
def get_output():
return no_terminal.runner.textwidget.get("1.0", "end - 1 char")
def test_unicodes(filetab, tmp_path, wait_until):
filetab.textwidget.insert(
"end",
r"""
print("123")
print("örkki")
import sys
# Test error handling for badly printed bytes
# All bytes that are invalid utf-8 AND invalid cp1252: 81, 8D, 8F, 90, 9D
sys.stderr.buffer.write(b'\x81')
print()
# unicodes beyond U+FFFF are not supported by tk
# can't test this on windows because cp1252 doesn't go beyond U+FFFF
if sys.platform != "win32":
print("\N{pile of poo}")
""",
)
filetab.save_as(tmp_path / "lol.py")
no_terminal.run_command(f"{utils.quote(sys.executable)} lol.py", tmp_path)
wait_until(lambda: "The process completed successfully." in get_output())
assert "123" in get_output()
assert "örkki" in get_output()
if sys.platform == "win32":
assert get_output().count("\N{replacement character}") == 1
else:
assert get_output().count("\N{replacement character}") == 2
def test_input(filetab, tmp_path, wait_until):
filetab.textwidget.insert("end", "x = input('Enter something: ')\n")
filetab.textwidget.insert("end", "print('You said ' + x)\n")
filetab.save_as(tmp_path / "lol.py")
no_terminal.run_command(f"{utils.quote(sys.executable)} lol.py", tmp_path)
wait_until(lambda: "Enter something:" in get_output())
no_terminal.runner.textwidget.insert("insert", "örkkimörkkiäinen")
no_terminal.runner.handle_enter_press()
wait_until(lambda: "The process completed successfully." in get_output())
assert (
get_output()
.strip()
.endswith(
"Enter something: örkkimörkkiäinen\nYou said örkkimörkkiäinen\nThe process completed successfully."
)
)
def there_are_links():
return bool(no_terminal.runner.textwidget.tag_ranges("link"))
def click_last_link():
assert there_are_links()
textwidget = no_terminal.runner.textwidget
textwidget.mark_set("current", "link.last - 1 char")
no_terminal.runner._link_manager._open_link(None)
return get_tab_manager().select().textwidget.get("sel.first", "sel.last")
@pytest.mark.skipif(
sys.platform == "win32",
reason="windows users are sometimes admins, so no inaccessible paths to test with",
)
def test_inaccessible_filename(filetab, tmp_path, wait_until):
filetab.textwidget.insert("end", r"print('/root/s3kr3t.txt:123: hello')")
filetab.save_as(tmp_path / "lol.py")
no_terminal.run_command(f"{utils.quote(sys.executable)} lol.py", tmp_path)
# Doesn't really matter whether it's highlighted or not, as long as it didn't crash
wait_until(lambda: "The process completed successfully." in get_output())
def test_random_bytes_interpreted_as_very_long_filename_bug(filetab, tmp_path, wait_until):
filetab.textwidget.insert(
"end",
r"""
print(bytes.fromhex('''
ffffffffffffffffffffffffffffffff57cc11aea00f1d1238
caf9520645b9318644629efae43bcb2e42a7ae6d80fd87e7ab
381d7252c5bc217dbdbc08f65ff1fd4beec736350ca507c16e
cd1d7884c14259e40e6a9f681781c85f278dd84ac3947ac51c
c4ba66caa4303575dd315e9291c9f3df17f433ea12a45d950c
bbcee6ada3a9ae81e7b2825d90c2fe6f75443f7dfabe7fc095
bf8129b8c074c96f5330805cf6707d9191b194ec651dd467e5
a0312e9fdf96a3b242dda57893de3cb588b8eba4f79a43629c
bc7698e4a51876c804464b5a28f667d16d0a7bbf7439f2d2d3
4c42cfe9ca16d716cb8b067ef836160343a79ead656b0e6155
ef2a3fc785a8bffd7cc5ef399e083238d03af67d0c7da82484
3ffb32aa68f6d42422e7ba0db4d7065a0958ab8ab36fca74c6
e9dc410138d6e707e6f16e3a6bebce07b8c0ae21d8385e9496
9ef4a18cccd6ea3b11ad488700e6bb4f05d1790005b3316808
70106d8ea14be82a0dbf128e10df95f11c65516cf76cd2221c
81c87ddafeeeba9eac8b1bdf1e92fc52309c6956e00fca5e05
e3ae4f0a8bf21e48847b815f129cf3acd7d7f3caa823eb3a33
33b487f9dac56ca370099425e988e42367296130eda2d48622
96bd5bfa3321828528976c398a84e127e190b19c3001466ecd
dc065ff9749eb33cb5276899ce5fd0dcb8631eec2c96f3daa9
'''))
""",
)
filetab.save_as(tmp_path / "lol.py")
no_terminal.run_command(f"{utils.quote(sys.executable)} lol.py", tmp_path)
wait_until(lambda: "The process completed successfully." in get_output())
assert not there_are_links()
@pytest.mark.skipif(
os.environ.get("GITHUB_ACTIONS") == "true" and sys.platform == "win32",
reason="sometimes fails randomly on windows github actions, hard to debug",
)
def test_repeat_in_another_file(tmp_path, tabmanager, mocker, monkeypatch, wait_until):
(tmp_path / "a.py").write_text("print('aaa')")
(tmp_path / "b.py").write_text("print('bbb')")
a = tabmanager.open_file(tmp_path / "a.py")
b = tabmanager.open_file(tmp_path / "b.py")
def fake_wait_window(toplevel):
# click run button (lol)
widgets = [toplevel]
while True:
w = widgets.pop()
if isinstance(w, ttk.Button) and w["text"] == "Run":
w.invoke()
break
widgets.extend(w.winfo_children())
actual_repeater = history.get_command_to_repeat
def fake_repeater(*args, **kwargs):
result = actual_repeater(*args, **kwargs)
result.external_terminal = False
return result
monkeypatch.setattr("tkinter.Toplevel.wait_window", fake_wait_window)
monkeypatch.setattr("porcupine.plugins.run.history.get_command_to_repeat", fake_repeater)
tabmanager.select(a)
get_main_window().event_generate("<<Run:AskAndRun0>>")
wait_until(lambda: "aaa" in get_output())
tabmanager.select(b)
get_main_window().event_generate("<<Run:Repeat0>>")
wait_until(lambda: "bbb" in get_output())
def test_python_error_message(filetab, tabmanager, tmp_path, wait_until):
(tmp_path / "asdf.py").write_text("print(1)\nopen('this does not exist')\nprint(2)\n")
filetab.textwidget.insert("end", "import asdf")
filetab.save_as(tmp_path / "main.py")
no_terminal.run_command(f"{utils.quote(sys.executable)} main.py", tmp_path)
wait_until(lambda: "The process failed with status 1." in get_output())
assert "No such file or directory" in get_output()
assert click_last_link() == "open('this does not exist')"
def test_mypy_error_message(filetab, tabmanager, tmp_path, wait_until):
filetab.textwidget.insert("end", "print(1 + 2)\nprint(1 + 'lol')\n")
filetab.save_as(tmp_path / "lel.py")
no_terminal.run_command(f"{utils.quote(sys.executable)} -m mypy lel.py", tmp_path)
# long timeout, mypy can be slow
wait_until((lambda: "The process failed with status 1." in get_output()), timeout=60)
assert click_last_link() == "print(1 + 'lol')"
def test_pytest_error_message(tabmanager, tmp_path, wait_until):
(tmp_path / "tests.py").write_text("def test_foo(asdf): pass")
no_terminal.run_command(f"{utils.quote(sys.executable)} -m pytest tests.py", tmp_path)
wait_until(lambda: "The process failed with status 1." in get_output())
assert click_last_link() == "def test_foo(asdf): pass"
@pytest.mark.skipif(
sys.platform == "win32",
reason="commands below wouldn't work on windows even if valgrind supported windows",
)
@pytest.mark.skipif(shutil.which("gcc") is None, reason="C compiler needed")
@pytest.mark.skipif(shutil.which("valgrind") is None, reason="need valgrind")
# caplog needed to silence logging errors from langserver plugin, which tries to start clangd
def test_valgrind_error_message(filetab, tmp_path, wait_until, caplog):
filetab.textwidget.insert(
"end",
r"""
#include <stdio.h>
#include <stdlib.h>
int main()
{
char *ptr = malloc(1);
printf("%c\n", *ptr);
return 0;
}
""",
)
filetab.save_as(tmp_path / "bug.c")
no_terminal.run_command("gcc -g bug.c", tmp_path)
wait_until(lambda: "The process completed successfully." in get_output())
no_terminal.run_command("valgrind ./a.out", tmp_path)
wait_until(lambda: "The process completed successfully." in get_output())
assert click_last_link() == r' printf("%c\n", *ptr);'
@pytest.mark.skipif(shutil.which("grep") is None, reason="uses grep")
def test_grep_n_output(tabmanager, tmp_path, wait_until):
(tmp_path / ".github").mkdir()
(tmp_path / ".github" / "asdf").write_text("foo")
(tmp_path / "lol").write_text("bar")
no_terminal.run_command("grep -n -r foo .", tmp_path)
wait_until(lambda: "The process completed successfully." in get_output())
assert click_last_link() == "foo"
no_terminal.run_command("grep -n -r bar .", tmp_path)
wait_until(lambda: "The process completed successfully." in get_output())
assert click_last_link() == "bar"
def test_pyright_output(tabmanager, tmp_path, wait_until):
(tmp_path / "_curses.pyi").write_text(
"""\
import sys
from typing import IO, Any, BinaryIO, NamedTuple, Tuple
"""
)
(tmp_path / "fake_pyright.py").write_text(
"""
import os
print(f"{os.getcwd()}/_curses.pyi")
print(f" {os.getcwd()}/_curses.pyi:2:51 - error: blah blah")
"""
)
no_terminal.run_command(f"{utils.quote(sys.executable)} fake_pyright.py", tmp_path)
wait_until(lambda: "The process completed successfully." in get_output())
assert click_last_link() == "from typing import IO, Any, BinaryIO, NamedTuple, Tuple"
def test_python_unbuffered(tmp_path, wait_until):
(tmp_path / "sleeper.py").write_text(
"""
import time
print("This should show up immediately")
time.sleep(10)
"""
)
start = time.monotonic()
no_terminal.run_command(f"{utils.quote(sys.executable)} sleeper.py", tmp_path)
wait_until(lambda: "This should show up immediately" in get_output())
end = time.monotonic()
assert end - start < 9
def test_not_line_buffered(tmp_path, wait_until):
(tmp_path / "sleeper.py").write_text(
"""
import time
print("This should show up immediately", end="", flush=True)
time.sleep(10)
"""
)
start = time.monotonic()
no_terminal.run_command(f"{utils.quote(sys.executable)} sleeper.py", tmp_path)
wait_until(lambda: "This should show up immediately" in get_output())
end = time.monotonic()
assert end - start < 9
def test_crlf_on_any_platform(tmp_path, wait_until):
(tmp_path / "crlf.py").write_text(r"import sys; sys.stdout.buffer.write(b'foo\r\nbar')")
no_terminal.run_command(f"{utils.quote(sys.executable)} crlf.py", tmp_path)
wait_until(lambda: "foo\nbar" in get_output())
def test_changing_current_file(filetab, tmp_path, wait_until):
filetab.textwidget.insert("end", 'with open("foo.py", "w") as f: f.write("lol")')
filetab.save_as(tmp_path / "foo.py")
no_terminal.run_command(f"{utils.quote(sys.executable)} foo.py", tmp_path)
wait_until(lambda: filetab.textwidget.get("1.0", "end").strip() == "lol")
def test_no_previous_command_error(filetab, tmp_path, mocker):
filetab.save_as(tmp_path / "foo.txt")
mock = mocker.patch("tkinter.messagebox.showerror")
get_main_window().event_generate("<<Run:Repeat0>>")
mock.assert_called_once()
shift_f5 = "⇧F5" if filetab.tk.eval("tk windowingsystem") == "aqua" else "Shift+F5"
assert f"press {shift_f5} to choose a command" in str(mock.call_args)
assert "then repeat it with F5" in str(mock.call_args)
def test_example_commands_of_different_filetypes(filetab, tmp_path, mocker):
python_mock = mocker.patch("porcupine.plugins.run.terminal.run_command")
html_mock = mocker.patch("porcupine.plugins.run.no_terminal.run_command")
filetab.save_as(tmp_path / "hello.py")
get_main_window().event_generate("<<Run:Repeat0>>")
filetab.save_as(tmp_path / "asdf.html")
get_main_window().event_generate("<<Run:Repeat0>>")
html_path = utils.quote(str(tmp_path / "asdf.html"))
if sys.platform == "win32":
python_mock.assert_called_once_with("py hello.py", tmp_path)
html_mock.assert_called_once_with(f"explorer {html_path}", tmp_path)
else:
opener = "open" if sys.platform == "darwin" else "x-www-browser"
python_mock.assert_called_once_with("python3 hello.py", tmp_path)
html_mock.assert_called_once_with(f"{opener} {html_path} >/dev/null 2>&1 &", tmp_path)
def test_cwd_entry(filetab, tmp_path):
(tmp_path / "subdir").mkdir()
filetab.save_as(tmp_path / "foo.txt")
asker = dialog._CommandAsker(common.Context(filetab, 1))
asker.command.format_var.set("echo lol")
assert asker.cwd.format_var.get() == "{folder_path}"
assert str(asker.run_button["state"]) == "normal"
assert asker.get_command().format_cwd() == tmp_path
for path in ["", ".", "..", "../..", tmp_path.name, "subdir", str(tmp_path / "foo.txt")]:
asker.cwd.format_var.set(path)
assert str(asker.run_button["state"]) == "disabled"
for path in [tmp_path.parent, tmp_path, tmp_path / "subdir"]:
asker.cwd.format_var.set(str(path))
assert str(asker.run_button["state"]) == "normal"
        assert asker.get_command().format_cwd() == path
asker.window.destroy()
SMALL_TIME = 0.1
def size_is_changing(path):
old_size = path.stat().st_size
time.sleep(2 * SMALL_TIME)
new_size = path.stat().st_size
return old_size != new_size
@pytest.mark.skipif(sys.platform == "darwin", reason="somehow fails github actions on macos")
def test_previous_process_dies(tmp_path, wait_until):
(tmp_path / "hello.py").write_text("print('Hello')")
(tmp_path / "killed.py").write_text(
rf"""
import time
while True:
with open("out.txt", "a") as file:
file.write("Still alive\n")
time.sleep({SMALL_TIME})
"""
)
no_terminal.run_command(f"{utils.quote(sys.executable)} killed.py", tmp_path)
wait_until(lambda: (tmp_path / "out.txt").exists())
assert size_is_changing(tmp_path / "out.txt")
no_terminal.run_command(f"{utils.quote(sys.executable)} hello.py", tmp_path)
wait_until(lambda: "Hello" in get_output())
assert not size_is_changing(tmp_path / "out.txt")
@pytest.mark.parametrize("use_after_idle", [True, False])
def test_smashing_f5(tmp_path, wait_until, use_after_idle):
(tmp_path / "hello.py").write_text("print('Hello')")
run = lambda: no_terminal.run_command(f"{utils.quote(sys.executable)} hello.py", tmp_path)
if use_after_idle:
get_main_window().after_idle(run)
get_main_window().after_idle(run)
get_main_window().after_idle(run)
else:
run()
run()
run()
wait_until(lambda: "The process completed successfully." in get_output())
first_line, rest = get_output().split("\n", 1)
assert first_line.endswith("hello.py")
assert rest == "Hello\nThe process completed successfully."
@pytest.mark.skipif(sys.platform == "win32", reason="can't pause processes on windows")
def test_pause_resume_button(tmp_path, wait_until):
(tmp_path / "sleeper.py").write_text(
"import time; print('before'); time.sleep(0.5); print('after')"
)
no_terminal.run_command(f"{utils.quote(sys.executable)} sleeper.py", tmp_path)
wait_until(lambda: "before" in get_output())
no_terminal.runner.pause_button.event_generate("<Button-1>")
sleep_end = time.time() + 1
wait_until(lambda: time.time() > sleep_end)
assert "after" not in get_output()
no_terminal.runner.pause_button.event_generate("<Button-1>")
wait_until(lambda: "after" in get_output())
def test_stop_button(tmp_path, wait_until):
(tmp_path / "sleeper.py").write_text("import time; print('started'); time.sleep(10)")
no_terminal.run_command(f"{utils.quote(sys.executable)} sleeper.py", tmp_path)
wait_until(lambda: "started" in get_output())
no_terminal.runner.stop_button.event_generate("<Button-1>")
wait_until(lambda: "started\nKilled." in get_output())
def test_stop_button_pressed_after_finished(tmp_path, wait_until):
no_terminal.run_command(f"{utils.quote(sys.executable)} -c pass", tmp_path)
wait_until(lambda: "The process completed successfully." in get_output())
no_terminal.runner.stop_button.event_generate("<Button-1>")
assert "Killed" not in get_output()
def test_infinite_loop(tmp_path, wait_until):
(tmp_path / "loop.py").write_text(
"""\
i = 0
while True:
print(i)
i = i+1
"""
)
no_terminal.run_command(f"{utils.quote(sys.executable)} loop.py", tmp_path)
wait_until(
lambda: get_output().splitlines()[-1].isdecimal()
and int(get_output().splitlines()[-1]) >= 2 * no_terminal.MAX_SCROLLBACK
)
no_terminal.runner.stop_button.event_generate("<Button-1>")
wait_until(lambda: get_output().strip().endswith("Killed."))
lines = get_output().strip().replace("Killed.", "").splitlines()[1:]
assert (
len(lines) < no_terminal.MAX_SCROLLBACK
) # there were more prints, but old output was removed
start = int(lines[0])
assert start > 0
assert lines == [str(i) for i in range(start, start + len(lines))]
|
6dc9ec26fe7b39135c01ff85b9f38f3010639192
|
15f0514701a78e12750f68ba09d68095172493ee
|
/Python3/1003.py
|
9b9f9710863ba62843ec4ed9646f2c848fc517cf
|
[
"MIT"
] |
permissive
|
strengthen/LeetCode
|
5e38c8c9d3e8f27109b9124ae17ef8a4139a1518
|
3ffa6dcbeb787a6128641402081a4ff70093bb61
|
refs/heads/master
| 2022-12-04T21:35:17.872212
| 2022-11-30T06:23:24
| 2022-11-30T06:23:24
| 155,958,163
| 936
| 365
|
MIT
| 2021-11-15T04:02:45
| 2018-11-03T06:47:38
| null |
UTF-8
|
Python
| false
| false
| 1,229
|
py
|
1003.py
|
__________________________________________________________________________________________________
sample 28 ms submission
class Solution:
def isValid(self, S: str) -> bool:
if (len(S) % 3 != 0) or (S[0] != 'a') or (S[-1] != 'c'):
return False
stack = []
S = S.replace('abc', '')
for c in S:
if c == 'c':
if (len(stack) < 2) or (stack[-1] != 'b' or stack[-2] != 'a'):
return False
else:
stack.pop()
stack.pop()
else:
stack.append(c)
return len(stack) == 0
__________________________________________________________________________________________________
sample 13204 kb submission
class Solution:
def isValid(self, S: str) -> bool:
h = []
def try_clean():
if len(h)>=3 and h[-2]=='b' and h[-3]=='a':
h.pop()
h.pop()
h.pop()
for c in S:
h.append(c)
if h[-1]=='c':
try_clean()
return len(h)==0
__________________________________________________________________________________________________
|
ba730a43a4f2139c15f16d1b3c837021fc9a1beb
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Keras_tensorflow_nightly/source2.7/tensorflow/contrib/learn/python/learn/monitored_session.py
|
ac0433f1775feeed2ec3cf49291da01500bef01b
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,381
|
py
|
monitored_session.py
|
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A wrapper of Session API which runs hooks (deprecated).
These are deprecated aliases for classes and functions in `tf.train`. Please use
those directly.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training import monitored_session
# pylint: disable=invalid-name
Scaffold = monitored_session.Scaffold
SessionCreator = monitored_session.SessionCreator
ChiefSessionCreator = monitored_session.ChiefSessionCreator
WorkerSessionCreator = monitored_session.WorkerSessionCreator
MonitoredSession = monitored_session.MonitoredSession
# pylint: disable=invalid-name
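# Hedged usage sketch (assumes a TF 1.x environment). Since the names above are
# plain aliases, new code is expected to spell out the tf.train classes directly:
if __name__ == "__main__":
    import tensorflow as tf
    with tf.train.MonitoredSession() as sess:  # same class as the alias above
        pass  # sess.run(fetches) as usual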
|
222251a3134acd6ad5fc0f6a4674c0d786cd209f
|
7a6b4705293709e32a6927ad4f76eb0549f3bea9
|
/orchestra/google_apps/convenience.py
|
5b47cf3e4b54037686a8a69f94f2f4b9c4a52fa8
|
[
"Apache-2.0",
"CC-BY-3.0"
] |
permissive
|
b12io/orchestra
|
a71941d80d1eeddb73f301da8f601b2c31a4b279
|
ee8a29122a3491feae1e1c2c4699142726ae6c21
|
refs/heads/main
| 2023-08-20T17:46:36.360755
| 2023-06-27T13:32:46
| 2023-06-27T13:32:46
| 42,593,972
| 459
| 66
|
Apache-2.0
| 2023-06-27T13:32:48
| 2015-09-16T14:55:16
|
Python
|
UTF-8
|
Python
| false
| false
| 7,981
|
py
|
convenience.py
|
import csv
import logging
import os
import re
import requests
import tempfile
from collections import Counter
from datetime import date
from django.conf import settings
from io import StringIO
from orchestra.google_apps.errors import FailedRequest
from orchestra.google_apps.errors import GoogleDriveError
from orchestra.google_apps.errors import InvalidUrlError
from orchestra.google_apps.permissions import read_with_link_permission
from orchestra.google_apps.permissions import write_with_link_permission
from orchestra.google_apps.service import Service
from orchestra.utils.common_regex import image_file_regex
from orchestra.utils.decorators import run_if
logger = logging.getLogger(__name__)
_image_mimetype_regex = re.compile('(image/(?:jpg|jpeg|gif|png|svg))',
re.IGNORECASE)
SCRATCHPAD_TEMPLATE_ID = '1d0kIgq8G_Su6j5abP-tP6yJ2sp-sFDk6vZiREil0_70'
GSPREAD_RE = re.compile('https://docs.google.com/spreadsheets/d/([^/]*)/.*')
GSPREAD_EXPORT_URL = ('https://docs.google.com/spreadsheets/d/'
'{}/export?format=csv')
def _get_image_mimetype(response, title):
"""
    Given an http response and an image title,
    generate an image mimetype.
"""
if (response.headers.get('content-type') and
_image_mimetype_regex.search(response.headers
.get('content-type'))):
return response.headers.get('content-type')
extension = title.split('.')[-1]
return 'image/{}'.format(extension)
@run_if('GOOGLE_APPS')
def add_image(service, folder_id, url):
"""
Add image to a folder.
Args:
service: Drive API service instance.
folder_id: ID of a folder where the image will be stored
url: url to the original image
Returns:
Metadata to an uploaded file
"""
response = requests.get(url, stream=True)
if response.status_code != 200:
        raise FailedRequest('Unable to successfully retrieve '
                            'an image: {}'.format(url))
temp = tempfile.NamedTemporaryFile(mode='wb', delete=False)
for chunk in response.iter_content(1024):
temp.write(chunk)
title_regex = image_file_regex.search(response.url)
if title_regex is None:
raise InvalidUrlError('Url is not for an image.')
title = title_regex.group()
mimetype = _get_image_mimetype(response, title)
temp.close()
google_image = service.insert_file(title,
'image',
folder_id,
mimetype,
mimetype,
temp.name)
os.unlink(temp.name)
return google_image
@run_if('GOOGLE_APPS')
def create_media_folder_with_images(parent_id, image_links, folder_name):
"""
Given a folder name and a list of image links create a new
google drive folder with images in it.
"""
service = Service(settings.GOOGLE_P12_PATH,
settings.GOOGLE_SERVICE_EMAIL)
folder = create_folder_with_permissions(parent_id,
folder_name,
[read_with_link_permission])
folder_id = folder['id']
counter = Counter()
for image_link in image_links:
try:
image = add_image(service, folder_id, image_link)
counter['uploaded_images'] += 1
logger.info('Image has been uploaded %s', image)
except (InvalidUrlError, FailedRequest):
counter['not_uploaded_images'] += 1
logger.exception('Failed to retrieve image from %s',
image_link)
return {'folder': folder,
'image_counter': counter}
@run_if('GOOGLE_APPS')
def create_folder_with_permissions(parent_id, folder_name, permissions=None):
"""
Create drive folder in the specified location with given permissions.
"""
service = Service(settings.GOOGLE_P12_PATH,
settings.GOOGLE_SERVICE_EMAIL)
folder = service.insert_folder(folder_name, parent_id)
if folder is None:
raise GoogleDriveError('Could not create a folder')
permissions = permissions or []
for permission in permissions:
service.add_permission(folder.get('id'),
permission)
return folder
@run_if('GOOGLE_APPS')
def create_project_google_folder(project):
"""
Create drive folder for project information
"""
today = date.today().strftime('%Y-%m-%d')
parent_id = (project.project_data.get('client_folder_id') or
settings.GOOGLE_PROJECT_ROOT_ID)
folder = create_folder_with_permissions(
parent_id,
' '.join((today, project.short_description)),
[write_with_link_permission])
folder_id = folder.get('id')
project.project_data['project_folder_id'] = folder_id
project.scratchpad_url = create_document_from_template(
SCRATCHPAD_TEMPLATE_ID,
'Scratchpad',
[folder_id],
[write_with_link_permission])['alternateLink']
project.save()
return folder
@run_if('GOOGLE_APPS')
def create_document_from_template(template_id, name,
parent_ids=None, permissions=None):
service = Service(settings.GOOGLE_P12_PATH,
settings.GOOGLE_SERVICE_EMAIL)
upload_info = service.copy_file(template_id, name,
parent_ids=parent_ids)
if upload_info is None:
raise GoogleDriveError('Could not create document {}'.format(name))
logger.info(upload_info)
document_id = upload_info.get('id')
permissions = permissions or []
for permission in permissions:
service.add_permission(document_id, permission)
upload_info['status'] = 'success'
upload_info['id'] = document_id
return upload_info
@run_if('GOOGLE_APPS')
def download_file(file_metadata):
"""Download a file from a google drive folder.
Args:
file_metadata (dict):
A Google Apps API file resource.
Returns:
file_contents (str):
A buffer containing the raw binary contents of the file.
title (str):
The title of the file.
mimetype(str):
The mimetype of the file.
"""
service = Service(settings.GOOGLE_P12_PATH,
settings.GOOGLE_SERVICE_EMAIL)
mimetype = file_metadata['mimeType']
title = file_metadata['title']
return service.get_file_content(file_metadata['id']), title, mimetype
@run_if('GOOGLE_APPS')
def upload_file(parent_id, file_path, title, description, mimetype):
"""Upload a file to a google drive folder.
Args:
parent_id (str):
Identifier for the drive folder to upload to.
file_path (str):
Local file path to the file to upload.
title (str):
Title for the uploaded document.
description (str):
A description of the file to upload.
mimetype (str):
Mimetype of the uploaded content.
Returns:
file_metadata (dict):
A Google Apps File resource with metadata about the uploaded file.
"""
service = Service(settings.GOOGLE_P12_PATH,
settings.GOOGLE_SERVICE_EMAIL)
file_metadata = service.insert_file(
title,
description,
parent_id,
mimetype,
mimetype,
file_path
)
return file_metadata
def get_google_spreadsheet_as_csv(gdoc_url, reader=csv.DictReader):
matches = GSPREAD_RE.match(gdoc_url)
if matches is None:
raise ValueError(
'{} is not a valid Google Sheets URL'.format(gdoc_url))
export_url = GSPREAD_EXPORT_URL.format(matches.group(1))
response = requests.get(export_url)
return reader(
StringIO(response.content.decode('utf-8')), dialect=csv.excel)
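# Hedged usage sketch for the CSV helper above; the sheet id is a placeholder and
# the request only succeeds for a spreadsheet the caller can actually read.
if __name__ == "__main__":
    rows = get_google_spreadsheet_as_csv(
        'https://docs.google.com/spreadsheets/d/<sheet-id>/edit#gid=0')
    for row in rows:
        print(dict(row))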
|
70214475d4dfe53bd6077ca2d48ab835190a0970
|
7442c958dc2522be2e2adcd1d28acf9527d1b5ef
|
/reapy/core/project/region.pyi
|
34c6be6efbcd464a80ffdda029919edbb6093374
|
[
"MIT"
] |
permissive
|
RomeoDespres/reapy
|
0227bef82828521b836548f5b2115ee354eabcd7
|
730627cee6f39fc26d6ebc8a3df0112e5921cd9f
|
refs/heads/master
| 2022-02-21T02:37:47.456052
| 2021-02-11T08:26:33
| 2021-02-11T08:26:33
| 168,827,206
| 104
| 31
|
MIT
| 2022-02-04T19:13:32
| 2019-02-02T12:00:27
|
Python
|
UTF-8
|
Python
| false
| false
| 3,411
|
pyi
|
region.pyi
|
import reapy
from reapy import reascript_api as RPR
from reapy.core import ReapyObject
import typing as ty
class Region(ReapyObject):
_class_name = "Region"
project_id: int
index: int
def __init__(self,
parent_project: ty.Optional[reapy.Project] = None,
index: ty.Optional[int] = None,
parent_project_id: ty.Optional[int] = None) -> None:
...
def _get_enum_index(self) -> int:
"""
Return region index as needed by RPR.EnumProjectMarkers2.
"""
...
@property
def _kwargs(self) -> ty.Dict[str, int]:
...
def add_rendered_track(self, track: reapy.Track) -> None:
"""
Add track to region render matrix for this region.
Parameters
----------
track : Track
Track to add.
See also
--------
Region.add_rendered_tracks
Efficiently add several tracks to region render matrix.
Region.remove_rendered_track
Region.remove_rendered_tracks
"""
...
def add_rendered_tracks(self, tracks: ty.List[reapy.Track]) -> None:
"""
Efficiently add several tracks to region render matrix.
Parameters
----------
tracks : list of Track
Tracks to add.
See also
--------
Region.remove_rendered_tracks
"""
...
@property
def end(self) -> float:
"""
Region end.
:type: float
Region end in seconds.
"""
...
@end.setter
def end(self, end: float) -> None:
"""
Set region end.
Parameters
----------
end : float
region end in seconds.
"""
...
def delete(self) -> None:
"""
Delete region.
"""
...
def remove_rendered_track(self, track: reapy.Track) -> None:
"""
Remove track from region render matrix for this region.
Parameters
----------
track : Track
Track to remove.
See also
--------
Region.add_rendered_tracks
Region.remove_rendered_track
Region.remove_rendered_tracks
Efficiently remove several tracks from render matrix.
"""
...
def remove_rendered_tracks(self, tracks: ty.List[reapy.Track]) -> None:
"""
Efficiently remove several tracks from region render matrix.
Parameters
----------
tracks : list of Track
Tracks to remove.
See also
--------
Region.add_rendered_tracks
"""
...
@property
def rendered_tracks(self) -> ty.List[reapy.Track]:
"""
List of tracks for this region in region render matrix.
:type: list of Track
"""
...
@property
def start(self) -> float:
"""
Region start.
:type: float
"""
...
@start.setter
def start(self, start: float) -> None:
"""
Set region start.
Parameters
----------
start : float
region start in seconds.
"""
...
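# Hedged runtime sketch of the API these stubs describe (assumes REAPER is running
# with reapy configured; shown as a comment because executable code does not
# belong in a .pyi stub):
#
#     import reapy
#     project = reapy.Project()
#     for region in project.regions:
#         print(region.start, region.end)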
|
61521fe3cb2517f0590ef3475a182e6897f8251b
|
6a468c1650b3c083f102f19ace0b0d6e4d0686f7
|
/sympy/polys/densetools.py
|
ff3ea5f2ea321a9e954202e1cab240e0cf962a88
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
sympy/sympy
|
a5f8accaa7686c59d9b5c94212fef60d746dac4b
|
69f98fb2b0d845e76874067a381dba37b577e8c5
|
refs/heads/master
| 2023-09-01T15:51:37.886107
| 2023-08-31T20:54:33
| 2023-08-31T20:54:33
| 640,534
| 10,928
| 5,362
|
NOASSERTION
| 2023-09-14T17:29:13
| 2010-04-30T20:37:14
|
Python
|
UTF-8
|
Python
| false
| false
| 25,902
|
py
|
densetools.py
|
"""Advanced tools for dense recursive polynomials in ``K[x]`` or ``K[X]``. """
from sympy.polys.densearith import (
dup_add_term, dmp_add_term,
dup_lshift,
dup_add, dmp_add,
dup_sub, dmp_sub,
dup_mul, dmp_mul,
dup_sqr,
dup_div,
dup_rem, dmp_rem,
dmp_expand,
dup_mul_ground, dmp_mul_ground,
dup_quo_ground, dmp_quo_ground,
dup_exquo_ground, dmp_exquo_ground,
)
from sympy.polys.densebasic import (
dup_strip, dmp_strip,
dup_convert, dmp_convert,
dup_degree, dmp_degree,
dmp_to_dict,
dmp_from_dict,
dup_LC, dmp_LC, dmp_ground_LC,
dup_TC, dmp_TC,
dmp_zero, dmp_ground,
dmp_zero_p,
dup_to_raw_dict, dup_from_raw_dict,
dmp_zeros
)
from sympy.polys.polyerrors import (
MultivariatePolynomialError,
DomainError
)
from sympy.utilities import variations
from math import ceil as _ceil, log as _log
def dup_integrate(f, m, K):
"""
Computes the indefinite integral of ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> R.dup_integrate(x**2 + 2*x, 1)
1/3*x**3 + x**2
>>> R.dup_integrate(x**2 + 2*x, 2)
1/12*x**4 + 1/3*x**3
"""
if m <= 0 or not f:
return f
g = [K.zero]*m
for i, c in enumerate(reversed(f)):
n = i + 1
for j in range(1, m):
n *= i + j + 1
g.insert(0, K.exquo(c, K(n)))
return g
def dmp_integrate(f, m, u, K):
"""
Computes the indefinite integral of ``f`` in ``x_0`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y = ring("x,y", QQ)
>>> R.dmp_integrate(x + 2*y, 1)
1/2*x**2 + 2*x*y
>>> R.dmp_integrate(x + 2*y, 2)
1/6*x**3 + x**2*y
"""
if not u:
return dup_integrate(f, m, K)
if m <= 0 or dmp_zero_p(f, u):
return f
g, v = dmp_zeros(m, u - 1, K), u - 1
for i, c in enumerate(reversed(f)):
n = i + 1
for j in range(1, m):
n *= i + j + 1
g.insert(0, dmp_quo_ground(c, K(n), v, K))
return g
def _rec_integrate_in(g, m, v, i, j, K):
"""Recursive helper for :func:`dmp_integrate_in`."""
if i == j:
return dmp_integrate(g, m, v, K)
w, i = v - 1, i + 1
return dmp_strip([ _rec_integrate_in(c, m, w, i, j, K) for c in g ], v)
def dmp_integrate_in(f, m, j, u, K):
"""
Computes the indefinite integral of ``f`` in ``x_j`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y = ring("x,y", QQ)
>>> R.dmp_integrate_in(x + 2*y, 1, 0)
1/2*x**2 + 2*x*y
>>> R.dmp_integrate_in(x + 2*y, 1, 1)
x*y + y**2
"""
if j < 0 or j > u:
raise IndexError("0 <= j <= u expected, got u = %d, j = %d" % (u, j))
return _rec_integrate_in(f, m, u, 0, j, K)
def dup_diff(f, m, K):
"""
``m``-th order derivative of a polynomial in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_diff(x**3 + 2*x**2 + 3*x + 4, 1)
3*x**2 + 4*x + 3
>>> R.dup_diff(x**3 + 2*x**2 + 3*x + 4, 2)
6*x + 4
"""
if m <= 0:
return f
n = dup_degree(f)
if n < m:
return []
deriv = []
if m == 1:
for coeff in f[:-m]:
deriv.append(K(n)*coeff)
n -= 1
else:
for coeff in f[:-m]:
k = n
for i in range(n - 1, n - m, -1):
k *= i
deriv.append(K(k)*coeff)
n -= 1
return dup_strip(deriv)
def dmp_diff(f, m, u, K):
"""
``m``-th order derivative in ``x_0`` of a polynomial in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = x*y**2 + 2*x*y + 3*x + 2*y**2 + 3*y + 1
>>> R.dmp_diff(f, 1)
y**2 + 2*y + 3
>>> R.dmp_diff(f, 2)
0
"""
if not u:
return dup_diff(f, m, K)
if m <= 0:
return f
n = dmp_degree(f, u)
if n < m:
return dmp_zero(u)
deriv, v = [], u - 1
if m == 1:
for coeff in f[:-m]:
deriv.append(dmp_mul_ground(coeff, K(n), v, K))
n -= 1
else:
for coeff in f[:-m]:
k = n
for i in range(n - 1, n - m, -1):
k *= i
deriv.append(dmp_mul_ground(coeff, K(k), v, K))
n -= 1
return dmp_strip(deriv, u)
def _rec_diff_in(g, m, v, i, j, K):
"""Recursive helper for :func:`dmp_diff_in`."""
if i == j:
return dmp_diff(g, m, v, K)
w, i = v - 1, i + 1
return dmp_strip([ _rec_diff_in(c, m, w, i, j, K) for c in g ], v)
def dmp_diff_in(f, m, j, u, K):
"""
``m``-th order derivative in ``x_j`` of a polynomial in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = x*y**2 + 2*x*y + 3*x + 2*y**2 + 3*y + 1
>>> R.dmp_diff_in(f, 1, 0)
y**2 + 2*y + 3
>>> R.dmp_diff_in(f, 1, 1)
2*x*y + 2*x + 4*y + 3
"""
if j < 0 or j > u:
raise IndexError("0 <= j <= %s expected, got %s" % (u, j))
return _rec_diff_in(f, m, u, 0, j, K)
def dup_eval(f, a, K):
"""
Evaluate a polynomial at ``x = a`` in ``K[x]`` using Horner scheme.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_eval(x**2 + 2*x + 3, 2)
11
"""
if not a:
return K.convert(dup_TC(f, K))
result = K.zero
for c in f:
result *= a
result += c
return result
def dmp_eval(f, a, u, K):
"""
Evaluate a polynomial at ``x_0 = a`` in ``K[X]`` using the Horner scheme.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_eval(2*x*y + 3*x + y + 2, 2)
5*y + 8
"""
if not u:
return dup_eval(f, a, K)
if not a:
return dmp_TC(f, K)
result, v = dmp_LC(f, K), u - 1
for coeff in f[1:]:
result = dmp_mul_ground(result, a, v, K)
result = dmp_add(result, coeff, v, K)
return result
def _rec_eval_in(g, a, v, i, j, K):
"""Recursive helper for :func:`dmp_eval_in`."""
if i == j:
return dmp_eval(g, a, v, K)
v, i = v - 1, i + 1
return dmp_strip([ _rec_eval_in(c, a, v, i, j, K) for c in g ], v)
def dmp_eval_in(f, a, j, u, K):
"""
Evaluate a polynomial at ``x_j = a`` in ``K[X]`` using the Horner scheme.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 2*x*y + 3*x + y + 2
>>> R.dmp_eval_in(f, 2, 0)
5*y + 8
>>> R.dmp_eval_in(f, 2, 1)
7*x + 4
"""
if j < 0 or j > u:
raise IndexError("0 <= j <= %s expected, got %s" % (u, j))
return _rec_eval_in(f, a, u, 0, j, K)
def _rec_eval_tail(g, i, A, u, K):
"""Recursive helper for :func:`dmp_eval_tail`."""
if i == u:
return dup_eval(g, A[-1], K)
else:
h = [ _rec_eval_tail(c, i + 1, A, u, K) for c in g ]
if i < u - len(A) + 1:
return h
else:
return dup_eval(h, A[-u + i - 1], K)
def dmp_eval_tail(f, A, u, K):
"""
Evaluate a polynomial at ``x_j = a_j, ...`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 2*x*y + 3*x + y + 2
>>> R.dmp_eval_tail(f, [2])
7*x + 4
>>> R.dmp_eval_tail(f, [2, 2])
18
"""
if not A:
return f
if dmp_zero_p(f, u):
return dmp_zero(u - len(A))
e = _rec_eval_tail(f, 0, A, u, K)
if u == len(A) - 1:
return e
else:
return dmp_strip(e, u - len(A))
def _rec_diff_eval(g, m, a, v, i, j, K):
"""Recursive helper for :func:`dmp_diff_eval`."""
if i == j:
return dmp_eval(dmp_diff(g, m, v, K), a, v, K)
v, i = v - 1, i + 1
return dmp_strip([ _rec_diff_eval(c, m, a, v, i, j, K) for c in g ], v)
def dmp_diff_eval_in(f, m, a, j, u, K):
"""
Differentiate and evaluate a polynomial in ``x_j`` at ``a`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = x*y**2 + 2*x*y + 3*x + 2*y**2 + 3*y + 1
>>> R.dmp_diff_eval_in(f, 1, 2, 0)
y**2 + 2*y + 3
>>> R.dmp_diff_eval_in(f, 1, 2, 1)
6*x + 11
"""
if j > u:
raise IndexError("-%s <= j < %s expected, got %s" % (u, u, j))
if not j:
return dmp_eval(dmp_diff(f, m, u, K), a, u, K)
return _rec_diff_eval(f, m, a, u, 0, j, K)
def dup_trunc(f, p, K):
"""
Reduce a ``K[x]`` polynomial modulo a constant ``p`` in ``K``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_trunc(2*x**3 + 3*x**2 + 5*x + 7, ZZ(3))
-x**3 - x + 1
"""
if K.is_ZZ:
g = []
for c in f:
c = c % p
if c > p // 2:
g.append(c - p)
else:
g.append(c)
else:
g = [ c % p for c in f ]
return dup_strip(g)
def dmp_trunc(f, p, u, K):
"""
Reduce a ``K[X]`` polynomial modulo a polynomial ``p`` in ``K[Y]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 3*x**2*y + 8*x**2 + 5*x*y + 6*x + 2*y + 3
>>> g = (y - 1).drop(x)
>>> R.dmp_trunc(f, g)
11*x**2 + 11*x + 5
"""
return dmp_strip([ dmp_rem(c, p, u - 1, K) for c in f ], u)
def dmp_ground_trunc(f, p, u, K):
"""
Reduce a ``K[X]`` polynomial modulo a constant ``p`` in ``K``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 3*x**2*y + 8*x**2 + 5*x*y + 6*x + 2*y + 3
>>> R.dmp_ground_trunc(f, ZZ(3))
-x**2 - x*y - y
"""
if not u:
return dup_trunc(f, p, K)
v = u - 1
return dmp_strip([ dmp_ground_trunc(c, p, v, K) for c in f ], u)
def dup_monic(f, K):
"""
Divide all coefficients by ``LC(f)`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x = ring("x", ZZ)
>>> R.dup_monic(3*x**2 + 6*x + 9)
x**2 + 2*x + 3
>>> R, x = ring("x", QQ)
>>> R.dup_monic(3*x**2 + 4*x + 2)
x**2 + 4/3*x + 2/3
"""
if not f:
return f
lc = dup_LC(f, K)
if K.is_one(lc):
return f
else:
return dup_exquo_ground(f, lc, K)
def dmp_ground_monic(f, u, K):
"""
Divide all coefficients by ``LC(f)`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 3*x**2*y + 6*x**2 + 3*x*y + 9*y + 3
>>> R.dmp_ground_monic(f)
x**2*y + 2*x**2 + x*y + 3*y + 1
>>> R, x,y = ring("x,y", QQ)
>>> f = 3*x**2*y + 8*x**2 + 5*x*y + 6*x + 2*y + 3
>>> R.dmp_ground_monic(f)
x**2*y + 8/3*x**2 + 5/3*x*y + 2*x + 2/3*y + 1
"""
if not u:
return dup_monic(f, K)
if dmp_zero_p(f, u):
return f
lc = dmp_ground_LC(f, u, K)
if K.is_one(lc):
return f
else:
return dmp_exquo_ground(f, lc, u, K)
def dup_content(f, K):
"""
Compute the GCD of coefficients of ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x = ring("x", ZZ)
>>> f = 6*x**2 + 8*x + 12
>>> R.dup_content(f)
2
>>> R, x = ring("x", QQ)
>>> f = 6*x**2 + 8*x + 12
>>> R.dup_content(f)
2
"""
from sympy.polys.domains import QQ
if not f:
return K.zero
cont = K.zero
if K == QQ:
for c in f:
cont = K.gcd(cont, c)
else:
for c in f:
cont = K.gcd(cont, c)
if K.is_one(cont):
break
return cont
def dmp_ground_content(f, u, K):
"""
Compute the GCD of coefficients of ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 2*x*y + 6*x + 4*y + 12
>>> R.dmp_ground_content(f)
2
>>> R, x,y = ring("x,y", QQ)
>>> f = 2*x*y + 6*x + 4*y + 12
>>> R.dmp_ground_content(f)
2
"""
from sympy.polys.domains import QQ
if not u:
return dup_content(f, K)
if dmp_zero_p(f, u):
return K.zero
cont, v = K.zero, u - 1
if K == QQ:
for c in f:
cont = K.gcd(cont, dmp_ground_content(c, v, K))
else:
for c in f:
cont = K.gcd(cont, dmp_ground_content(c, v, K))
if K.is_one(cont):
break
return cont
def dup_primitive(f, K):
"""
Compute content and the primitive form of ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x = ring("x", ZZ)
>>> f = 6*x**2 + 8*x + 12
>>> R.dup_primitive(f)
(2, 3*x**2 + 4*x + 6)
>>> R, x = ring("x", QQ)
>>> f = 6*x**2 + 8*x + 12
>>> R.dup_primitive(f)
(2, 3*x**2 + 4*x + 6)
"""
if not f:
return K.zero, f
cont = dup_content(f, K)
if K.is_one(cont):
return cont, f
else:
return cont, dup_quo_ground(f, cont, K)
def dmp_ground_primitive(f, u, K):
"""
Compute content and the primitive form of ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 2*x*y + 6*x + 4*y + 12
>>> R.dmp_ground_primitive(f)
(2, x*y + 3*x + 2*y + 6)
>>> R, x,y = ring("x,y", QQ)
>>> f = 2*x*y + 6*x + 4*y + 12
>>> R.dmp_ground_primitive(f)
(2, x*y + 3*x + 2*y + 6)
"""
if not u:
return dup_primitive(f, K)
if dmp_zero_p(f, u):
return K.zero, f
cont = dmp_ground_content(f, u, K)
if K.is_one(cont):
return cont, f
else:
return cont, dmp_quo_ground(f, cont, u, K)
def dup_extract(f, g, K):
"""
Extract common content from a pair of polynomials in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_extract(6*x**2 + 12*x + 18, 4*x**2 + 8*x + 12)
(2, 3*x**2 + 6*x + 9, 2*x**2 + 4*x + 6)
"""
fc = dup_content(f, K)
gc = dup_content(g, K)
gcd = K.gcd(fc, gc)
if not K.is_one(gcd):
f = dup_quo_ground(f, gcd, K)
g = dup_quo_ground(g, gcd, K)
return gcd, f, g
def dmp_ground_extract(f, g, u, K):
"""
Extract common content from a pair of polynomials in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_ground_extract(6*x*y + 12*x + 18, 4*x*y + 8*x + 12)
(2, 3*x*y + 6*x + 9, 2*x*y + 4*x + 6)
"""
fc = dmp_ground_content(f, u, K)
gc = dmp_ground_content(g, u, K)
gcd = K.gcd(fc, gc)
if not K.is_one(gcd):
f = dmp_quo_ground(f, gcd, u, K)
g = dmp_quo_ground(g, gcd, u, K)
return gcd, f, g
def dup_real_imag(f, K):
"""
Return bivariate polynomials ``f1`` and ``f2``, such that ``f = f1 + f2*I``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dup_real_imag(x**3 + x**2 + x + 1)
(x**3 + x**2 - 3*x*y**2 + x - y**2 + 1, 3*x**2*y + 2*x*y - y**3 + y)
"""
if not K.is_ZZ and not K.is_QQ:
raise DomainError("computing real and imaginary parts is not supported over %s" % K)
f1 = dmp_zero(1)
f2 = dmp_zero(1)
if not f:
return f1, f2
g = [[[K.one, K.zero]], [[K.one], []]]
h = dmp_ground(f[0], 2)
for c in f[1:]:
h = dmp_mul(h, g, 2, K)
h = dmp_add_term(h, dmp_ground(c, 1), 0, 2, K)
H = dup_to_raw_dict(h)
for k, h in H.items():
m = k % 4
if not m:
f1 = dmp_add(f1, h, 1, K)
elif m == 1:
f2 = dmp_add(f2, h, 1, K)
elif m == 2:
f1 = dmp_sub(f1, h, 1, K)
else:
f2 = dmp_sub(f2, h, 1, K)
return f1, f2
def dup_mirror(f, K):
"""
Evaluate efficiently the composition ``f(-x)`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_mirror(x**3 + 2*x**2 - 4*x + 2)
-x**3 + 2*x**2 + 4*x + 2
"""
f = list(f)
for i in range(len(f) - 2, -1, -2):
f[i] = -f[i]
return f
def dup_scale(f, a, K):
"""
Evaluate efficiently composition ``f(a*x)`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_scale(x**2 - 2*x + 1, ZZ(2))
4*x**2 - 4*x + 1
"""
f, n, b = list(f), len(f) - 1, a
for i in range(n - 1, -1, -1):
f[i], b = b*f[i], b*a
return f
def dup_shift(f, a, K):
"""
Evaluate efficiently Taylor shift ``f(x + a)`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_shift(x**2 - 2*x + 1, ZZ(2))
x**2 + 2*x + 1
"""
f, n = list(f), len(f) - 1
for i in range(n, 0, -1):
for j in range(0, i):
f[j + 1] += a*f[j]
return f
def dup_transform(f, p, q, K):
"""
Evaluate functional transformation ``q**n * f(p/q)`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_transform(x**2 - 2*x + 1, x**2 + 1, x - 1)
x**4 - 2*x**3 + 5*x**2 - 4*x + 4
"""
if not f:
return []
n = len(f) - 1
h, Q = [f[0]], [[K.one]]
for i in range(0, n):
Q.append(dup_mul(Q[-1], q, K))
for c, q in zip(f[1:], Q[1:]):
h = dup_mul(h, p, K)
q = dup_mul_ground(q, c, K)
h = dup_add(h, q, K)
return h
def dup_compose(f, g, K):
"""
Evaluate functional composition ``f(g)`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_compose(x**2 + x, x - 1)
x**2 - x
"""
if len(g) <= 1:
return dup_strip([dup_eval(f, dup_LC(g, K), K)])
if not f:
return []
h = [f[0]]
for c in f[1:]:
h = dup_mul(h, g, K)
h = dup_add_term(h, c, 0, K)
return h
def dmp_compose(f, g, u, K):
"""
Evaluate functional composition ``f(g)`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_compose(x*y + 2*x + y, y)
y**2 + 3*y
"""
if not u:
return dup_compose(f, g, K)
if dmp_zero_p(f, u):
return f
h = [f[0]]
for c in f[1:]:
h = dmp_mul(h, g, u, K)
h = dmp_add_term(h, c, 0, u, K)
return h
def _dup_right_decompose(f, s, K):
"""Helper function for :func:`_dup_decompose`."""
n = len(f) - 1
lc = dup_LC(f, K)
f = dup_to_raw_dict(f)
g = { s: K.one }
r = n // s
for i in range(1, s):
coeff = K.zero
for j in range(0, i):
if not n + j - i in f:
continue
if not s - j in g:
continue
fc, gc = f[n + j - i], g[s - j]
coeff += (i - r*j)*fc*gc
g[s - i] = K.quo(coeff, i*r*lc)
return dup_from_raw_dict(g, K)
def _dup_left_decompose(f, h, K):
"""Helper function for :func:`_dup_decompose`."""
g, i = {}, 0
while f:
q, r = dup_div(f, h, K)
if dup_degree(r) > 0:
return None
else:
g[i] = dup_LC(r, K)
f, i = q, i + 1
return dup_from_raw_dict(g, K)
def _dup_decompose(f, K):
"""Helper function for :func:`dup_decompose`."""
df = len(f) - 1
for s in range(2, df):
if df % s != 0:
continue
h = _dup_right_decompose(f, s, K)
if h is not None:
g = _dup_left_decompose(f, h, K)
if g is not None:
return g, h
return None
def dup_decompose(f, K):
"""
Computes functional decomposition of ``f`` in ``K[x]``.
Given a univariate polynomial ``f`` with coefficients in a field of
characteristic zero, returns list ``[f_1, f_2, ..., f_n]``, where::
f = f_1 o f_2 o ... f_n = f_1(f_2(... f_n))
and ``f_2, ..., f_n`` are monic and homogeneous polynomials of at
least second degree.
Unlike factorization, complete functional decompositions of
polynomials are not unique, consider examples:
1. ``f o g = f(x + b) o (g - b)``
2. ``x**n o x**m = x**m o x**n``
3. ``T_n o T_m = T_m o T_n``
where ``T_n`` and ``T_m`` are Chebyshev polynomials.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_decompose(x**4 - 2*x**3 + x**2)
[x**2, x**2 - x]
References
==========
.. [1] [Kozen89]_
"""
F = []
while True:
result = _dup_decompose(f, K)
if result is not None:
f, h = result
F = [h] + F
else:
break
return [f] + F
def dmp_lift(f, u, K):
"""
Convert algebraic coefficients to integers in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> from sympy import I
>>> K = QQ.algebraic_field(I)
>>> R, x = ring("x", K)
>>> f = x**2 + K([QQ(1), QQ(0)])*x + K([QQ(2), QQ(0)])
>>> R.dmp_lift(f)
x**8 + 2*x**6 + 9*x**4 - 8*x**2 + 16
"""
if K.is_GaussianField:
K1 = K.as_AlgebraicField()
f = dmp_convert(f, u, K, K1)
K = K1
if not K.is_Algebraic:
raise DomainError(
'computation can be done only in an algebraic domain')
F, monoms, polys = dmp_to_dict(f, u), [], []
for monom, coeff in F.items():
if not coeff.is_ground:
monoms.append(monom)
perms = variations([-1, 1], len(monoms), repetition=True)
for perm in perms:
G = dict(F)
for sign, monom in zip(perm, monoms):
if sign == -1:
G[monom] = -G[monom]
polys.append(dmp_from_dict(G, u, K))
return dmp_convert(dmp_expand(polys, u, K), u, K, K.dom)
def dup_sign_variations(f, K):
"""
Compute the number of sign variations of ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_sign_variations(x**4 - x**2 - x + 1)
2
"""
prev, k = K.zero, 0
for coeff in f:
if K.is_negative(coeff*prev):
k += 1
if coeff:
prev = coeff
return k
def dup_clear_denoms(f, K0, K1=None, convert=False):
"""
Clear denominators, i.e. transform ``K_0`` to ``K_1``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> f = QQ(1,2)*x + QQ(1,3)
>>> R.dup_clear_denoms(f, convert=False)
(6, 3*x + 2)
>>> R.dup_clear_denoms(f, convert=True)
(6, 3*x + 2)
"""
if K1 is None:
if K0.has_assoc_Ring:
K1 = K0.get_ring()
else:
K1 = K0
common = K1.one
for c in f:
common = K1.lcm(common, K0.denom(c))
if not K1.is_one(common):
f = dup_mul_ground(f, common, K0)
if not convert:
return common, f
else:
return common, dup_convert(f, K0, K1)
def _rec_clear_denoms(g, v, K0, K1):
"""Recursive helper for :func:`dmp_clear_denoms`."""
common = K1.one
if not v:
for c in g:
common = K1.lcm(common, K0.denom(c))
else:
w = v - 1
for c in g:
common = K1.lcm(common, _rec_clear_denoms(c, w, K0, K1))
return common
def dmp_clear_denoms(f, u, K0, K1=None, convert=False):
"""
Clear denominators, i.e. transform ``K_0`` to ``K_1``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y = ring("x,y", QQ)
>>> f = QQ(1,2)*x + QQ(1,3)*y + 1
>>> R.dmp_clear_denoms(f, convert=False)
(6, 3*x + 2*y + 6)
>>> R.dmp_clear_denoms(f, convert=True)
(6, 3*x + 2*y + 6)
"""
if not u:
return dup_clear_denoms(f, K0, K1, convert=convert)
if K1 is None:
if K0.has_assoc_Ring:
K1 = K0.get_ring()
else:
K1 = K0
common = _rec_clear_denoms(f, u, K0, K1)
if not K1.is_one(common):
f = dmp_mul_ground(f, common, u, K0)
if not convert:
return common, f
else:
return common, dmp_convert(f, u, K0, K1)
def dup_revert(f, n, K):
"""
Compute ``f**(-1)`` mod ``x**n`` using Newton iteration.
This function computes first ``2**n`` terms of a polynomial that
is a result of inversion of a polynomial modulo ``x**n``. This is
useful to efficiently compute series expansion of ``1/f``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> f = -QQ(1,720)*x**6 + QQ(1,24)*x**4 - QQ(1,2)*x**2 + 1
>>> R.dup_revert(f, 8)
61/720*x**6 + 5/24*x**4 + 1/2*x**2 + 1
"""
g = [K.revert(dup_TC(f, K))]
h = [K.one, K.zero, K.zero]
N = int(_ceil(_log(n, 2)))
for i in range(1, N + 1):
a = dup_mul_ground(g, K(2), K)
b = dup_mul(f, dup_sqr(g, K), K)
g = dup_rem(dup_sub(a, b, K), h, K)
h = dup_lshift(h, dup_degree(h), K)
return g
def dmp_revert(f, g, u, K):
"""
Compute ``f**(-1)`` mod ``x**n`` using Newton iteration.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y = ring("x,y", QQ)
"""
if not u:
return dup_revert(f, g, K)
else:
raise MultivariatePolynomialError(f, g)
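# Hedged sketch: calling two of the low-level helpers above directly on dense,
# descending-order coefficient lists (most users go through the ring methods
# shown in the docstrings instead).
if __name__ == "__main__":
    from sympy.polys.domains import ZZ
    f = [ZZ(1), ZZ(-2), ZZ(1)]         # x**2 - 2*x + 1
    print(dup_diff(f, 1, ZZ))          # coefficients of 2*x - 2
    print(dup_shift(f, ZZ(2), ZZ))     # coefficients of x**2 + 2*x + 1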
|
67a1fe8f86e221935422959a75c715200edf086f
|
933b0eef6909e52fb086015e1f73e9507aad9c3f
|
/Image_Processing/src/basics/basic.py
|
7a6182c7aa7b90aaefe9de1e03ff6d21d08b7947
|
[] |
no_license
|
codezoned/ScriptsDump
|
c105641ee06b8bf148b9c2779b04eb1d0369a85f
|
df6fcc47f5a73c7a5c3522f985e23b89fe56117e
|
refs/heads/master
| 2023-04-29T03:12:40.610817
| 2022-11-26T12:22:33
| 2022-11-26T12:22:33
| 141,881,901
| 157
| 221
| null | 2023-08-10T07:00:05
| 2018-07-22T09:17:56
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,127
|
py
|
basic.py
|
# coding: utf8
# Python implementation of basic image processing
# Author: Caio Cesar Viana da Silva
# Install scikit-image: pip install scikit-image
# import skimage.io as io
# from matplotlib import pyplot as plt
# import numpy as np
# from skimage.transform import AffineTransform, warp
# import skimage.transform as ski
#OPENING AN IMAGE
def open_img(img_path):
import skimage.io as io
img = io.imread(img_path)
io.imshow(img)
io.show()
return img
#VISUALIZE HISTOGRAM
def histogram_img(img_path):
import skimage.io as io
from matplotlib import pyplot as plt
img = io.imread(img_path)
plt.hist(img.ravel(),256,[0,256])
plt.show()
#RGB HISTOGRAM
def histogram_rgb_img(img_path):
import skimage.io as io
from matplotlib import pyplot as plt
img = io.imread(img_path)
color = [ 'r','g','b']
for i, c in enumerate(color) :
plt.hist(img[:,:,i].flatten(),256, color=c)
plt.show()
#RGB TO GRAYSCALE
def rgb_2_gray(img_path):
import numpy as np
import matplotlib.pyplot as plt
img = open_img(img_path)
gray = np.dot(img[...,:3], [0.299, 0.587, 0.114])
plt.imshow(gray, cmap = plt.get_cmap('gray'))
plt.show()
#SCALING IMAGE
def scaling_img(img_path, factor=0.5):
    import skimage.io as io
    import skimage.transform as ski
    img = io.imread(img_path)
    # Resize the image to `factor` of its original height and width.
    scaled = ski.resize(img, (int(img.shape[0] * factor), int(img.shape[1] * factor)))
    io.imshow(scaled)
    io.show()
    return scaled
#TRANSLATING IMAGE
def translating_img(img_path, vector):
import matplotlib.pyplot as plt
from skimage.transform import AffineTransform, warp
img = open_img(img_path)
transform = AffineTransform(translation=vector)
shifted = warp(img, transform, mode='constant', preserve_range=True)
plt.imshow(shifted)
plt.show()
#ROTATING IMAGE
def rotating_img(img_path, degree):
import skimage.io as io
import skimage.transform as ski
img = open_img(img_path)
imgR = ski.rotate(img,degree)
io.imshow(imgR)
io.show()
def main():
open_img('test.jpg')
histogram_img('test.jpg')
histogram_rgb_img('test.jpg')
rgb_2_gray('test.jpg')
scaling_img('test.jpg')
translating_img('test.jpg',[-100, -100])
rotating_img('test.jpg',45)
if __name__ == "__main__":
main()
|
8d67905491177e0fb5322122328fd9638a9f801f
|
acf7457d3a799cb9bff12686d2d616688bcd4b5b
|
/packages/python/plotly/plotly/validators/scattercarpet/_hoverlabel.py
|
55feac5cd2591c9560f7680e490df2d20f3fcd2f
|
[
"MIT"
] |
permissive
|
plotly/plotly.py
|
f4f61639f08160f16195efc95b5901dc5a937346
|
975a704074f01c078e0fdfa32bdf17130bf89e69
|
refs/heads/master
| 2023-09-06T06:15:08.340035
| 2023-08-24T12:28:14
| 2023-08-24T12:28:14
| 14,579,099
| 14,751
| 2,989
|
MIT
| 2023-09-08T19:55:32
| 2013-11-21T05:53:08
|
Python
|
UTF-8
|
Python
| false
| false
| 2,063
|
py
|
_hoverlabel.py
|
import _plotly_utils.basevalidators
class HoverlabelValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="hoverlabel", parent_name="scattercarpet", **kwargs):
super(HoverlabelValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Hoverlabel"),
data_docs=kwargs.pop(
"data_docs",
"""
align
Sets the horizontal alignment of the text
                        content within the hover label box. Has an effect
                        only if the hover label text spans two or more
                        lines
alignsrc
Sets the source reference on Chart Studio Cloud
for `align`.
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for `bgcolor`.
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for `bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for `namelength`.
""",
),
**kwargs,
)
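# Hedged illustration (assumes plotly is installed): the user-facing counterpart
# of this validator is the hoverlabel argument on scattercarpet traces.
if __name__ == "__main__":
    import plotly.graph_objects as go
    trace = go.Scattercarpet(
        a=[4, 4.5, 5],
        b=[1, 2, 3],
        hoverlabel=dict(bgcolor="white", namelength=20),
    )
    print(trace.hoverlabel)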
|
380ab4748f7e9cfc6c93d6737863510eb2278d67
|
0760fb4901a75766921a205b55686d6d6f049b30
|
/python/ray/autoscaler/_private/legacy_info_string.py
|
830078345455218a005f7cbf734d0f7c88d93b98
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ray-project/ray
|
a4bb6940b08b59a61ef0b8e755a52d8563a2f867
|
edba68c3e7cf255d1d6479329f305adb7fa4c3ed
|
refs/heads/master
| 2023-08-31T03:36:48.164405
| 2023-08-31T03:20:38
| 2023-08-31T03:20:38
| 71,932,349
| 29,482
| 5,669
|
Apache-2.0
| 2023-09-14T21:48:14
| 2016-10-25T19:38:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,225
|
py
|
legacy_info_string.py
|
import logging
from ray._private.ray_constants import DEBUG_AUTOSCALING_STATUS_LEGACY
from ray.experimental.internal_kv import _internal_kv_initialized, _internal_kv_put
"""This file provides legacy support for the old info string in order to
ensure the dashboard's `api/cluster_status` does not break backwards
compatibility.
"""
logger = logging.getLogger(__name__)
def legacy_log_info_string(autoscaler, nodes):
tmp = "Cluster status: "
tmp += info_string(autoscaler, nodes)
tmp += "\n"
tmp += autoscaler.load_metrics.info_string()
tmp += "\n"
tmp += autoscaler.resource_demand_scheduler.debug_string(
nodes,
autoscaler.pending_launches.breakdown(),
autoscaler.load_metrics.get_resource_utilization(),
)
if _internal_kv_initialized():
_internal_kv_put(DEBUG_AUTOSCALING_STATUS_LEGACY, tmp, overwrite=True)
logger.debug(tmp)
def info_string(autoscaler, nodes):
suffix = ""
if autoscaler.updaters:
suffix += " ({} updating)".format(len(autoscaler.updaters))
if autoscaler.num_failed_updates:
suffix += " ({} failed to update)".format(len(autoscaler.num_failed_updates))
return "{} nodes{}".format(len(nodes), suffix)
|
61b24636b3f4387d3fe01df1422400d1fbd3a262
|
7f620e7902c0b9ccb1fcfd1427acd5936ea33814
|
/mlrun/features.py
|
369c65b25bb19fcd6a4d15e6387310c0b5d857cd
|
[
"Apache-2.0"
] |
permissive
|
mlrun/mlrun
|
2074c230070129ce3becb211b92c90b29a2ce850
|
b5fe0c05ae7f5818a4a5a5a40245c851ff9b2c77
|
refs/heads/development
| 2023-09-06T00:09:21.546135
| 2023-09-05T19:38:13
| 2023-09-05T19:38:13
| 205,706,595
| 1,093
| 229
|
Apache-2.0
| 2023-09-14T14:14:10
| 2019-09-01T16:59:19
|
Python
|
UTF-8
|
Python
| false
| false
| 15,624
|
py
|
features.py
|
# Copyright 2023 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import re
from typing import Dict, List, Optional, Union
from .data_types import ValueType, python_type_to_value_type
from .errors import MLRunRuntimeError, err_to_str
from .model import ModelObj
def _limited_string(value: str, max_size: int = 40):
"""
Provide limited string size, typically for reporting original value
in case of error (and for better identification of error location
based on presenting part of original value)
"""
return (
value
if (value is None) or (len(value) <= max_size)
else value[:max_size] + "..."
)
class Entity(ModelObj):
"""data entity (index)"""
kind = "entity"
def __init__(
self,
name: str = None,
value_type: Union[ValueType, str] = None,
description: str = None,
labels: Optional[Dict[str, str]] = None,
):
"""data entity (index key)
:param name: entity name
:param value_type: type of the entity, e.g. ValueType.STRING, ValueType.INT (default ValueType.STRING)
        :param description: text description of the entity
:param labels: a set of key/value labels (tags)
"""
self.name = name
self.description = description
self.value_type = ValueType(value_type) if value_type else None
if name and not value_type:
self.value_type = ValueType.STRING
self.labels = labels or {}
def __eq__(self, other):
return self.name == other.name
class Feature(ModelObj):
"""data feature"""
_dict_fields = [
"name",
"description",
"value_type",
"dims",
"default",
"labels",
"aggregate",
"validator",
"origin",
]
def __init__(
self,
value_type: Union[ValueType, str] = None,
dims: List[int] = None,
description: str = None,
aggregate: bool = None,
name: str = None,
validator=None,
default: str = None,
labels: Dict[str, str] = None,
):
"""data feature
Features can be specified manually or inferred automatically (during ingest/preview)
:param value_type: type of the feature. Use the ValueType constants library e.g. ValueType.STRING,
ValueType.INT (default ValueType.STRING)
:param dims: list of dimensions for vectors/tensors, e.g. [2, 2]
:param description: text description of the feature
:param aggregate: is it an aggregated value
:param name: name of the feature
:param validator: feature validation policy
:param default: default value
:param labels: a set of key/value labels (tags)
"""
self.name = name or ""
if isinstance(value_type, ValueType):
self.value_type = value_type
elif value_type is not None:
self.value_type = python_type_to_value_type(value_type)
else:
self.value_type = ValueType.STRING
self.dims = dims
self.description = description
self.default = default
self.labels = labels or {}
self.aggregate = aggregate
self.origin = None # used to link the feature to the feature set origin (inside vector.status)
self._validator = validator
@property
def validator(self):
return self._validator
@validator.setter
def validator(self, validator):
if isinstance(validator, dict):
kind = validator.get("kind")
validator = validator_kinds[kind].from_dict(validator)
self._validator = validator
class BasicTypeValidator:
def __init__(self):
pass
def check(self, value_type, value):
return True, {}
class ConvertTypeValidator(BasicTypeValidator):
def __init__(self, func):
super().__init__()
self.func = func
def check(self, value_type, value):
ok, args = super().check(value_type, value)
if ok:
try:
self.func(value)
except Exception as err:
return (
False,
{"message": err_to_str(err), "type": value_type},
)
return ok, args
class RangeTypeValidator(BasicTypeValidator):
def __init__(self, min, max):
super().__init__()
self.min = min
self.max = max
def check(self, value_type, value):
ok, args = super().check(value_type, value)
if ok:
try:
if value < self.min:
return (
False,
{
"message": "Value is smaller than min range",
"type": value_type,
"min range": self.min,
"value": _limited_string(value),
},
)
if value > self.max:
return (
False,
{
"message": "Value is greater than max range",
"type": value_type,
"max range": self.max,
"value": _limited_string(value),
},
)
except Exception as err:
return (
False,
{"message": err_to_str(err), "type": value_type},
)
return ok, args
# TODO: add addition validation for commented types
type_validator = {
# ValueType.BOOL: it does not make sense to do validation for BOOL (everything is True or False by default)
ValueType.INT8: RangeTypeValidator(-128, 127),
ValueType.INT16: RangeTypeValidator(-32768, 32767),
ValueType.INT32: RangeTypeValidator(-2147483648, 2147483647),
ValueType.INT64: RangeTypeValidator(-9223372036854775808, 9223372036854775807),
ValueType.INT128: RangeTypeValidator(-math.pow(2, 127), math.pow(2, 127) - 1),
ValueType.UINT8: RangeTypeValidator(0, 255),
ValueType.UINT16: RangeTypeValidator(0, 65535),
ValueType.UINT32: RangeTypeValidator(0, 4294967295),
ValueType.UINT64: RangeTypeValidator(0, 18446744073709551615),
ValueType.UINT128: RangeTypeValidator(0, math.pow(2, 128)),
# ValueType.FLOAT16: None,
ValueType.FLOAT: ConvertTypeValidator(float),
ValueType.DOUBLE: ConvertTypeValidator(float),
# ValueType.BFLOAT16: None,
ValueType.BYTES: ConvertTypeValidator(bytes),
# ValueType.STRING: it does not make sense to do validation for STRING (everything is valid also '\x00', '\xff')
# ValueType.DATETIME: None,
# ValueType.BYTES_LIST: None,
# ValueType.STRING_LIST: None,
# ValueType.INT32_LIST: None,
# ValueType.INT64_LIST: None,
# ValueType.DOUBLE_LIST: None,
# ValueType.FLOAT_LIST: None,
# ValueType.BOOL_LIST: None,
}
class Validator(ModelObj):
"""Base validator"""
kind = ""
_dict_fields = ["kind", "check_type", "severity"]
def __init__(self, check_type: bool = None, severity: str = None):
"""Base validator
example::
from mlrun.features import Validator
# Add validator to the feature 'bid' with check type
quotes_set["bid"].validator = Validator(
check_type=True,
severity="info"
)
:param check_type: check feature type e.g. True, False
:param severity: severity name e.g. info, warning, etc.
"""
self._feature = None
self.check_type = check_type
self.severity = severity
def set_feature(self, feature: Feature):
self._feature = feature
def check(self, value):
if self.check_type:
if self._feature.value_type is not None:
if self._feature.value_type in type_validator:
return type_validator[self._feature.value_type].check(
self._feature.value_type, value
)
return True, {}
class MinMaxValidator(Validator):
"""Validate min/max value ranges"""
kind = "minmax"
_dict_fields = Validator._dict_fields + ["min", "max"]
def __init__(
self, check_type: bool = None, severity: str = None, min=None, max=None
):
"""Validate min/max value ranges
example::
from mlrun.features import MinMaxValidator
# Add validator to the feature 'bid', where valid
# minimal value is 52
quotes_set["bid"].validator = MinMaxValidator(
min=52,
severity="info"
)
:param check_type: check feature type e.g. True, False
:param severity: severity name e.g. info, warning, etc.
:param min: minimal valid size
:param max: maximal valid size
"""
super().__init__(check_type, severity)
self.min = min
self.max = max
def check(self, value):
ok, args = super().check(value)
if ok:
try:
if self.min is not None:
if value < self.min:
return (
False,
{
"message": "value is smaller than min",
"min": self.min,
"value": _limited_string(str(value)),
},
)
if self.max is not None:
if value > self.max:
return (
False,
{
"message": "value is greater than max",
"max": self.max,
"value": _limited_string(str(value)),
},
)
except Exception as err:
return (
False,
{"message": err_to_str(err), "type": self.kind},
)
return ok, args
class MinMaxLenValidator(Validator):
"""Validate min/max length value ranges"""
kind = "minmaxlen"
_dict_fields = Validator._dict_fields + ["min", "max"]
def __init__(
self, check_type: bool = None, severity: str = None, min=None, max=None
):
"""Validate min/max length value ranges
example::
from mlrun.features import MinMaxLenValidator
# Add length validator to the feature 'ticker', where valid
# minimal length is 1 and maximal length is 10
quotes_set["ticker"].validator = MinMaxLenValidator(
min=1,
max=10,
severity="info"
)
:param check_type: check feature type e.g. True, False
:param severity: severity name e.g. info, warning, etc.
:param min: minimal valid length size
:param max: maximal valid length size
"""
super().__init__(check_type, severity)
self.min = min
self.max = max
def check(self, value):
ok, args = super().check(value)
if ok:
try:
if self.min is not None:
if len(value) < self.min:
return (
False,
{
"message": "Length value is smaller than min",
"min": self.min,
"length value": len(value),
},
)
if self.max is not None:
if len(value) > self.max:
return (
False,
{
"message": "Length value is greater than max",
"max": self.max,
"length value": len(value),
},
)
except Exception as err:
return (
False,
{"message": err_to_str(err), "type": self.kind},
)
return ok, args
class RegexValidator(Validator):
"""Validate value based on regular expression"""
kind = "regex"
_dict_fields = Validator._dict_fields + ["regex"]
def __init__(self, check_type: bool = None, severity: str = None, regex=None):
"""Validate value based on regular expression
example::
from mlrun.features import RegexValidator
# Add regular expression validator to the feature 'name' and
# expression '(\b[A-Za-z]{1}[0-9]{7}\b)' where valid values are
# e.g. A1234567, z9874563, etc.
quotes_set["name"].validator = RegexValidator(
regex=r"(\b[A-Za-z]{1}[0-9]{7}\b)",
severity="info"
)
:param check_type: check feature type e.g. True, False
:param severity: severity name e.g. info, warning, etc.
:param regex: regular expression for validation
"""
super().__init__(check_type, severity)
self.regex = regex
self.regex_compile = re.compile(self.regex) if self.regex else None
def check(self, value):
ok, args = super().check(value)
if ok:
try:
if self.regex is not None:
if not re.fullmatch(self.regex_compile, value):
return (
False,
{
"message": "Value is not valid with regular expression",
"regexp": self.regex,
"value": _limited_string(str(value)),
},
)
except Exception as err:
return (
False,
{"message": err_to_str(err), "type": self.kind},
)
return ok, args
@classmethod
def from_dict(cls, struct=None, fields=None, deprecated_fields: dict = None):
new_obj = super(RegexValidator, cls).from_dict(
struct=struct, fields=fields, deprecated_fields=deprecated_fields
)
if hasattr(new_obj, "regex"):
new_obj.regex_compile = re.compile(new_obj.regex) if new_obj.regex else None
else:
raise MLRunRuntimeError(
f"Object with type {type(new_obj)} "
f"have to contain `regex` attribute"
)
return new_obj
validator_kinds = {
"": Validator,
"minmax": MinMaxValidator,
"minmaxlen": MinMaxLenValidator,
"regex": RegexValidator,
}
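# Hedged standalone sketch: the range validators above can be exercised directly,
# without a feature set (mlrun normally does this wiring internally).
if __name__ == "__main__":
    bid_validator = MinMaxValidator(severity="info", min=52)
    print(bid_validator.check(50))  # (False, {'message': 'value is smaller than min', ...})
    print(bid_validator.check(60))  # (True, {})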
|
66927cae94203f96490e4ee4372ce4e49b9dbb19
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/opensensemap/__init__.py
|
e03f4133d88188e30840df17d6947e478b266dc2
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 34
|
py
|
__init__.py
|
"""The opensensemap component."""
|
13b49e2d6e3be068920948002c56e94750b8468c
|
c50e7eb190802d7849c0d0cea02fb4d2f0021777
|
/src/quantum/azext_quantum/vendored_sdks/azure_quantum/models/_models_py3.py
|
2e78ff44a31a479819c98826e295a94fb8504e42
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azure-cli-extensions
|
c1615b19930bba7166c282918f166cd40ff6609c
|
b8c2cf97e991adf0c0a207d810316b8f4686dc29
|
refs/heads/main
| 2023-08-24T12:40:15.528432
| 2023-08-24T09:17:25
| 2023-08-24T09:17:25
| 106,580,024
| 336
| 1,226
|
MIT
| 2023-09-14T10:48:57
| 2017-10-11T16:27:31
|
Python
|
UTF-8
|
Python
| false
| false
| 25,868
|
py
|
_models_py3.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
import __init__ as _models
class BlobDetails(msrest.serialization.Model):
"""Blob details.
All required parameters must be populated in order to send to Azure.
:ivar container_name: Required. The container name.
:vartype container_name: str
:ivar blob_name: The blob name.
:vartype blob_name: str
"""
_validation = {
'container_name': {'required': True},
}
_attribute_map = {
'container_name': {'key': 'containerName', 'type': 'str'},
'blob_name': {'key': 'blobName', 'type': 'str'},
}
def __init__(
self,
*,
container_name: str,
blob_name: Optional[str] = None,
**kwargs
):
"""
:keyword container_name: Required. The container name.
:paramtype container_name: str
:keyword blob_name: The blob name.
:paramtype blob_name: str
"""
super(BlobDetails, self).__init__(**kwargs)
self.container_name = container_name
self.blob_name = blob_name
class CostEstimate(msrest.serialization.Model):
"""The job cost billed by the provider. The final cost on your bill might be slightly different due to added taxes and currency conversion rates.
:ivar currency_code: The currency code.
:vartype currency_code: str
:ivar events: List of usage events.
:vartype events: list[~azure.quantum._client.models.UsageEvent]
:ivar estimated_total: The estimated total.
:vartype estimated_total: float
"""
_attribute_map = {
'currency_code': {'key': 'currencyCode', 'type': 'str'},
'events': {'key': 'events', 'type': '[UsageEvent]'},
'estimated_total': {'key': 'estimatedTotal', 'type': 'float'},
}
def __init__(
self,
*,
currency_code: Optional[str] = None,
events: Optional[List["_models.UsageEvent"]] = None,
estimated_total: Optional[float] = None,
**kwargs
):
"""
:keyword currency_code: The currency code.
:paramtype currency_code: str
:keyword events: List of usage events.
:paramtype events: list[~azure.quantum._client.models.UsageEvent]
:keyword estimated_total: The estimated total.
:paramtype estimated_total: float
"""
super(CostEstimate, self).__init__(**kwargs)
self.currency_code = currency_code
self.events = events
self.estimated_total = estimated_total
class ErrorData(msrest.serialization.Model):
"""An error response from Azure.
:ivar code: An identifier for the error. Codes are invariant and are intended to be consumed
programmatically.
:vartype code: str
:ivar message: A message describing the error, intended to be suitable for displaying in a user
interface.
:vartype message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
code: Optional[str] = None,
message: Optional[str] = None,
**kwargs
):
"""
:keyword code: An identifier for the error. Codes are invariant and are intended to be consumed
programmatically.
:paramtype code: str
:keyword message: A message describing the error, intended to be suitable for displaying in a
user interface.
:paramtype message: str
"""
super(ErrorData, self).__init__(**kwargs)
self.code = code
self.message = message
class JobDetails(msrest.serialization.Model):
"""Job details.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The job id.
:vartype id: str
:ivar name: The job name. Is not required for the name to be unique and it's only used for
display purposes.
:vartype name: str
:ivar container_uri: Required. The blob container SAS uri, the container is used to host job
data.
:vartype container_uri: str
:ivar input_data_uri: The input blob SAS uri, if specified, it will override the default input
blob in the container.
:vartype input_data_uri: str
:ivar input_data_format: Required. The format of the input data.
:vartype input_data_format: str
:ivar input_params: The input parameters for the job. JSON object used by the target solver. It
is expected that the size of this object is small and only used to specify parameters for the
execution target, not the input data.
:vartype input_params: any
:ivar provider_id: Required. The unique identifier for the provider.
:vartype provider_id: str
:ivar target: Required. The target identifier to run the job.
:vartype target: str
:ivar metadata: The job metadata. Metadata provides client the ability to store client-specific
information.
:vartype metadata: dict[str, str]
:ivar output_data_uri: The output blob SAS uri. When a job finishes successfully, results will
be uploaded to this blob.
:vartype output_data_uri: str
:ivar output_data_format: The format of the output data.
:vartype output_data_format: str
:ivar status: The job status. Known values are: "Waiting", "Executing", "Succeeded", "Failed",
"Cancelled".
:vartype status: str or ~azure.quantum._client.models.JobStatus
:ivar creation_time: The creation time of the job.
:vartype creation_time: ~datetime.datetime
:ivar begin_execution_time: The time when the job began execution.
:vartype begin_execution_time: ~datetime.datetime
:ivar end_execution_time: The time when the job finished execution.
:vartype end_execution_time: ~datetime.datetime
:ivar cancellation_time: The time when a job was successfully cancelled.
:vartype cancellation_time: ~datetime.datetime
:ivar cost_estimate: The job cost billed by the provider. The final cost on your bill might be
slightly different due to added taxes and currency conversion rates.
:vartype cost_estimate: ~azure.quantum._client.models.CostEstimate
:ivar error_data: The error data for the job. This is expected only when Status 'Failed'.
:vartype error_data: ~azure.quantum._client.models.ErrorData
:ivar tags: A set of tags. List of user-supplied tags associated with the job.
:vartype tags: list[str]
"""
_validation = {
'container_uri': {'required': True},
'input_data_format': {'required': True},
'provider_id': {'required': True},
'target': {'required': True},
'status': {'readonly': True},
'creation_time': {'readonly': True},
'begin_execution_time': {'readonly': True},
'end_execution_time': {'readonly': True},
'cancellation_time': {'readonly': True},
'cost_estimate': {'readonly': True},
'error_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'container_uri': {'key': 'containerUri', 'type': 'str'},
'input_data_uri': {'key': 'inputDataUri', 'type': 'str'},
'input_data_format': {'key': 'inputDataFormat', 'type': 'str'},
'input_params': {'key': 'inputParams', 'type': 'object'},
'provider_id': {'key': 'providerId', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'metadata': {'key': 'metadata', 'type': '{str}'},
'output_data_uri': {'key': 'outputDataUri', 'type': 'str'},
'output_data_format': {'key': 'outputDataFormat', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'begin_execution_time': {'key': 'beginExecutionTime', 'type': 'iso-8601'},
'end_execution_time': {'key': 'endExecutionTime', 'type': 'iso-8601'},
'cancellation_time': {'key': 'cancellationTime', 'type': 'iso-8601'},
'cost_estimate': {'key': 'costEstimate', 'type': 'CostEstimate'},
'error_data': {'key': 'errorData', 'type': 'ErrorData'},
'tags': {'key': 'tags', 'type': '[str]'},
}
def __init__(
self,
*,
container_uri: str,
input_data_format: str,
provider_id: str,
target: str,
id: Optional[str] = None,
name: Optional[str] = None,
input_data_uri: Optional[str] = None,
input_params: Optional[Any] = None,
metadata: Optional[Dict[str, str]] = None,
output_data_uri: Optional[str] = None,
output_data_format: Optional[str] = None,
tags: Optional[List[str]] = None,
**kwargs
):
"""
:keyword id: The job id.
:paramtype id: str
:keyword name: The job name. Is not required for the name to be unique and it's only used for
display purposes.
:paramtype name: str
:keyword container_uri: Required. The blob container SAS uri, the container is used to host job
data.
:paramtype container_uri: str
:keyword input_data_uri: The input blob SAS uri, if specified, it will override the default
input blob in the container.
:paramtype input_data_uri: str
:keyword input_data_format: Required. The format of the input data.
:paramtype input_data_format: str
:keyword input_params: The input parameters for the job. JSON object used by the target solver.
It is expected that the size of this object is small and only used to specify parameters for
the execution target, not the input data.
:paramtype input_params: any
:keyword provider_id: Required. The unique identifier for the provider.
:paramtype provider_id: str
:keyword target: Required. The target identifier to run the job.
:paramtype target: str
:keyword metadata: The job metadata. Metadata provides client the ability to store
client-specific information.
:paramtype metadata: dict[str, str]
:keyword output_data_uri: The output blob SAS uri. When a job finishes successfully, results
will be uploaded to this blob.
:paramtype output_data_uri: str
:keyword output_data_format: The format of the output data.
:paramtype output_data_format: str
:keyword tags: A set of tags. List of user-supplied tags associated with the job.
:paramtype tags: list[str]
"""
super(JobDetails, self).__init__(**kwargs)
self.id = id
self.name = name
self.container_uri = container_uri
self.input_data_uri = input_data_uri
self.input_data_format = input_data_format
self.input_params = input_params
self.provider_id = provider_id
self.target = target
self.metadata = metadata
self.output_data_uri = output_data_uri
self.output_data_format = output_data_format
self.status = None
self.creation_time = None
self.begin_execution_time = None
self.end_execution_time = None
self.cancellation_time = None
self.cost_estimate = None
self.error_data = None
self.tags = tags
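# Example (not executed here): constructing a JobDetails payload for submission.
# All URIs, ids and the target below are hypothetical placeholder values chosen
# only to show which fields are required; server-populated fields such as status,
# creation_time and cost_estimate remain None until the service returns them.
#
#     job = JobDetails(
#         container_uri="https://example.blob.core.windows.net/job-data?sv=...",
#         input_data_format="microsoft.qio.v2",
#         provider_id="microsoft",
#         target="microsoft.paralleltempering.cpu",
#         name="my-job",
#     )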
class JobDetailsList(msrest.serialization.Model):
"""List of job details.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value:
:vartype value: list[~azure.quantum._client.models.JobDetails]
:ivar count: Total records count number.
:vartype count: long
:ivar next_link: Link to the next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[JobDetails]'},
'count': {'key': 'count', 'type': 'long'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
count: Optional[int] = None,
**kwargs
):
"""
:keyword count: Total records count number.
:paramtype count: long
"""
super(JobDetailsList, self).__init__(**kwargs)
self.value = None
self.count = count
self.next_link = None
class JsonPatchDocument(msrest.serialization.Model):
"""A JSONPatch document as defined by RFC 6902.
All required parameters must be populated in order to send to Azure.
:ivar op: Required. The operation to be performed. Known values are: "add", "remove",
"replace", "move", "copy", "test".
:vartype op: str or ~azure.quantum._client.models.JsonPatchOperation
:ivar path: Required. A JSON-Pointer.
:vartype path: str
:ivar value: A value to be used in the operation on the path.
:vartype value: any
:ivar from_property: Optional field used in copy and move operations.
:vartype from_property: str
"""
_validation = {
'op': {'required': True},
'path': {'required': True},
}
_attribute_map = {
'op': {'key': 'op', 'type': 'str'},
'path': {'key': 'path', 'type': 'str'},
'value': {'key': 'value', 'type': 'object'},
'from_property': {'key': 'from', 'type': 'str'},
}
def __init__(
self,
*,
op: Union[str, "_models.JsonPatchOperation"],
path: str,
value: Optional[Any] = None,
from_property: Optional[str] = None,
**kwargs
):
"""
:keyword op: Required. The operation to be performed. Known values are: "add", "remove",
"replace", "move", "copy", "test".
:paramtype op: str or ~azure.quantum._client.models.JsonPatchOperation
:keyword path: Required. A JSON-Pointer.
:paramtype path: str
:keyword value: A value to be used in the operation on the path.
:paramtype value: any
:keyword from_property: Optional field used in copy and move operations.
:paramtype from_property: str
"""
super(JsonPatchDocument, self).__init__(**kwargs)
self.op = op
self.path = path
self.value = value
self.from_property = from_property
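# Example (not executed here): a single RFC 6902 "replace" operation that renames
# a job. The path and value are placeholders; "op" accepts either a plain string
# or a JsonPatchOperation value.
#
#     patch = JsonPatchDocument(op="replace", path="/name", value="new-job-name")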
class ProviderStatus(msrest.serialization.Model):
"""Providers status.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Provider id.
:vartype id: str
:ivar current_availability: Provider availability. Known values are: "Available", "Degraded",
"Unavailable".
:vartype current_availability: str or ~azure.quantum._client.models.ProviderAvailability
:ivar targets:
:vartype targets: list[~azure.quantum._client.models.TargetStatus]
"""
_validation = {
'id': {'readonly': True},
'current_availability': {'readonly': True},
'targets': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'current_availability': {'key': 'currentAvailability', 'type': 'str'},
'targets': {'key': 'targets', 'type': '[TargetStatus]'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ProviderStatus, self).__init__(**kwargs)
self.id = None
self.current_availability = None
self.targets = None
class ProviderStatusList(msrest.serialization.Model):
"""Providers status.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value:
:vartype value: list[~azure.quantum._client.models.ProviderStatus]
:ivar next_link: Link to the next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ProviderStatus]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ProviderStatusList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class Quota(msrest.serialization.Model):
"""Quota information.
:ivar dimension: The name of the dimension associated with the quota.
:vartype dimension: str
:ivar scope: The scope at which the quota is applied. Known values are: "Workspace",
"Subscription".
:vartype scope: str or ~azure.quantum._client.models.DimensionScope
:ivar provider_id: The unique identifier for the provider.
:vartype provider_id: str
:ivar utilization: The amount of the usage that has been applied for the current period.
:vartype utilization: float
:ivar holds: The amount of the usage that has been reserved but not applied for the current
period.
:vartype holds: float
:ivar limit: The maximum amount of usage allowed for the current period.
:vartype limit: float
:ivar period: The time period in which the quota's underlying meter is accumulated. Based on
calendar year. 'None' is used for concurrent quotas. Known values are: "None", "Monthly".
:vartype period: str or ~azure.quantum._client.models.MeterPeriod
"""
_attribute_map = {
'dimension': {'key': 'dimension', 'type': 'str'},
'scope': {'key': 'scope', 'type': 'str'},
'provider_id': {'key': 'providerId', 'type': 'str'},
'utilization': {'key': 'utilization', 'type': 'float'},
'holds': {'key': 'holds', 'type': 'float'},
'limit': {'key': 'limit', 'type': 'float'},
'period': {'key': 'period', 'type': 'str'},
}
def __init__(
self,
*,
dimension: Optional[str] = None,
scope: Optional[Union[str, "_models.DimensionScope"]] = None,
provider_id: Optional[str] = None,
utilization: Optional[float] = None,
holds: Optional[float] = None,
limit: Optional[float] = None,
period: Optional[Union[str, "_models.MeterPeriod"]] = None,
**kwargs
):
"""
:keyword dimension: The name of the dimension associated with the quota.
:paramtype dimension: str
:keyword scope: The scope at which the quota is applied. Known values are: "Workspace",
"Subscription".
:paramtype scope: str or ~azure.quantum._client.models.DimensionScope
:keyword provider_id: The unique identifier for the provider.
:paramtype provider_id: str
:keyword utilization: The amount of the usage that has been applied for the current period.
:paramtype utilization: float
:keyword holds: The amount of the usage that has been reserved but not applied for the current
period.
:paramtype holds: float
:keyword limit: The maximum amount of usage allowed for the current period.
:paramtype limit: float
:keyword period: The time period in which the quota's underlying meter is accumulated. Based on
calendar year. 'None' is used for concurrent quotas. Known values are: "None", "Monthly".
:paramtype period: str or ~azure.quantum._client.models.MeterPeriod
"""
super(Quota, self).__init__(**kwargs)
self.dimension = dimension
self.scope = scope
self.provider_id = provider_id
self.utilization = utilization
self.holds = holds
self.limit = limit
self.period = period
class QuotaList(msrest.serialization.Model):
"""List of quotas.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value:
:vartype value: list[~azure.quantum._client.models.Quota]
:ivar next_link: Link to the next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Quota]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(QuotaList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class RestError(msrest.serialization.Model):
"""Error information returned by the API.
:ivar error: An error response from Azure.
:vartype error: ~azure.quantum._client.models.ErrorData
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorData'},
}
def __init__(
self,
*,
error: Optional["_models.ErrorData"] = None,
**kwargs
):
"""
:keyword error: An error response from Azure.
:paramtype error: ~azure.quantum._client.models.ErrorData
"""
super(RestError, self).__init__(**kwargs)
self.error = error
class SasUriResponse(msrest.serialization.Model):
"""Get SAS URL operation response.
:ivar sas_uri: A URL with a SAS token to upload a blob for execution in the given workspace.
:vartype sas_uri: str
"""
_attribute_map = {
'sas_uri': {'key': 'sasUri', 'type': 'str'},
}
def __init__(
self,
*,
sas_uri: Optional[str] = None,
**kwargs
):
"""
:keyword sas_uri: A URL with a SAS token to upload a blob for execution in the given workspace.
:paramtype sas_uri: str
"""
super(SasUriResponse, self).__init__(**kwargs)
self.sas_uri = sas_uri
class TargetStatus(msrest.serialization.Model):
"""Target status.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Target id.
:vartype id: str
:ivar current_availability: Target availability. Known values are: "Available", "Degraded",
"Unavailable".
:vartype current_availability: str or ~azure.quantum._client.models.TargetAvailability
:ivar average_queue_time: Average queue time in seconds.
:vartype average_queue_time: long
:ivar status_page: A page with detailed status of the provider.
:vartype status_page: str
"""
_validation = {
'id': {'readonly': True},
'current_availability': {'readonly': True},
'average_queue_time': {'readonly': True},
'status_page': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'current_availability': {'key': 'currentAvailability', 'type': 'str'},
'average_queue_time': {'key': 'averageQueueTime', 'type': 'long'},
'status_page': {'key': 'statusPage', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(TargetStatus, self).__init__(**kwargs)
self.id = None
self.current_availability = None
self.average_queue_time = None
self.status_page = None
class UsageEvent(msrest.serialization.Model):
"""Usage event details.
:ivar dimension_id: The dimension id.
:vartype dimension_id: str
:ivar dimension_name: The dimension name.
:vartype dimension_name: str
:ivar measure_unit: The unit of measure.
:vartype measure_unit: str
:ivar amount_billed: The amount billed.
:vartype amount_billed: float
:ivar amount_consumed: The amount consumed.
:vartype amount_consumed: float
:ivar unit_price: The unit price.
:vartype unit_price: float
"""
_attribute_map = {
'dimension_id': {'key': 'dimensionId', 'type': 'str'},
'dimension_name': {'key': 'dimensionName', 'type': 'str'},
'measure_unit': {'key': 'measureUnit', 'type': 'str'},
'amount_billed': {'key': 'amountBilled', 'type': 'float'},
'amount_consumed': {'key': 'amountConsumed', 'type': 'float'},
'unit_price': {'key': 'unitPrice', 'type': 'float'},
}
def __init__(
self,
*,
dimension_id: Optional[str] = None,
dimension_name: Optional[str] = None,
measure_unit: Optional[str] = None,
amount_billed: Optional[float] = None,
amount_consumed: Optional[float] = None,
unit_price: Optional[float] = None,
**kwargs
):
"""
:keyword dimension_id: The dimension id.
:paramtype dimension_id: str
:keyword dimension_name: The dimension name.
:paramtype dimension_name: str
:keyword measure_unit: The unit of measure.
:paramtype measure_unit: str
:keyword amount_billed: The amount billed.
:paramtype amount_billed: float
:keyword amount_consumed: The amount consumed.
:paramtype amount_consumed: float
:keyword unit_price: The unit price.
:paramtype unit_price: float
"""
super(UsageEvent, self).__init__(**kwargs)
self.dimension_id = dimension_id
self.dimension_name = dimension_name
self.measure_unit = measure_unit
self.amount_billed = amount_billed
self.amount_consumed = amount_consumed
self.unit_price = unit_price
|
2c904eb153865a9b07d583db909f4a0b14375edb
|
92ae735d5dc6f6a094daedbd32614e714d0b8c4a
|
/registration/tests/admin_approval_backend.py
|
1a7b19536186f2c7536cf6d7ea6e7a3975004e21
|
[
"MIT"
] |
permissive
|
Williano/Final-Senior-Year-Project-
|
3b01ac9fd85753720b01c2245cf9b71648aad35d
|
4bd988575537b37b5cf852b616d3db5666c95e7f
|
refs/heads/master
| 2023-08-07T16:11:42.778492
| 2023-06-05T04:59:06
| 2023-06-05T04:59:06
| 121,346,340
| 173
| 60
|
MIT
| 2023-06-05T04:59:07
| 2018-02-13T06:17:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,079
|
py
|
admin_approval_backend.py
|
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from .default_backend import DefaultBackendViewTests
from registration.backends.admin_approval.views import RegistrationView
from registration.models import SupervisedRegistrationProfile
from registration.users import UserModel
@override_settings(ROOT_URLCONF='test_app.urls_admin_approval')
class AdminApprovalBackendViewTests(DefaultBackendViewTests):
"""
Test the admin_approval registration backend.
Running these tests successfully will require two templates to be
created for the sending of activation emails; details on these
templates and their contexts may be found in the documentation for
the default backend.
"""
registration_profile = SupervisedRegistrationProfile
registration_view = RegistrationView
def test_approval(self):
"""
Approval of an account functions properly.
"""
resp = self.client.post(reverse('registration_register'),
data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'secret'})
profile = self.registration_profile.objects.get(user__username='bob')
resp = self.client.get(
reverse('registration_activate',
args=(),
kwargs={'activation_key': profile.activation_key}))
admin_user = UserModel().objects.create_superuser('admin', 'admin@test.com', 'admin')
        self.client.login(username=admin_user.username, password='admin')
resp = self.client.get(
reverse('registration_admin_approve',
args=(),
kwargs={'profile_id': profile.id}))
user = profile.user
        # fail if the user is not active (admin approval should have activated the account)
self.failIf(not user.is_active)
self.assertRedirects(resp, reverse('registration_approve_complete'))
|
7951aa8cb5972d99308de5e0995763504856d744
|
a4ea525e226d6c401fdb87a6e9adfdc5d07e6020
|
/scripts/dump_command_table.py
|
a4c70bcea78f4dbf81215d485003390ba53ad108
|
[
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] |
permissive
|
Azure/azure-cli
|
13340eeca2e288e66e84d393fa1c8a93d46c8686
|
a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca
|
refs/heads/dev
| 2023-08-17T06:25:37.431463
| 2023-08-17T06:00:10
| 2023-08-17T06:00:10
| 51,040,886
| 4,018
| 3,310
|
MIT
| 2023-09-14T11:11:05
| 2016-02-04T00:21:51
|
Python
|
UTF-8
|
Python
| false
| false
| 5,030
|
py
|
dump_command_table.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import argparse
import inspect
import json
import re
import types
import sys
from azure.cli.core.application import APPLICATION, Application
class Exporter(json.JSONEncoder):
    def default(self, o):  # pylint: disable=method-hidden
try:
return super(Exporter, self).default(o)
except TypeError:
return str(o)
def _dump_command_table(**kwargs):
cmd_table = APPLICATION.configuration.get_command_table()
cmd_list = []
    if cmd_set_names is None:
# if no command prefix specified, use all command table entries
cmd_list = list(cmd_table.keys())
else:
# if the command name matches a prefix, add it to the output list
for name in cmd_table.keys():
for prefix in cmd_set_names:
if name.startswith(prefix):
cmd_list.append(name)
break
filtered_cmd_list = []
param_dict = {k : [] for k in param_names} if param_names else {}
if param_names:
for cmd_name in cmd_list:
table_entry = cmd_table[cmd_name]
table_entry.arguments.update(table_entry.arguments_loader())
cmd_args = list(table_entry.arguments.keys())
for arg in cmd_args:
if arg in param_names:
param_dict[arg].append(cmd_name)
if cmd_name not in filtered_cmd_list:
filtered_cmd_list.append(cmd_name)
else:
filtered_cmd_list = cmd_list
table_entries = []
for cmd_name in filtered_cmd_list:
table_entry = cmd_table[cmd_name]
table_entry.arguments.update(table_entry.arguments_loader())
table_entries.append(_format_entry(cmd_table[cmd_name]))
# output results to STDOUT
result_dict = {'commands': table_entries}
print(json.dumps(result_dict, indent=2, sort_keys=True))
# display summary info with STDERR
print('\n===RESULTS===', file=sys.stderr)
print('{} commands dumped within {} scope with {} parameters'.format(len(table_entries),
cmd_set_names or '*', param_names or 'ANY'), file=sys.stderr)
for param, commands in param_dict.items():
print('\nPARAM: "{}" - {} commands - scope "{}" - {}'.format(
param, len(commands), _get_parameter_scope(param, commands), commands), file=sys.stderr)
sys.exit(0)
def _format_entry(obj):
if not obj:
return obj
elif isinstance(obj, tuple):
return [_format_entry(x) for x in list(obj)]
elif isinstance(obj, PRIMITIVES):
return obj
elif isinstance(obj, types.FunctionType):
return 'function <{}>'.format(obj.__name__)
elif callable(obj):
return 'callable {}'.format(type(obj))
elif isinstance(obj, dict):
new_dict = {key: _format_entry(obj[key]) for key in obj.keys() if key not in IGNORE_ARGS}
_process_null_values(new_dict)
return new_dict
elif isinstance(obj, list):
new_list = [_format_entry(x) for x in obj]
return new_list
else:
new_dict = {key: _format_entry(value) for key, value in vars(obj).items() if key not in IGNORE_ARGS}
_process_null_values(new_dict)
return new_dict
def _get_parameter_scope(param, cmd_list):
if not cmd_list:
return 'N/A (NOT FOUND)'
test_list = cmd_list[0].split(' ')
while len(test_list) > 0:
test_entry = ' '.join(test_list)
all_match = True
for entry in cmd_list[1:]:
if test_entry not in entry:
all_match = False
break
if not all_match:
test_list.pop()
else:
return test_entry
return '_ROOT_'
def _process_null_values(dict_):
if hide_nulls:
null_values = [x for x in dict_.keys() if dict_[x] is None]
for key in null_values:
dict_.pop(key)
def _dashed_to_camel(string):
return string.replace('-', '_')
parser = argparse.ArgumentParser(description='Command Table Parser')
parser.add_argument('--commands', metavar='N', nargs='+', help='Filter by first level command (OR)')
parser.add_argument('--params', metavar='N', nargs='+', help='Filter by parameters (OR)')
parser.add_argument('--hide-nulls', action='store_true', default=False, help='Hide null entries in the output')
args = parser.parse_args()
cmd_set_names = args.commands
param_names = [_dashed_to_camel(x) for x in args.params or []]
hide_nulls = args.hide_nulls
PRIMITIVES = (str, int, bool, float)
IGNORE_ARGS = ['help', 'help_file', 'base_type', 'arguments_loader']
APPLICATION.register(Application.COMMAND_PARSER_LOADED, _dump_command_table)
APPLICATION.execute([])
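# Example invocations (illustrative only; the available command and parameter
# names depend on which CLI command modules are installed):
#
#   python scripts/dump_command_table.py --commands vm network --hide-nulls
#   python scripts/dump_command_table.py --params resource-group-name
#
# The JSON command table is written to STDOUT; the per-parameter summary goes to STDERR.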
|
f8aca30f66c4c8b8e5c4c300dc10429d999a14aa
|
94c0d1574ad8ba81a1ef0d48020b92ba681a5c6a
|
/snakePipes/workflows/createIndices/createIndices
|
83f12d704824286bd477a98d07341f369ab6a9fa
|
[
"MIT"
] |
permissive
|
maxplanck-ie/snakepipes
|
650de654c8bb6b197743d5bb59628df2d91d3a79
|
6144e3fdc1bdaa26e05b1cb234df7414c61e283a
|
refs/heads/master
| 2023-09-05T09:25:33.130890
| 2023-06-05T13:38:57
| 2023-06-05T13:38:57
| 54,579,435
| 318
| 91
|
MIT
| 2023-08-22T12:07:49
| 2016-03-23T17:23:31
|
Python
|
UTF-8
|
Python
| false
| false
| 6,305
|
createIndices
|
#!/usr/bin/env python3
__description__ = """
Create indices for use by snakePipes. A YAML file will be created by default in the default location where snakePipes looks for organism YAML files.
usage example:
createIndices -o output-dir --genome ftp://ftp.ensembl.org/pub/release-93/fasta/mus_musculus/dna/Mus_musculus.GRCm38.dna_sm.primary_assembly.fa.gz --gtf ftp://ftp.ensembl.org/pub/release-93/gtf/mus_musculus/Mus_musculus.GRCm38.93.gtf.gz --blacklist blacklist.bed --ignoreForNormalization ignore.txt GRCm38_release93
"""
import argparse
import os
import sys
import textwrap
import snakePipes.common_functions as cf
import snakePipes.parserCommon as parserCommon
def parse_args(defaults={"configFile": None, "clusterConfigFile": None,
"maxJobs": 5, "snakemakeOptions": "--use-conda",
"tempDir": None, "verbose": False, "spikeinExt": None}):
"""
Parse arguments from the command line.
"""
mainArgs = parserCommon.mainArguments(defaults, createIndices=True)
parser = argparse.ArgumentParser(
prog=sys.argv[0],
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(__description__),
parents=[mainArgs],
add_help=False
)
parser.add_argument("genome", metavar="GENOME", help="The name to save this genome as. No spaces or special characters! Specifying an organism that already exists will cause the old information to be overwritten. See also the --userYAML option.")
# Required arguments, which already exists as an argument group
required = [grp for grp in parser._action_groups if grp.title == 'Required Arguments'][0]
required.add_argument("--genomeURL",
required=True,
help="URL or local path to where the genome fasta file is located. The file may optionally be gzipped.")
required.add_argument("--gtfURL",
help="URL or local path to where the genome annotation in GTF format is located. GFF is NOT supported. The file may optionally be gzipped. If this file is not specified, then RNA-seq related tools will NOT be usable.")
# Workflow options
optional = parser.add_argument_group('Options')
optional.add_argument("--spikeinGenomeURL",
help="URL or local path to where the spikein genome fasta file is located. The file may optionally be gzipped.")
optional.add_argument("--spikeinGtfURL",
help="URL or local path to where the spikein genome annotation in GTF format is located. GFF is NOT supported. The file may optionally be gzipped.")
optional.add_argument("--spikeinExt",
dest="spikeinExt",
help="Extention of spikein chromosome names in the hybrid genome. (default: '%(default)s') .",
default=defaults["spikeinExt"])
optional.add_argument("--tools",
help="Only produce indices for the following tools (by default, all indices will be created). The default is 'all'. 'none' will create everything except aligner indices.",
default="all",
nargs="+",
choices=['all', 'bowtie2', 'hisat2', 'bwa', 'bwa-mem2', 'bwameth', 'bwameth2', 'star', 'none'])
optional.add_argument("--effectiveGenomeSize",
type=int,
help="The effective genome size. If you don't specify a value then the number of non-N bases will be used.")
optional.add_argument("--spikeinBlacklist",
help="An optional URL or local path to a file to use to blacklist spikein organism regions (such as that provided by the ENCODE consortium).")
optional.add_argument("--blacklist",
help="An optional URL or local path to a file to use to blacklist regions (such as that provided by the ENCODE consortium).")
optional.add_argument("--ignoreForNormalization",
help="An optional file list, with one entry per line, the chromosomes to ignore during normalization. These are typically sex chromosomes, mitochondrial DNA, and unplaced contigs.")
optional.add_argument("--rmskURL",
help="URL or local path to where the repeat masker output file is located. This is only required if you plan to run the non-coding RNA-seq workflow.")
optional.add_argument("--userYAML",
action="store_true",
help="By default, this workflow creates an organism YAML file where snakePipes will look for it by default. If this isn't desired (e.g., you don't want the organism to be selectable by default or you don't have write permissions to the snakePipes installation) you can specify this option and the YAML file will instead be created in the location specified by the `-o` option.")
return parser
def main():
baseDir, workflowDir, defaults = cf.setDefaults(os.path.basename(__file__))
# get command line arguments
parser = parse_args(defaults)
args = parser.parse_args()
args, defaults = cf.handleUserArgs(args, defaults, parse_args)
# we also add these paths to config, although we don't use them in the Snakefile
args.baseDir = baseDir
# Common arguments
cf.checkCommonArguments(args, baseDir, outDir=True, createIndices=True)
### Workflow-specific arguments
if args.ignoreForNormalization:
args.ignoreForNormalization = os.path.abspath(args.ignoreForNormalization)
if not os.path.exists(args.ignoreForNormalization):
sys.exit("The file specified by `--ignoreForNormalization` does not exist!\n")
if args.blacklist:
if os.path.exists(args.blacklist):
args.blacklist = os.path.abspath(args.blacklist)
###
# Handle YAML and log files
snakemake_cmd = cf.commonYAMLandLogs(baseDir, workflowDir, defaults, args, __file__)
logfile_name = cf.logAndExport(args, os.path.basename(__file__))
# Run everything
cf.runAndCleanup(args, snakemake_cmd, logfile_name)
#CreateDAG
cf.print_DAG(args,snakemake_cmd, __file__,defaults)
if __name__ == "__main__":
main()
|
|
ca7e6ff1e5c08b602c944f75e92862c87f7ba7e6
|
d1c2d00078520cd556f60b7213c27856f8b3460d
|
/sdks/python/apache_beam/io/gcp/datastore/v1new/datastore_write_it_test.py
|
abecd5b6a4cfbfcb22621fdaef6473c0fce3e45d
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-protobuf",
"Apache-2.0",
"Python-2.0"
] |
permissive
|
apache/beam
|
ed11b9e043465c720659eac20ac71b5b171bfa88
|
6d5048e05087ea54abc889ce402ae2a0abb9252b
|
refs/heads/master
| 2023-09-04T07:41:07.002653
| 2023-09-01T23:01:05
| 2023-09-01T23:01:05
| 50,904,245
| 7,061
| 4,522
|
Apache-2.0
| 2023-09-14T21:43:38
| 2016-02-02T08:00:06
|
Java
|
UTF-8
|
Python
| false
| false
| 2,570
|
py
|
datastore_write_it_test.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An integration test for datastore_write_it_pipeline
This test creates entities and writes them to Cloud Datastore. Subsequently,
these entities are read from Cloud Datastore, compared to the expected value
for the entity, and deleted.
There is no output; instead, we use `assert_that` transform to verify the
results in the pipeline.
"""
# pytype: skip-file
import logging
import random
import unittest
from datetime import datetime
import pytest
from hamcrest.core.core.allof import all_of
from apache_beam.testing.pipeline_verifiers import PipelineStateMatcher
from apache_beam.testing.test_pipeline import TestPipeline
try:
from apache_beam.io.gcp.datastore.v1new import datastore_write_it_pipeline
except ImportError:
datastore_write_it_pipeline = None # type: ignore
class DatastoreWriteIT(unittest.TestCase):
NUM_ENTITIES = 1001
LIMIT = 500
def run_datastore_write(self, limit=None):
test_pipeline = TestPipeline(is_integration_test=True)
current_time = datetime.now().strftime("%m%d%H%M%S")
seed = random.randint(0, 100000)
kind = 'testkind%s%d' % (current_time, seed)
pipeline_verifiers = [PipelineStateMatcher()]
extra_opts = {
'kind': kind,
'num_entities': self.NUM_ENTITIES,
'on_success_matcher': all_of(*pipeline_verifiers)
}
if limit is not None:
extra_opts['limit'] = limit
datastore_write_it_pipeline.run(
test_pipeline.get_full_options_as_args(**extra_opts))
@pytest.mark.it_postcommit
@unittest.skipIf(
datastore_write_it_pipeline is None, 'GCP dependencies are not installed')
def test_datastore_write_limit(self):
self.run_datastore_write(limit=self.LIMIT)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
9642e3fc538589c9556d35d4b72738221f9e12e9
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Sklearn_scipy_numpy/source/numpy/distutils/tests/swig_ext/setup.py
|
f6e07303bea646ba68664dc87dad914817f03715
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 742
|
py
|
setup.py
|
#!/usr/bin/env python
from __future__ import division, print_function
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('swig_ext', parent_package, top_path)
config.add_extension('_example',
['src/example.i', 'src/example.c']
)
config.add_extension('_example2',
['src/zoo.i', 'src/zoo.cc'],
depends=['src/zoo.h'],
include_dirs=['src']
)
config.add_data_dir('tests')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(configuration=configuration)
|
3db100920fe559b89bc8f737b0ba6df18ddf47c0
|
50203b4a349dcb2ed1e72c9f5463d84db8a6e983
|
/skyline/functions/pandas/csv_to_timeseries.py
|
92a2f3ade7079763c4e36a324ff5161b76a3a462
|
[
"MIT"
] |
permissive
|
earthgecko/skyline
|
97e43df824d7c92d68086f529f0f3d051a7debb0
|
c2edc451e63d5eb57117ddcfbc6e79100e706460
|
refs/heads/master
| 2023-08-30T08:36:50.740285
| 2023-06-28T15:33:47
| 2023-06-28T15:33:47
| 20,475,900
| 482
| 74
|
NOASSERTION
| 2023-06-28T15:33:49
| 2014-06-04T08:33:15
|
Python
|
UTF-8
|
Python
| false
| false
| 3,368
|
py
|
csv_to_timeseries.py
|
"""
csv_to_timeseries.py
"""
import logging
import traceback
from os import path
import numpy as np
import pandas as pd
# @added 20221204 - Feature #4754: csv_to_timeseries
# Feature #4734: mirage_vortex
# Branch #4728: vortex
def csv_to_timeseries(current_skyline_app, csv_file, log=True):
"""
Convert a csv to a timeseries list. csv format must be:
timestamp,value
1670147592,1
1670147652,2
1670147712,3
Or
date,value
2022-12-04 09:54:22,1
2022-12-04 09:55:22,2
2022-12-04 09:56:22,3
If dates are passed they are coerced into UTC unix timestamps.
    :param current_skyline_app: the app calling the function so the function
        knows which log to write to.
    :param csv_file: the csv file with timeseries data
    :param log: whether to log or not, optional, defaults to True
:type current_skyline_app: str
:type csv_file: str
:type log: boolean
:return: timeseries
:rtype: list
"""
if log:
current_skyline_app_logger = current_skyline_app + 'Log'
current_logger = logging.getLogger(current_skyline_app_logger)
else:
current_logger = None
function_str = 'functions.pandas.csv_to_timeseries'
timeseries = []
    if not path.isfile(csv_file):
        if not log:
            current_skyline_app_logger = current_skyline_app + 'Log'
            current_logger = logging.getLogger(current_skyline_app_logger)
        current_logger.error('error :: %s :: file not found - %s' % (
            function_str, str(csv_file)))
        return timeseries
df = None
try:
df = pd.read_csv(csv_file, header=0)
if log:
current_logger.info('%s :: pandas dataframe created from csv_file - %s' % (
function_str, csv_file))
except Exception as err:
if not log:
current_skyline_app_logger = current_skyline_app + 'Log'
current_logger = logging.getLogger(current_skyline_app_logger)
current_logger.error(traceback.format_exc())
current_logger.error('error :: %s :: failed to read %s to DataFrame - %s' % (
function_str, csv_file, err))
return timeseries
try:
df.rename(columns={df.columns[0]: 'timestamp'}, inplace=True)
unix_timestamps = False
if isinstance(df['timestamp'][0], (np.int64, np.float64)):
unix_timestamps = True
if log:
current_logger.info('%s :: timestamps are type: %s' % (
function_str, str(type(df['timestamp'][0]))))
if unix_timestamps:
df['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
else:
df['timestamp'] = pd.to_datetime(df['timestamp'])
df['timestamp'] = df.timestamp.values.astype(np.int64) // 10 ** 9
timeseries = df.values.tolist()
timeseries = [[int(t), v] for t, v in timeseries]
if log:
current_logger.info('%s :: converted to timeseries of length: %s' % (
function_str, str(len(timeseries))))
except Exception as err:
if not log:
current_skyline_app_logger = current_skyline_app + 'Log'
current_logger = logging.getLogger(current_skyline_app_logger)
current_logger.error(traceback.format_exc())
current_logger.error('error :: %s :: failed to convert DataFrame to timeseries - %s' % (
function_str, err))
return 'error: ' + str(err)
return timeseries
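# Example (illustrative only; the app name and file path are placeholders):
#
#     timeseries = csv_to_timeseries('webapp', '/tmp/metric.csv', log=False)
#     # -> [[1670147592, 1], [1670147652, 2], [1670147712, 3]]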
|
b9248f147e2ae4732752a92a201b73fa28b594e8
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/tests/pytests/unit/states/test_boto_iam_role.py
|
0a577e569785dba32ab38afba4333dec775a0392
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 6,436
|
py
|
test_boto_iam_role.py
|
"""
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
import pytest
import salt.states.boto_iam_role as boto_iam_role
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {boto_iam_role: {}}
def test_present():
"""
Test to ensure the IAM role exists.
"""
name = "myrole"
ret = {"name": name, "result": False, "changes": {}, "comment": ""}
_desc_role = {
"create_date": "2015-02-11T19:47:14Z",
"role_id": "HIUHBIUBIBNKJNBKJ",
"assume_role_policy_document": {
"Version": "2008-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {"Service": "ec2.amazonaws.com"},
"Effect": "Allow",
}
],
},
"role_name": "myfakerole",
"path": "/",
"arn": "arn:aws:iam::12345:role/myfakerole",
}
_desc_role2 = {
"create_date": "2015-02-11T19:47:14Z",
"role_id": "HIUHBIUBIBNKJNBKJ",
"assume_role_policy_document": {
"Version": "2008-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": ["ec2.amazonaws.com", "datapipeline.amazonaws.com"]
},
"Effect": "Allow",
}
],
},
"role_name": "myfakerole",
"path": "/",
"arn": "arn:aws:iam::12345:role/myfakerole",
}
mock_desc = MagicMock(
side_effect=[False, _desc_role, _desc_role, _desc_role2, _desc_role]
)
_build_policy = {
"Version": "2008-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {"Service": "ec2.amazonaws.com"},
}
],
}
mock_policy = MagicMock(return_value=_build_policy)
mock_ipe = MagicMock(side_effect=[False, True, True, True])
mock_pa = MagicMock(side_effect=[False, True, True, True])
mock_bool = MagicMock(return_value=False)
mock_lst = MagicMock(return_value=[])
with patch.dict(
boto_iam_role.__salt__,
{
"boto_iam.describe_role": mock_desc,
"boto_iam.create_role": mock_bool,
"boto_iam.build_policy": mock_policy,
"boto_iam.update_assume_role_policy": mock_bool,
"boto_iam.instance_profile_exists": mock_ipe,
"boto_iam.list_attached_role_policies": mock_lst,
"boto_iam.create_instance_profile": mock_bool,
"boto_iam.profile_associated": mock_pa,
"boto_iam.associate_profile_to_role": mock_bool,
"boto_iam.list_role_policies": mock_lst,
},
):
with patch.dict(boto_iam_role.__opts__, {"test": False}):
comt = " Failed to create {} IAM role.".format(name)
ret.update({"comment": comt})
assert boto_iam_role.present(name) == ret
comt = " myrole role present. Failed to create myrole instance profile."
ret.update({"comment": comt})
assert boto_iam_role.present(name) == ret
comt = (
" myrole role present. Failed to associate myrole"
" instance profile with myrole role."
)
ret.update({"comment": comt})
assert boto_iam_role.present(name) == ret
comt = " myrole role present. Failed to update assume role policy."
ret.update({"comment": comt})
assert boto_iam_role.present(name) == ret
comt = " myrole role present. "
ret.update({"comment": comt, "result": True})
assert boto_iam_role.present(name) == ret
def test_absent():
"""
Test to ensure the IAM role is deleted.
"""
name = "myrole"
ret = {"name": name, "result": False, "changes": {}, "comment": ""}
mock = MagicMock(
side_effect=[
["mypolicy"],
["mypolicy"],
False,
True,
False,
False,
True,
False,
False,
False,
True,
]
)
mock_bool = MagicMock(return_value=False)
mock_lst = MagicMock(return_value=[])
with patch.dict(
boto_iam_role.__salt__,
{
"boto_iam.list_role_policies": mock,
"boto_iam.delete_role_policy": mock_bool,
"boto_iam.profile_associated": mock,
"boto_iam.disassociate_profile_from_role": mock_bool,
"boto_iam.instance_profile_exists": mock,
"boto_iam.list_attached_role_policies": mock_lst,
"boto_iam.delete_instance_profile": mock_bool,
"boto_iam.role_exists": mock,
"boto_iam.delete_role": mock_bool,
},
):
with patch.dict(boto_iam_role.__opts__, {"test": False}):
comt = " Failed to add policy mypolicy to role myrole"
ret.update(
{
"comment": comt,
"changes": {
"new": {"policies": ["mypolicy"]},
"old": {"policies": ["mypolicy"]},
},
}
)
assert boto_iam_role.absent(name) == ret
comt = (
" No policies in role myrole."
" No attached policies in role myrole. Failed to disassociate "
"myrole instance profile from myrole role."
)
ret.update({"comment": comt, "changes": {}})
assert boto_iam_role.absent(name) == ret
comt = (
" No policies in role myrole."
" No attached policies in role myrole. "
" Failed to delete myrole instance profile."
)
ret.update({"comment": comt, "changes": {}})
assert boto_iam_role.absent(name) == ret
comt = (
" No policies in role myrole."
" No attached policies in role myrole. myrole instance profile "
"does not exist. Failed to delete myrole iam role."
)
ret.update({"comment": comt, "changes": {}})
assert boto_iam_role.absent(name) == ret
|
773ee50dea8edac92f8ab479128cc9abca28b6c9
|
e03bce53de6f88c0e09f56e4fe11c36af0f1161f
|
/tests/unit/cfngin/factories.py
|
64dfaab9a78c62603590f51b63ea3e2cff09452e
|
[
"Apache-2.0"
] |
permissive
|
onicagroup/runway
|
20c31df9cbc1a1ffc5c9aa468ce5cf7d6ac7899f
|
0763b06aee07d2cf3f037a49ca0cb81a048c5deb
|
refs/heads/master
| 2023-08-30T22:35:54.113981
| 2023-08-29T14:13:35
| 2023-08-29T14:13:35
| 122,529,924
| 156
| 79
|
Apache-2.0
| 2023-09-13T13:43:50
| 2018-02-22T20:12:55
|
Python
|
UTF-8
|
Python
| false
| false
| 3,427
|
py
|
factories.py
|
"""Factories for tests."""
# pylint: disable=unused-argument
# pyright: basic
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, NamedTuple, Optional
from mock import MagicMock
from runway.cfngin.providers.aws.default import ProviderBuilder
from runway.config import CfnginConfig, CfnginStackDefinitionModel
from runway.context import CfnginContext
if TYPE_CHECKING:
from runway.cfngin.providers.aws.default import Provider
class Lookup(NamedTuple):
"""Lookup named tuple."""
type: str
input: str
raw: str
class MockThreadingEvent:
"""Mock thread events."""
def wait(self, timeout: Optional[int] = None) -> bool:
"""Mock wait method."""
return False
class MockProviderBuilder(ProviderBuilder):
"""Mock provider builder."""
def __init__( # pylint: disable=super-init-not-called
self, *, provider: Provider, region: Optional[str] = None, **_: Any
) -> None:
"""Instantiate class."""
self.provider = provider
self.region = region
def build(
self, *, profile: Optional[str] = None, region: Optional[str] = None
) -> Provider:
"""Mock build method."""
return self.provider
def mock_provider(**kwargs: Any) -> MagicMock:
"""Mock provider."""
return MagicMock(**kwargs)
def mock_context(
namespace: str = "default",
extra_config_args: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> CfnginContext:
"""Mock context."""
config_args = {"namespace": namespace}
if extra_config_args:
config_args.update(extra_config_args)
config = CfnginConfig.parse_obj(config_args)
if kwargs.get("environment"):
return CfnginContext(config=config, **kwargs)
return CfnginContext(config=config, environment={}, **kwargs)
def generate_definition(
base_name: str, stack_id: Any = None, **overrides: Any
) -> CfnginStackDefinitionModel:
"""Generate definitions."""
definition: Dict[str, Any] = {
"name": f"{base_name}-{stack_id}" if stack_id else base_name,
"class_path": f"tests.unit.cfngin.fixtures.mock_blueprints.{base_name.upper()}",
"requires": [],
}
definition.update(overrides)
return CfnginStackDefinitionModel(**definition)
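# Example (illustrative only; "vpc" names a hypothetical mock blueprint fixture):
#
#     definition = generate_definition("vpc", 1)
#     # definition.name == "vpc-1"
#     # definition.class_path == "tests.unit.cfngin.fixtures.mock_blueprints.VPC"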
def mock_lookup(
lookup_input: Any, lookup_type: str, raw: Optional[str] = None
) -> Lookup:
"""Mock lookup."""
if raw is None:
raw = f"{lookup_type} {lookup_input}"
return Lookup(type=lookup_type, input=lookup_input, raw=raw)
class SessionStub:
"""Stubber class for boto3 sessions made with session_cache.get_session().
This is a helper class that should be used when trying to stub out
get_session() calls using the boto3.stubber.
Example Usage:
@mock.patch('runway.cfngin.lookups.handlers.myfile.get_session',
return_value=sessionStub(client))
def myfile_test(self, client_stub):
...
Attributes:
client_stub (:class:`boto3.session.Session`:): boto3 session stub
"""
def __init__(self, client_stub: Any):
"""Instantiate class."""
self.client_stub = client_stub
def client(self, region: str) -> Any:
"""Return the stubbed client object.
Args:
region: So boto3 won't complain
Returns:
The stubbed boto3 session
"""
return self.client_stub
|
10407206270dbd66cef2dd5f9c63d37550bb6750
|
1189274756491642c85736dea8bd0c04ef71635e
|
/utils/utils.py
|
520d7ad490e658d78caf5fa0047333733dd1b7f9
|
[] |
no_license
|
sidroopdaska/SelfDrivingRCCar
|
ff5b157fab692550f0e0df59c8f1654a9ceb4046
|
3b973da5310cec4b7d950c7c8b7fb99e429b4ee1
|
refs/heads/master
| 2023-06-17T07:42:59.395132
| 2021-07-16T10:14:06
| 2021-07-16T10:14:06
| 87,363,975
| 124
| 45
| null | 2021-07-16T10:14:07
| 2017-04-05T22:58:53
|
Python
|
UTF-8
|
Python
| false
| false
| 495
|
py
|
utils.py
|
import serial
import serial.tools.list_ports
arduino_serial_number = '75237333536351F0F0C1'
# server_address_home = ('192.168.0.88', 45713)
server_address_corp = ('10.72.76.108', 45713)
server_address = server_address_corp
rpi = "10.104.66.208', 49214"
def find_arduino(serial_number):
for p in serial.tools.list_ports.comports():
if p.serial_number == serial_number:
return serial.Serial(p.device)
    raise IOError("Could not find the Arduino - is it plugged in?")
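# Example usage (the command byte-string is a placeholder for whatever protocol
# the sketch running on the Arduino expects):
#
#     arduino = find_arduino(arduino_serial_number)
#     arduino.write(b'forward')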
|
79d5f3f7eed97c64ec9e93763e7b0c9bd2f90bb4
|
3e1f6dfde5c940f7acde208d098e56a54550945f
|
/dash-user-guide-components/_validate_init.py
|
f519d2736b389bb7d0bec686ca379e240d39b429
|
[
"MIT"
] |
permissive
|
plotly/dash-docs
|
a4d1b9e450aa19e811f8ae043fd56de330cce63a
|
f494e987701be1085ba9fb7b29bd875ee2146d5b
|
refs/heads/master
| 2023-08-03T02:18:16.257115
| 2021-12-14T18:51:52
| 2021-12-14T18:51:52
| 84,095,619
| 396
| 210
|
MIT
| 2023-01-18T20:29:56
| 2017-03-06T16:30:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,668
|
py
|
_validate_init.py
|
"""
DO NOT MODIFY
This file is used to validate your publish settings.
"""
from __future__ import print_function
import os
import sys
import importlib
components_package = 'dash_user_guide_components'
components_lib = importlib.import_module(components_package)
missing_dist_msg = 'Warning {} was not found in `{}.__init__.{}`!!!'
missing_manifest_msg = '''
Warning {} was not found in `MANIFEST.in`!
It will not be included in the build!
'''
with open('MANIFEST.in', 'r') as f:
manifest = f.read()
def check_dist(dist, filename):
# Support the dev bundle.
if filename.endswith('dev.js'):
return True
return any(
filename in x
for d in dist
for x in (
[d.get('relative_package_path')]
if not isinstance(d.get('relative_package_path'), list)
else d.get('relative_package_path')
)
)
def check_manifest(filename):
return filename in manifest
def check_file(dist, filename):
if not check_dist(dist, filename):
print(
missing_dist_msg.format(filename, components_package, '_js_dist'),
file=sys.stderr
)
if not check_manifest(filename):
print(missing_manifest_msg.format(filename),
file=sys.stderr)
for cur, _, files in os.walk(components_package):
for f in files:
if f.endswith('js'):
# noinspection PyProtectedMember
check_file(components_lib._js_dist, f)
elif f.endswith('css'):
# noinspection PyProtectedMember
check_file(components_lib._css_dist, f)
elif not f.endswith('py'):
check_manifest(f)
|
1745fdde7f1fe89be3da1d0a00d6378eb2e6c774
|
a133a7c64f6e08def0f936898466990d1fd1b31f
|
/atomate/lammps/fireworks/__init__.py
|
ab4172e2a37509f05c1fa6dcb20972159a562780
|
[
"LicenseRef-scancode-hdf5",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
hackingmaterials/atomate
|
a6458f9323b8f14d7b4ebb6558fb578d50a3f1ed
|
f4060e55ae3a22289fde9516ff0e8e4ac1d22190
|
refs/heads/main
| 2023-08-07T21:53:24.701157
| 2023-07-25T22:28:06
| 2023-07-25T22:28:06
| 43,023,379
| 217
| 173
|
NOASSERTION
| 2023-08-25T22:09:48
| 2015-09-23T19:53:55
|
Python
|
UTF-8
|
Python
| false
| false
| 60
|
py
|
__init__.py
|
# from .core import LammpsForceFieldFW, LammpsFW, PackmolFW
|
41c268eaf3943850126b4557027f5b21c7c1a744
|
47ef6997d03f4d5c921c83cc09aef1dfc6828e2c
|
/zeus/modules/operators/__init__.py
|
55d8e4bac266b0b61fa26fef2898eb7111dc8baf
|
[
"MIT"
] |
permissive
|
huawei-noah/xingtian
|
620c9f245183d636e0a65659fd99a984397ecbd4
|
e4ef3a1c92d19d1d08c3ef0e2156b6fecefdbe04
|
refs/heads/master
| 2023-09-03T01:10:21.768245
| 2022-03-21T03:39:39
| 2022-03-21T03:39:39
| 287,759,621
| 308
| 91
|
MIT
| 2023-09-12T11:33:22
| 2020-08-15T14:13:06
|
Python
|
UTF-8
|
Python
| false
| false
| 131
|
py
|
__init__.py
|
from .conv import *
from .cell import *
from .mix_ops import *
from .prune import *
from .ops import *
from .prune_filter import *
|
470b8a9805935813b2a0d37912f0a120d7133e1f
|
e65a4dbfbfb0e54e59787ba7741efee12f7687f3
|
/audio/mixxx22/files/patch-build_qt5.py
|
d0927978bcd756e11892bb4cd120644d253a235d
|
[
"BSD-2-Clause"
] |
permissive
|
freebsd/freebsd-ports
|
86f2e89d43913412c4f6b2be3e255bc0945eac12
|
605a2983f245ac63f5420e023e7dce56898ad801
|
refs/heads/main
| 2023-08-30T21:46:28.720924
| 2023-08-30T19:33:44
| 2023-08-30T19:33:44
| 1,803,961
| 916
| 918
|
NOASSERTION
| 2023-09-08T04:06:26
| 2011-05-26T11:15:35
| null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
patch-build_qt5.py
|
--- build/qt5.py.orig 2020-05-15 21:26:10 UTC
+++ build/qt5.py
@@ -43,7 +43,7 @@ import SCons.Scanner
import SCons.Tool
import SCons.Util
-class ToolQt5Warning(SCons.Warnings.Warning):
+class ToolQt5Warning(SCons.Warnings.SConsWarning):
pass
class GeneratedMocFileNotIncluded(ToolQt5Warning):
|
e1ff48a47215ea2f054ae0d0c01cf9fa1f35fc8a
|
78ce285eff8566e220a76626aaab0f9e6c2b1c80
|
/python/surf/protocols/batcher/__init__.py
|
eeab102755138df1f4d8f080a3fbd4d0f60bf5d5
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
slaclab/surf
|
315dfa6c866e34a01befe780e3a8dbb8caf434ee
|
c9f42cecda8db19e971a0b33a2add6ff0ea08eff
|
refs/heads/master
| 2023-08-17T23:10:12.674166
| 2023-07-22T01:21:03
| 2023-07-22T01:21:03
| 66,395,341
| 220
| 56
|
NOASSERTION
| 2023-09-01T17:21:33
| 2016-08-23T19:06:56
|
VHDL
|
UTF-8
|
Python
| false
| false
| 134
|
py
|
__init__.py
|
from surf.protocols.batcher._AxiStreamBatcherAxil import *
from surf.protocols.batcher._AxiStreamBatcherEventBuilder import *
|
30317d461950b37698480ed05b4da6ed76dec58a
|
c703b8ac3b5545857f6c95efa2d61eaf7a664021
|
/iPERCore/tools/human_digitalizer/smplx/joint_names.py
|
5738efb4bd3fec362d0b39278fc9055ddddc54e6
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
iPERDance/iPERCore
|
d29681d229b3098b3517b1abf4f7ea65f579de73
|
fcf9a18ffd66bf3fdd3eea4153a3bc4785131848
|
refs/heads/main
| 2023-07-30T15:04:15.835396
| 2023-04-12T14:21:23
| 2023-04-12T14:21:23
| 313,664,064
| 2,520
| 339
|
Apache-2.0
| 2023-05-12T03:26:52
| 2020-11-17T15:36:25
|
Python
|
UTF-8
|
Python
| false
| false
| 4,897
|
py
|
joint_names.py
|
# -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: ps-license@tuebingen.mpg.de
JOINT_NAMES = [
'pelvis',
'left_hip',
'right_hip',
'spine1',
'left_knee',
'right_knee',
'spine2',
'left_ankle',
'right_ankle',
'spine3',
'left_foot',
'right_foot',
'neck',
'left_collar',
'right_collar',
'head',
'left_shoulder',
'right_shoulder',
'left_elbow',
'right_elbow',
'left_wrist',
'right_wrist',
'jaw',
'left_eye_smplhf',
'right_eye_smplhf',
'left_index1',
'left_index2',
'left_index3',
'left_middle1',
'left_middle2',
'left_middle3',
'left_pinky1',
'left_pinky2',
'left_pinky3',
'left_ring1',
'left_ring2',
'left_ring3',
'left_thumb1',
'left_thumb2',
'left_thumb3',
'right_index1',
'right_index2',
'right_index3',
'right_middle1',
'right_middle2',
'right_middle3',
'right_pinky1',
'right_pinky2',
'right_pinky3',
'right_ring1',
'right_ring2',
'right_ring3',
'right_thumb1',
'right_thumb2',
'right_thumb3',
'nose',
'right_eye',
'left_eye',
'right_ear',
'left_ear',
'left_big_toe',
'left_small_toe',
'left_heel',
'right_big_toe',
'right_small_toe',
'right_heel',
'left_thumb',
'left_index',
'left_middle',
'left_ring',
'left_pinky',
'right_thumb',
'right_index',
'right_middle',
'right_ring',
'right_pinky',
'right_eye_brow1',
'right_eye_brow2',
'right_eye_brow3',
'right_eye_brow4',
'right_eye_brow5',
'left_eye_brow5',
'left_eye_brow4',
'left_eye_brow3',
'left_eye_brow2',
'left_eye_brow1',
'nose1',
'nose2',
'nose3',
'nose4',
'right_nose_2',
'right_nose_1',
'nose_middle',
'left_nose_1',
'left_nose_2',
'right_eye1',
'right_eye2',
'right_eye3',
'right_eye4',
'right_eye5',
'right_eye6',
'left_eye4',
'left_eye3',
'left_eye2',
'left_eye1',
'left_eye6',
'left_eye5',
'right_mouth_1',
'right_mouth_2',
'right_mouth_3',
'mouth_top',
'left_mouth_3',
'left_mouth_2',
'left_mouth_1',
'left_mouth_5', # 59 in OpenPose output
'left_mouth_4', # 58 in OpenPose output
'mouth_bottom',
'right_mouth_4',
'right_mouth_5',
'right_lip_1',
'right_lip_2',
'lip_top',
'left_lip_2',
'left_lip_1',
'left_lip_3',
'lip_bottom',
'right_lip_3',
# Face contour
'right_contour_1',
'right_contour_2',
'right_contour_3',
'right_contour_4',
'right_contour_5',
'right_contour_6',
'right_contour_7',
'right_contour_8',
'contour_middle',
'left_contour_8',
'left_contour_7',
'left_contour_6',
'left_contour_5',
'left_contour_4',
'left_contour_3',
'left_contour_2',
'left_contour_1',
]
SMPLH_JOINT_NAMES = [
'pelvis',
'left_hip',
'right_hip',
'spine1',
'left_knee',
'right_knee',
'spine2',
'left_ankle',
'right_ankle',
'spine3',
'left_foot',
'right_foot',
'neck',
'left_collar',
'right_collar',
'head',
'left_shoulder',
'right_shoulder',
'left_elbow',
'right_elbow',
'left_wrist',
'right_wrist',
'left_index1',
'left_index2',
'left_index3',
'left_middle1',
'left_middle2',
'left_middle3',
'left_pinky1',
'left_pinky2',
'left_pinky3',
'left_ring1',
'left_ring2',
'left_ring3',
'left_thumb1',
'left_thumb2',
'left_thumb3',
'right_index1',
'right_index2',
'right_index3',
'right_middle1',
'right_middle2',
'right_middle3',
'right_pinky1',
'right_pinky2',
'right_pinky3',
'right_ring1',
'right_ring2',
'right_ring3',
'right_thumb1',
'right_thumb2',
'right_thumb3',
'nose',
'right_eye',
'left_eye',
'right_ear',
'left_ear',
'left_big_toe',
'left_small_toe',
'left_heel',
'right_big_toe',
'right_small_toe',
'right_heel',
'left_thumb',
'left_index',
'left_middle',
'left_ring',
'left_pinky',
'right_thumb',
'right_index',
'right_middle',
'right_ring',
'right_pinky',
]
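# Minimal usage sketch (not part of the original file): these name lists are
# typically turned into name -> index lookups when selecting rows from a
# (num_joints, 3) array of joint positions. The dictionary names below are
# illustrative assumptions.
JOINT_IDS = {name: idx for idx, name in enumerate(JOINT_NAMES)}
SMPLH_JOINT_IDS = {name: idx for idx, name in enumerate(SMPLH_JOINT_NAMES)}
# e.g. joints[JOINT_IDS['left_wrist']] selects the left wrist position.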
|
eeee37ffa96a6b28c2d5f781869b9699ecea45b8
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/mako/mako/mako/exceptions.py
|
31c695fd78f18f8f195f326c6aa248aba07fe0bb
|
[
"LicenseRef-scancode-proprietary-license",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"Apache-2.0"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 12,530
|
py
|
exceptions.py
|
# mako/exceptions.py
# Copyright 2006-2022 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""exception classes"""
import sys
import traceback
from mako import compat
from mako import util
class MakoException(Exception):
pass
class RuntimeException(MakoException):
pass
def _format_filepos(lineno, pos, filename):
if filename is None:
return " at line: %d char: %d" % (lineno, pos)
else:
return " in file '%s' at line: %d char: %d" % (filename, lineno, pos)
class CompileException(MakoException):
def __init__(self, message, source, lineno, pos, filename):
MakoException.__init__(
self, message + _format_filepos(lineno, pos, filename)
)
self.lineno = lineno
self.pos = pos
self.filename = filename
self.source = source
class SyntaxException(MakoException):
def __init__(self, message, source, lineno, pos, filename):
MakoException.__init__(
self, message + _format_filepos(lineno, pos, filename)
)
self.lineno = lineno
self.pos = pos
self.filename = filename
self.source = source
class UnsupportedError(MakoException):
"""raised when a retired feature is used."""
class NameConflictError(MakoException):
"""raised when a reserved word is used inappropriately"""
class TemplateLookupException(MakoException):
pass
class TopLevelLookupException(TemplateLookupException):
pass
class RichTraceback:
"""Pull the current exception from the ``sys`` traceback and extracts
Mako-specific template information.
See the usage examples in :ref:`handling_exceptions`.
"""
def __init__(self, error=None, traceback=None):
self.source, self.lineno = "", 0
if error is None or traceback is None:
t, value, tback = sys.exc_info()
if error is None:
error = value or t
if traceback is None:
traceback = tback
self.error = error
self.records = self._init(traceback)
if isinstance(self.error, (CompileException, SyntaxException)):
self.source = self.error.source
self.lineno = self.error.lineno
self._has_source = True
self._init_message()
@property
def errorname(self):
return compat.exception_name(self.error)
def _init_message(self):
"""Find a unicode representation of self.error"""
try:
self.message = str(self.error)
except UnicodeError:
try:
self.message = str(self.error)
except UnicodeEncodeError:
# Fallback to args as neither unicode nor
# str(Exception(u'\xe6')) work in Python < 2.6
self.message = self.error.args[0]
if not isinstance(self.message, str):
self.message = str(self.message, "ascii", "replace")
def _get_reformatted_records(self, records):
for rec in records:
if rec[6] is not None:
yield (rec[4], rec[5], rec[2], rec[6])
else:
yield tuple(rec[0:4])
@property
def traceback(self):
"""Return a list of 4-tuple traceback records (i.e. normal python
format) with template-corresponding lines remapped to the originating
template.
"""
return list(self._get_reformatted_records(self.records))
@property
def reverse_records(self):
return reversed(self.records)
@property
def reverse_traceback(self):
"""Return the same data as traceback, except in reverse order."""
return list(self._get_reformatted_records(self.reverse_records))
def _init(self, trcback):
"""format a traceback from sys.exc_info() into 7-item tuples,
containing the regular four traceback tuple items, plus the original
template filename, the line number adjusted relative to the template
source, and code line from that line number of the template."""
import mako.template
mods = {}
rawrecords = traceback.extract_tb(trcback)
new_trcback = []
for filename, lineno, function, line in rawrecords:
if not line:
line = ""
try:
(line_map, template_lines, template_filename) = mods[filename]
except KeyError:
try:
info = mako.template._get_module_info(filename)
module_source = info.code
template_source = info.source
template_filename = (
info.template_filename or info.template_uri or filename
)
except KeyError:
# A normal .py file (not a Template)
new_trcback.append(
(
filename,
lineno,
function,
line,
None,
None,
None,
None,
)
)
continue
template_ln = 1
mtm = mako.template.ModuleInfo
source_map = mtm.get_module_source_metadata(
module_source, full_line_map=True
)
line_map = source_map["full_line_map"]
template_lines = [
line_ for line_ in template_source.split("\n")
]
mods[filename] = (line_map, template_lines, template_filename)
template_ln = line_map[lineno - 1]
if template_ln <= len(template_lines):
template_line = template_lines[template_ln - 1]
else:
template_line = None
new_trcback.append(
(
filename,
lineno,
function,
line,
template_filename,
template_ln,
template_line,
template_source,
)
)
if not self.source:
for l in range(len(new_trcback) - 1, 0, -1):
if new_trcback[l][5]:
self.source = new_trcback[l][7]
self.lineno = new_trcback[l][5]
break
else:
if new_trcback:
try:
# A normal .py file (not a Template)
with open(new_trcback[-1][0], "rb") as fp:
encoding = util.parse_encoding(fp)
if not encoding:
encoding = "utf-8"
fp.seek(0)
self.source = fp.read()
if encoding:
self.source = self.source.decode(encoding)
except IOError:
self.source = ""
self.lineno = new_trcback[-1][1]
return new_trcback
def text_error_template(lookup=None):
"""Provides a template that renders a stack trace in a similar format to
the Python interpreter, substituting source template filenames, line
numbers and code for that of the originating source template, as
applicable.
"""
import mako.template
return mako.template.Template(
r"""
<%page args="error=None, traceback=None"/>
<%!
from mako.exceptions import RichTraceback
%>\
<%
tback = RichTraceback(error=error, traceback=traceback)
%>\
Traceback (most recent call last):
% for (filename, lineno, function, line) in tback.traceback:
File "${filename}", line ${lineno}, in ${function or '?'}
${line | trim}
% endfor
${tback.errorname}: ${tback.message}
"""
)
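# Illustrative usage sketch (not part of Mako itself): render a template and,
# on failure, return the plain-text report produced by ``text_error_template``
# above. The template source below is a made-up example that always fails.
def _example_text_error_report():
    import mako.template
    try:
        return mako.template.Template("${1 / 0}").render()
    except Exception:
        return text_error_template().render()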
def _install_pygments():
global syntax_highlight, pygments_html_formatter
from mako.ext.pygmentplugin import syntax_highlight # noqa
from mako.ext.pygmentplugin import pygments_html_formatter # noqa
def _install_fallback():
global syntax_highlight, pygments_html_formatter
from mako.filters import html_escape
pygments_html_formatter = None
def syntax_highlight(filename="", language=None):
return html_escape
def _install_highlighting():
try:
_install_pygments()
except ImportError:
_install_fallback()
_install_highlighting()
def html_error_template():
"""Provides a template that renders a stack trace in an HTML format,
providing an excerpt of code as well as substituting source template
filenames, line numbers and code for that of the originating source
template, as applicable.
The template's default ``encoding_errors`` value is
``'htmlentityreplace'``. The template has two options. With the
``full`` option disabled, only a section of an HTML document is
returned. With the ``css`` option disabled, the default stylesheet
won't be included.
"""
import mako.template
return mako.template.Template(
r"""
<%!
from mako.exceptions import RichTraceback, syntax_highlight,\
pygments_html_formatter
%>
<%page args="full=True, css=True, error=None, traceback=None"/>
% if full:
<html>
<head>
<title>Mako Runtime Error</title>
% endif
% if css:
<style>
body { font-family:verdana; margin:10px 30px 10px 30px;}
.stacktrace { margin:5px 5px 5px 5px; }
.highlight { padding:0px 10px 0px 10px; background-color:#9F9FDF; }
.nonhighlight { padding:0px; background-color:#DFDFDF; }
.sample { padding:10px; margin:10px 10px 10px 10px;
font-family:monospace; }
.sampleline { padding:0px 10px 0px 10px; }
.sourceline { margin:5px 5px 10px 5px; font-family:monospace;}
.location { font-size:80%; }
.highlight { white-space:pre; }
.sampleline { white-space:pre; }
% if pygments_html_formatter:
${pygments_html_formatter.get_style_defs()}
.linenos { min-width: 2.5em; text-align: right; }
pre { margin: 0; }
.syntax-highlighted { padding: 0 10px; }
.syntax-highlightedtable { border-spacing: 1px; }
.nonhighlight { border-top: 1px solid #DFDFDF;
border-bottom: 1px solid #DFDFDF; }
.stacktrace .nonhighlight { margin: 5px 15px 10px; }
.sourceline { margin: 0 0; font-family:monospace; }
.code { background-color: #F8F8F8; width: 100%; }
.error .code { background-color: #FFBDBD; }
.error .syntax-highlighted { background-color: #FFBDBD; }
% endif
</style>
% endif
% if full:
</head>
<body>
% endif
<h2>Error !</h2>
<%
tback = RichTraceback(error=error, traceback=traceback)
src = tback.source
line = tback.lineno
if src:
lines = src.split('\n')
else:
lines = None
%>
<h3>${tback.errorname}: ${tback.message|h}</h3>
% if lines:
<div class="sample">
<div class="nonhighlight">
% for index in range(max(0, line-4),min(len(lines), line+5)):
<%
if pygments_html_formatter:
pygments_html_formatter.linenostart = index + 1
%>
% if index + 1 == line:
<%
if pygments_html_formatter:
old_cssclass = pygments_html_formatter.cssclass
pygments_html_formatter.cssclass = 'error ' + old_cssclass
%>
${lines[index] | syntax_highlight(language='mako')}
<%
if pygments_html_formatter:
pygments_html_formatter.cssclass = old_cssclass
%>
% else:
${lines[index] | syntax_highlight(language='mako')}
% endif
% endfor
</div>
</div>
% endif
<div class="stacktrace">
% for (filename, lineno, function, line) in tback.reverse_traceback:
<div class="location">${filename}, line ${lineno}:</div>
<div class="nonhighlight">
<%
if pygments_html_formatter:
pygments_html_formatter.linenostart = lineno
%>
<div class="sourceline">${line | syntax_highlight(filename)}</div>
</div>
% endfor
</div>
% if full:
</body>
</html>
% endif
""",
output_encoding=sys.getdefaultencoding(),
encoding_errors="htmlentityreplace",
)
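# Usage sketch (illustrative, following the docstring above): inside an
# exception handler, embed only the error section of the HTML report in an
# existing page by disabling the ``full`` and ``css`` options.
def _example_embedded_html_report():
    try:
        raise RuntimeError("example failure")
    except RuntimeError:
        return html_error_template().render(full=False, css=False)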
|
931ab81aa55a50993accedbc3f77e78c22f54734
|
eec259ed9551157fc7d39759be9da014e6b56cd0
|
/python/cuspatial/cuspatial/tests/binpreds/binpred_test_dispatch.py
|
05b38d702c9e580f53641551941c99262bc9cb10
|
[
"Apache-2.0"
] |
permissive
|
rapidsai/cuspatial
|
b4699ffe73ac217ec40244e231d33675470d3e30
|
a87d21f0cf70116576bf7dbdd42db2bcfd50e7d4
|
refs/heads/branch-23.10
| 2023-08-31T10:49:25.144482
| 2023-08-30T22:02:04
| 2023-08-30T22:02:04
| 199,666,905
| 509
| 136
|
Apache-2.0
| 2023-09-08T21:07:05
| 2019-07-30T14:23:22
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 15,892
|
py
|
binpred_test_dispatch.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
import pytest
from shapely.geometry import LineString, Point, Polygon
import cuspatial
"""Test Dispatch"""
"""This file is used to generate tests for all possible combinations
of geometry types and binary predicates. The tests are generated
using the fixtures defined in this file. The fixtures are combined
in the test function in `test_binpreds_test_dispatch.py` to make
a Tuple: (feature-name, feature-description, feature-lhs,
feature-rhs). The feature-name and feature-description are not used
in the test itself but are useful for development and debugging.
"""
@pytest.fixture(
params=[
"contains",
"geom_equals",
"intersects",
"covers",
"crosses",
"disjoint",
"overlaps",
"touches",
"within",
]
)
def predicate(request):
"""The collection of all supported binary predicates"""
return request.param
"""The fundamental set of tests. This section is dispatched based
on the feature type. Each feature pairing has a specific set of
comparisons that need to be performed to cover the entire test
space. This section contains specific feature representations
that cover all possible geometric combinations."""
point_polygon = Polygon([(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)])
features = {
"point-point-disjoint": (
"""Two points apart.""",
Point(0.0, 0.0),
Point(1.0, 0.0),
),
"point-point-equal": (
"""Two points together.""",
Point(0.0, 0.0),
Point(0.0, 0.0),
),
"point-linestring-disjoint": (
"""Point and linestring are disjoint.""",
Point(0.0, 0.0),
LineString([(1.0, 0.0), (2.0, 0.0)]),
),
"point-linestring-point": (
"""Point and linestring share a point.""",
Point(0.0, 0.0),
LineString([(0.0, 0.0), (2.0, 0.0)]),
),
"point-linestring-edge": (
"""Point and linestring intersect.""",
Point(0.5, 0.0),
LineString([(0.0, 0.0), (1.0, 0.0)]),
),
"point-polygon-disjoint": (
"""Point and polygon are disjoint.""",
Point(-0.5, 0.5),
point_polygon,
),
"point-polygon-point": (
"""Point and polygon share a point.""",
Point(0.0, 0.0),
point_polygon,
),
"point-polygon-edge": (
"""Point and polygon intersect.""",
Point(0.5, 0.0),
point_polygon,
),
"point-polygon-in": (
"""Point is in polygon interior.""",
Point(0.5, 0.5),
point_polygon,
),
"linestring-linestring-disjoint": (
"""
x---x
x---x
""",
LineString([(0.0, 0.0), (1.0, 0.0)]),
LineString([(0.0, 1.0), (1.0, 1.0)]),
),
"linestring-linestring-same": (
"""
x---x
""",
LineString([(0.0, 0.0), (1.0, 0.0)]),
LineString([(0.0, 0.0), (1.0, 0.0)]),
),
"linestring-linestring-covers": (
"""
x
x
/
x
x
""",
LineString([(0.0, 0.0), (1.0, 1.0)]),
LineString([(0.25, 0.25), (0.5, 0.5)]),
),
"linestring-linestring-touches": (
"""
x
|
|
|
x---x
""",
LineString([(0.0, 0.0), (0.0, 1.0)]),
LineString([(0.0, 0.0), (1.0, 0.0)]),
),
"linestring-linestring-touch-interior": (
"""
x x
| /
| /
|/
x---x
""",
LineString([(0.0, 1.0), (0.0, 0.0), (1.0, 0.0)]),
LineString([(0.0, 0.0), (1.0, 1.0)]),
),
"linestring-linestring-touch-edge": (
"""
x
|
|
|
x-x-x
""",
LineString([(0.0, 0.0), (1.0, 0.0)]),
LineString([(0.5, 0.0), (0.5, 1.0)]),
),
"linestring-linestring-touch-edge-twice": (
"""
x
x
/ \\
x---x
x
""",
LineString([(0.0, 0.0), (1.0, 1.0), (2.0, 2.0)]),
LineString([(0.25, 0.25), (1.0, 0.0), (0.5, 0.5)]),
),
"linestring-linestring-crosses": (
"""
x
|
x-|-x
|
x
""",
LineString([(0.5, 0.0), (0.5, 1.0)]),
LineString([(0.0, 0.5), (1.0, 0.5)]),
),
"linestring-linestring-touch-and-cross": (
"""
x
|
x
|\\
x---x
x
""",
LineString([(0.0, 0.0), (1.0, 1.0)]),
LineString([(0.5, 0.5), (1.0, 0.1), (-1.0, 0.1)]),
),
"linestring-polygon-disjoint": (
"""
point_polygon above is drawn as
-----
| |
| |
| |
-----
and the corresponding linestring is drawn as
x---x
or
x
|
|
|
x
"""
"""
x -----
| | |
| | |
| | |
x -----
""",
LineString([(-0.5, 0.0), (-0.5, 1.0)]),
point_polygon,
),
"linestring-polygon-touch-point": (
"""
x---x----
| |
| |
| |
-----
""",
LineString([(-1.0, 0.0), (0.0, 0.0)]),
point_polygon,
),
"linestring-polygon-touch-edge": (
"""
-----
| |
x---x |
| |
-----
""",
LineString([(-1.0, 0.5), (0.0, 0.5)]),
point_polygon,
),
"linestring-polygon-overlap-edge": (
"""
x----
| |
| |
| |
x----
""",
LineString([(0.0, 0.0), (0.0, 1.0)]),
point_polygon,
),
"linestring-polygon-intersect-edge": (
"""
-----
| |
| |
| |
x---x--
""",
LineString([(-0.5, 0.0), (0.5, 0.0)]),
point_polygon,
),
"linestring-polygon-intersect-inner-edge": (
"""
-----
x |
| |
x |
-----
The linestring in this case lies on the bottom edge of the polygon but is
shorter than the edge, so it does not reach the corners.
""",
LineString([(0.25, 0.0), (0.75, 0.0)]),
point_polygon,
),
"linestring-polygon-point-interior": (
"""
----x
| /|
| / |
|/ |
x----
""",
LineString([(0.0, 0.0), (1.0, 1.0)]),
point_polygon,
),
"linestring-polygon-edge-interior": (
"""
--x--
| | |
| | |
| | |
--x--
""",
LineString([(0.5, 0.0), (0.5, 1.0)]),
point_polygon,
),
"linestring-polygon-in": (
"""
-----
| x |
| | |
| x |
-----
""",
LineString([(0.5, 0.25), (0.5, 0.75)]),
point_polygon,
),
"linestring-polygon-in-out": (
"""
-----
| |
| x |
| | |
--|--
|
x
""",
LineString([(0.5, 0.5), (0.5, -0.5)]),
point_polygon,
),
"linestring-polygon-crosses": (
"""
x
--|--
| | |
| | |
| | |
--|--
x
""",
LineString([(0.5, 1.25), (0.5, -0.25)]),
point_polygon,
),
"linestring-polygon-cross-concave-edge": (
"""
x x x
|\\ | /|
| xx- |
| | |
---x---
""",
LineString([(0.5, 0.0), (0.5, 1.0)]),
Polygon([(0, 0), (0, 1), (0.3, 0.4), (1, 1), (1, 0)]),
),
"linestring-polygon-half-in": (
"""
-----
| |
| x |
|/ \\|
xx-xx
""",
LineString(
[(0.0, 0.0), (0.25, 0.0), (0.5, 0.5), (0.75, 0.0), (1.0, 0.0)]
),
point_polygon,
),
"linestring-polygon-half-out": (
"""
-----
| |
| |
| |
xx-xx
\\/
x
""",
LineString(
[(0.0, 0.0), (0.25, 0.0), (0.5, -0.5), (0.75, 0.0), (1.0, 0.0)]
),
point_polygon,
),
"linestring-polygon-two-edges": (
"""
x----
| |
| |
| |
x---x
""",
LineString([(0.0, 1.0), (0.0, 0.0), (1.0, 0.0)]),
point_polygon,
),
"linestring-polygon-edge-to-interior": (
"""
x----
| |
| -x
|-/ |
x----
""",
LineString([(0.0, 1.0), (0.0, 0.0), (1.0, 0.5)]),
point_polygon,
),
"linestring-polygon-edge-cross-to-exterior": (
"""
x------
| |
| ---x
| --- |
x------
""",
LineString([(0.0, 1.0), (0.0, 0.0), (1.5, 0.5)]),
point_polygon,
),
"polygon-polygon-disjoint": (
"""
Polygon polygon tests use a triangle for the lhs and a square for the rhs.
The triangle is drawn as
x---x
| /
| /
|/
x
The square is drawn as
-----
| |
| |
| |
-----
""",
Polygon([(0.0, 2.0), (0.0, 3.0), (1.0, 3.0)]),
point_polygon,
),
"polygon-polygon-touch-point": (
"""
x---x
| /
| /
|/
x----
| |
| |
| |
-----
""",
Polygon([(0.0, 1.0), (0.0, 2.0), (1.0, 2.0)]),
point_polygon,
),
"polygon-polygon-touch-edge": (
"""
x---x
| /
| /
|/
-x--x
| |
| |
| |
-----
""",
Polygon([(0.25, 1.0), (0.25, 2.0), (1.25, 2.0)]),
point_polygon,
),
"polygon-polygon-overlap-edge": (
"""
x
|\\
| \\
| \\
x---x
| |
| |
| |
-----
""",
Polygon([(0.0, 1.0), (0.0, 2.0), (1.0, 2.0)]),
point_polygon,
),
"polygon-polygon-overlap-inside-edge": (
"""
x
/|
x---x |
\\ / |
x |
/ |
x-----x
""",
Polygon([(0, 0), (1, 0), (1, 1), (0, 0)]),
Polygon([(0.25, 0.25), (0.5, 0.5), (0, 0.5), (0.25, 0.25)]),
),
"polygon-polygon-point-inside": (
"""
x---x
| /
--|-/
| |/|
| x |
| |
-----
""",
Polygon([(0.5, 0.5), (0.5, 1.5), (1.5, 1.5)]),
point_polygon,
),
"polygon-polygon-point-outside": (
"""
x
-|\\--
|x-x|
| |
| |
-----
""",
Polygon([(0.25, 0.75), (0.25, 1.25), (0.75, 0.75)]),
point_polygon,
),
"polygon-polygon-in-out-point": (
"""
x
|\\
--|-x
| |/|
| x |
| |
x----
""",
Polygon([(0.5, 0.5), (0.5, 1.5), (1.0, 1.0)]),
point_polygon,
),
"polygon-polygon-in-point-point": (
"""
x----
|\\ |
| x |
|/ |
x----
""",
Polygon([(0.0, 0.0), (0.0, 1.0), (0.5, 0.5)]),
point_polygon,
),
"polygon-polygon-contained": (
"""
-----
| x|
| /||
|x-x|
-----
""",
Polygon([(0.25, 0.25), (0.75, 0.75), (0.75, 0.25)]),
point_polygon,
),
"polygon-polygon-same": (
"""
x---x
| |
| |
| |
x---x
""",
point_polygon,
point_polygon,
),
}
point_point_dispatch_list = [
"point-point-disjoint",
"point-point-equal",
]
point_linestring_dispatch_list = [
"point-linestring-disjoint",
"point-linestring-point",
"point-linestring-edge",
]
point_polygon_dispatch_list = [
"point-polygon-disjoint",
"point-polygon-point",
"point-polygon-edge",
"point-polygon-in",
]
linestring_linestring_dispatch_list = [
"linestring-linestring-disjoint",
"linestring-linestring-same",
"linestring-linestring-covers",
"linestring-linestring-touches",
"linestring-linestring-touch-interior",
"linestring-linestring-touch-edge",
"linestring-linestring-touch-edge-twice",
"linestring-linestring-crosses",
"linestring-linestring-touch-and-cross",
]
linestring_polygon_dispatch_list = [
"linestring-polygon-disjoint",
"linestring-polygon-touch-point",
"linestring-polygon-touch-edge",
"linestring-polygon-overlap-edge",
"linestring-polygon-intersect-edge",
"linestring-polygon-intersect-inner-edge",
"linestring-polygon-point-interior",
"linestring-polygon-edge-interior",
"linestring-polygon-in",
"linestring-polygon-crosses",
"linestring-polygon-cross-concave-edge",
"linestring-polygon-half-in",
"linestring-polygon-half-out",
"linestring-polygon-two-edges",
"linestring-polygon-edge-to-interior",
"linestring-polygon-edge-cross-to-exterior",
]
polygon_polygon_dispatch_list = [
"polygon-polygon-disjoint",
"polygon-polygon-touch-point",
"polygon-polygon-touch-edge",
"polygon-polygon-overlap-edge",
"polygon-polygon-overlap-inside-edge",
"polygon-polygon-point-inside",
"polygon-polygon-point-outside",
"polygon-polygon-in-out-point",
"polygon-polygon-in-point-point",
"polygon-polygon-contained",
"polygon-polygon-same",
]
def object_dispatch(name_list):
"""Generate a list of test cases for a given set of test names."""
for name in name_list:
yield (name, features[name][0], features[name][1], features[name][2])
type_dispatch = {
# A dictionary of test cases for each geometry type combination.
# Still needs MultiPoint.
(Point, Point): object_dispatch(point_point_dispatch_list),
(Point, LineString): object_dispatch(point_linestring_dispatch_list),
(Point, Polygon): object_dispatch(point_polygon_dispatch_list),
(LineString, LineString): object_dispatch(
linestring_linestring_dispatch_list
),
(LineString, Polygon): object_dispatch(linestring_polygon_dispatch_list),
(Polygon, Polygon): object_dispatch(polygon_polygon_dispatch_list),
}
def simple_test_dispatch():
"""Generates a list of test cases for each geometry type combination.
Each dispatched test case is a tuple of the form:
(test_name, test_description, lhs, rhs)
which is run in `test_binpred_test_dispatch.py`.
The test_name is a unique identifier for the test case.
The test_description is a string representation of the test case.
The lhs and rhs are GeoSeries of the left and right geometries.
lhs and rhs are always constructed as a list of 3 geometries since
the binpred function is designed to operate primarily on groups of
geometries. The first and third feature in the list always match
the first geometry specified in `test_description`, and the rhs is always
a group of three of the second geometry specified in `test_description`.
The second feature in the lhs varies.
When the types of the lhs and rhs are equal, the second geometry
from `test_description` is substituted for the second geometry in the lhs.
This produces a test form of:
lhs rhs
A B
B B
A B
This decision has two primary benefits:
1. It causes the test to produce varied results (meaning results of the
    form (True, False, True) or (False, True, False)), greatly reducing the
likelihood of an "all-False" or "all-True" predicate producing
false-positive results.
2. It tests every binary predicate against self, such as A.touches(A)
for every predicate and geometry combination.
When the types of lhs and rhs are not equal this variation is not
performed, since we cannot currently use predicate operations on mixed
geometry types.
"""
for types in type_dispatch:
generator = type_dispatch[types]
for test_name, test_description, lhs, rhs in generator:
yield (
test_name,
test_description,
cuspatial.GeoSeries(
[
lhs,
rhs if types[0] == types[1] else lhs,
lhs,
]
),
cuspatial.GeoSeries(
[
rhs,
rhs,
rhs,
]
),
)
@pytest.fixture(params=simple_test_dispatch())
def simple_test(request):
"""Generates a unique test case for each geometry type combination."""
return request.param
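# Usage sketch (an assumption for illustration, not code from this file): a
# single case can be pulled out of ``features`` directly, e.g. to debug one
# geometry pairing with one predicate. The helper name and the method-style
# predicate call are assumed to mirror how the dispatched tests consume the
# fixtures.
def _single_case_example(name="point-polygon-in", predicate_name="contains"):
    _description, lhs, rhs = features[name]
    lhs_series = cuspatial.GeoSeries([lhs])
    rhs_series = cuspatial.GeoSeries([rhs])
    return getattr(rhs_series, predicate_name)(lhs_series)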
|
acdb30d1f9f42172d027acd27aa716e4b7464b3f
|
84c498e7a634f1c54c0caaf18ede3f20f8852dc8
|
/ExHIBIT/rld_text_output.py
|
c773f4f0dfee709da190ca1f17afa3d4abe9db5e
|
[] |
no_license
|
Yggdrasill-Moe/Niflheim
|
4697e8d5cea5da7e5732f925b190c9d47ef94071
|
51048e7af2ae2c69db772ff59ac26390ab0ea73b
|
refs/heads/master
| 2023-07-19T18:34:26.398212
| 2023-07-17T01:11:04
| 2023-07-17T01:11:04
| 163,126,766
| 103
| 23
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,886
|
py
|
rld_text_output.py
|
# -*- coding:utf-8 -*-
# Used to export the text from rld files
#by Darkness-TX
#2017.11.17
import struct
import os
import sys
import io
def byte2int(byte):
    # '<I' is a little-endian unsigned 32-bit integer, matching the 4-byte
    # reads below (the native 'L' format is 8 bytes on some platforms).
    long_tuple = struct.unpack('<I', byte)
    long = long_tuple[0]
    return long
def int2byte(num):
    return struct.pack('<I', num)
def dumpstr(src):
bstr = b''
c = src.read(1)
while c != b'\x00':
bstr += c
c = src.read(1)
return bstr.decode('932')
def FormatString(string, count):
res = "○%08d○\n%s\n●%08d●\n%s\n\n"%(count, string, count, string)
return res
def ReadHeader(src):
tag = ''
magic = src.read(4)
ver = byte2int(src.read(4))
offset = byte2int(src.read(4))
count = byte2int(src.read(4))
flag = byte2int(src.read(4))
if flag == 1:
tag = dumpstr(src)
return magic, ver, offset, count, flag, tag
def Opcode_Analysis(src):
buff = byte2int(src.read(4))
op = buff & 0xFFFF
init_count = (buff & 0xFF0000) >> 16
str_count = (buff >> 24) & 0xF
unk = buff >> 28
return op, init_count, str_count, unk
def Get_Name_Table(dump_str):
names = {}
for l in dump_str:
group = l.split(',')
names[int(group[0])] = group[3]
return names
def rld_output(fname, name_table=[]):
print(fname)
src = open(os.path.join('rld', fname), 'rb')
magic, ver, offset, count, flag, tag = ReadHeader(src)
if magic != b'\00DLR':
print(fname + "不是支持的类型")
else:
src.seek(offset + 4, os.SEEK_SET)
dst = open(os.path.join('rld', fname[:-4] + '.txt'), 'w', encoding='utf16')
l = 0
dump_str = []
for i in range(0, count):
opcode, init_count, str_count, unk = Opcode_Analysis(src)
all_init = []
            all_str = []
for j in range(0, init_count):
val = src.read(4)
all_init.append(byte2int(val))
for k in range(0, str_count):
row = dumpstr(src)
all_str.append(row)
if opcode == 28:
if all_init[0] in name_table:
dst.write(FormatString(name_table[all_init[0]], l))
dump_str.append(name_table[all_init[0]])
l += 1
for string in all_str:
if string != '*' and string != '$noname$' and len(string) != 0 and string.count(',') < 2:
dst.write(FormatString(string, l))
dump_str.append(string)
l += 1
elif opcode == 21:
for string in all_str:
if string != '*' and string != '$noname$' and len(string) != 0 and string.count(',') < 2:
dst.write(FormatString(string, l))
dump_str.append(string)
l += 1
elif opcode == 48:
dst.write(FormatString(all_str[0], l))
dump_str.append(all_str[0])
l += 1
elif opcode == 191:
if len(all_str[0]) != len(all_str[0].encode('932')):
dst.write(FormatString(all_str[0], l))
dump_str.append(all_str[0])
l += 1
return dump_str
def main():
dump_str = rld_output('defChara.bin')
name_table = Get_Name_Table(dump_str)
for f in os.listdir('rld'):
if not f.endswith('.bin') or f == 'defChara.bin':
continue
rld_output(f, name_table)
main()
|
0a8d6b01639cee43b514bbabb28a31f6e8adaae6
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Spacy/source2.7/spacy/tests/doc/test_span_merge.py
|
ae1f4f4a18a246f794d78e929a2b89eead7dc836
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 5,841
|
py
|
test_span_merge.py
|
# coding: utf-8
from __future__ import unicode_literals
from ..util import get_doc
from ...vocab import Vocab
from ...tokens import Doc
import pytest
def test_spans_merge_tokens(en_tokenizer):
text = "Los Angeles start."
heads = [1, 1, 0, -1]
tokens = en_tokenizer(text)
doc = get_doc(tokens.vocab, [t.text for t in tokens], heads=heads)
assert len(doc) == 4
assert doc[0].head.text == 'Angeles'
assert doc[1].head.text == 'start'
doc.merge(0, len('Los Angeles'), tag='NNP', lemma='Los Angeles', ent_type='GPE')
assert len(doc) == 3
assert doc[0].text == 'Los Angeles'
assert doc[0].head.text == 'start'
doc = get_doc(tokens.vocab, [t.text for t in tokens], heads=heads)
assert len(doc) == 4
assert doc[0].head.text == 'Angeles'
assert doc[1].head.text == 'start'
doc.merge(0, len('Los Angeles'), tag='NNP', lemma='Los Angeles', label='GPE')
assert len(doc) == 3
assert doc[0].text == 'Los Angeles'
assert doc[0].head.text == 'start'
assert doc[0].ent_type_ == 'GPE'
def test_spans_merge_heads(en_tokenizer):
text = "I found a pilates class near work."
heads = [1, 0, 2, 1, -3, -1, -1, -6]
tokens = en_tokenizer(text)
doc = get_doc(tokens.vocab, [t.text for t in tokens], heads=heads)
assert len(doc) == 8
doc.merge(doc[3].idx, doc[4].idx + len(doc[4]), tag=doc[4].tag_,
lemma='pilates class', ent_type='O')
assert len(doc) == 7
assert doc[0].head.i == 1
assert doc[1].head.i == 1
assert doc[2].head.i == 3
assert doc[3].head.i == 1
assert doc[4].head.i in [1, 3]
assert doc[5].head.i == 4
def test_span_np_merges(en_tokenizer):
text = "displaCy is a parse tool built with Javascript"
heads = [1, 0, 2, 1, -3, -1, -1, -1]
tokens = en_tokenizer(text)
doc = get_doc(tokens.vocab, [t.text for t in tokens], heads=heads)
assert doc[4].head.i == 1
doc.merge(doc[2].idx, doc[4].idx + len(doc[4]), tag='NP', lemma='tool',
ent_type='O')
assert doc[2].head.i == 1
text = "displaCy is a lightweight and modern dependency parse tree visualization tool built with CSS3 and JavaScript."
heads = [1, 0, 8, 3, -1, -2, 4, 3, 1, 1, -9, -1, -1, -1, -1, -2, -15]
tokens = en_tokenizer(text)
doc = get_doc(tokens.vocab, [t.text for t in tokens], heads=heads)
ents = [(e[0].idx, e[-1].idx + len(e[-1]), e.label_, e.lemma_) for e in doc.ents]
for start, end, label, lemma in ents:
merged = doc.merge(start, end, tag=label, lemma=lemma, ent_type=label)
assert merged != None, (start, end, label, lemma)
text = "One test with entities like New York City so the ents list is not void"
heads = [1, 11, -1, -1, -1, 1, 1, -3, 4, 2, 1, 1, 0, -1, -2]
tokens = en_tokenizer(text)
doc = get_doc(tokens.vocab, [t.text for t in tokens], heads=heads)
for span in doc.ents:
        merged = span.merge()
assert merged != None, (span.start, span.end, span.label_, span.lemma_)
def test_spans_entity_merge(en_tokenizer):
text = "Stewart Lee is a stand up comedian who lives in England and loves Joe Pasquale.\n"
heads = [1, 1, 0, 1, 2, -1, -4, 1, -2, -1, -1, -3, -10, 1, -2, -13, -1]
tags = ['NNP', 'NNP', 'VBZ', 'DT', 'VB', 'RP', 'NN', 'WP', 'VBZ', 'IN', 'NNP', 'CC', 'VBZ', 'NNP', 'NNP', '.', 'SP']
ents = [('Stewart Lee', 'PERSON', 0, 2), ('England', 'GPE', 10, 11), ('Joe Pasquale', 'PERSON', 13, 15)]
tokens = en_tokenizer(text)
doc = get_doc(tokens.vocab, [t.text for t in tokens], heads=heads, tags=tags, ents=ents)
assert len(doc) == 17
for ent in doc.ents:
label, lemma, type_ = (ent.root.tag_, ent.root.lemma_, max(w.ent_type_ for w in ent))
ent.merge(label=label, lemma=lemma, ent_type=type_)
# check looping is ok
assert len(doc) == 15
def test_spans_entity_merge_iob():
# Test entity IOB stays consistent after merging
words = ["a", "b", "c", "d", "e"]
doc = Doc(Vocab(), words=words)
doc.ents = [(doc.vocab.strings.add('ent-abc'), 0, 3),
(doc.vocab.strings.add('ent-d'), 3, 4)]
assert doc[0].ent_iob_ == "B"
assert doc[1].ent_iob_ == "I"
assert doc[2].ent_iob_ == "I"
assert doc[3].ent_iob_ == "B"
doc[0:1].merge()
assert doc[0].ent_iob_ == "B"
assert doc[1].ent_iob_ == "I"
def test_spans_sentence_update_after_merge(en_tokenizer):
text = "Stewart Lee is a stand up comedian. He lives in England and loves Joe Pasquale."
heads = [1, 1, 0, 1, 2, -1, -4, -5, 1, 0, -1, -1, -3, -4, 1, -2, -7]
deps = ['compound', 'nsubj', 'ROOT', 'det', 'amod', 'prt', 'attr',
'punct', 'nsubj', 'ROOT', 'prep', 'pobj', 'cc', 'conj',
'compound', 'dobj', 'punct']
tokens = en_tokenizer(text)
doc = get_doc(tokens.vocab, [t.text for t in tokens], heads=heads, deps=deps)
sent1, sent2 = list(doc.sents)
init_len = len(sent1)
init_len2 = len(sent2)
doc[0:2].merge(label='none', lemma='none', ent_type='none')
doc[-2:].merge(label='none', lemma='none', ent_type='none')
assert len(sent1) == init_len - 1
assert len(sent2) == init_len2 - 1
def test_spans_subtree_size_check(en_tokenizer):
text = "Stewart Lee is a stand up comedian who lives in England and loves Joe Pasquale"
heads = [1, 1, 0, 1, 2, -1, -4, 1, -2, -1, -1, -3, -10, 1, -2]
deps = ['compound', 'nsubj', 'ROOT', 'det', 'amod', 'prt', 'attr',
'nsubj', 'relcl', 'prep', 'pobj', 'cc', 'conj', 'compound',
'dobj']
tokens = en_tokenizer(text)
doc = get_doc(tokens.vocab, [t.text for t in tokens], heads=heads, deps=deps)
sent1 = list(doc.sents)[0]
init_len = len(list(sent1.root.subtree))
doc[0:2].merge(label='none', lemma='none', ent_type='none')
assert len(list(sent1.root.subtree)) == init_len - 1
|
979abdec47490f9e1070389d330f62ff32397217
|
15f0514701a78e12750f68ba09d68095172493ee
|
/Python3/1153.py
|
fbb3154e714eb597eae77b7cdb6cb7024a56597c
|
[
"MIT"
] |
permissive
|
strengthen/LeetCode
|
5e38c8c9d3e8f27109b9124ae17ef8a4139a1518
|
3ffa6dcbeb787a6128641402081a4ff70093bb61
|
refs/heads/master
| 2022-12-04T21:35:17.872212
| 2022-11-30T06:23:24
| 2022-11-30T06:23:24
| 155,958,163
| 936
| 365
|
MIT
| 2021-11-15T04:02:45
| 2018-11-03T06:47:38
| null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
1153.py
|
__________________________________________________________________________________________________
class Solution:
def canConvert(self, str1: str, str2: str) -> bool:
s1 = dict()
for i, ch in enumerate(str1):
if ch not in s1:
s1[ch] = list()
s1[ch].append(i)
s2 = dict()
for i, ch in enumerate(str2):
if ch not in s2:
s2[ch] = list()
s2[ch].append(i)
        # If both strings already use all 26 letters and differ, there is no
        # spare letter left to route conversions through, so str1 cannot be
        # transformed into str2.
        if len(s1) == len(s2) == 26 and str1 != str2:
            return False
        # Every occurrence of a character in str1 must map to the same
        # character of str2, i.e. the conversion must be a consistent mapping.
        for k, v in s1.items():
pivot = str2[v[0]]
for pos in v:
if str2[pos] != pivot:
return False
return True
__________________________________________________________________________________________________
__________________________________________________________________________________________________
|
0236f4c415f7bb805641d7ae463075552f443ffd
|
fae16a539b7c1b0525aab40ddaeee3e451fc9b74
|
/tests/unit/tasks/database/users/test_migrate.py
|
9bc28f3376e243bdcf3dc1b42479798f4381e159
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
argilla-io/argilla
|
a6b45f7f64e9db82f6d9a61771d758ffbb3dab4a
|
7c1b2368b444b7b7a281d37ad51bcb2d8e92acf5
|
refs/heads/develop
| 2023-09-04T03:58:05.914619
| 2023-09-01T15:58:31
| 2023-09-01T15:58:31
| 362,500,938
| 1,085
| 122
|
Apache-2.0
| 2023-09-14T15:31:07
| 2021-04-28T14:37:42
|
Python
|
UTF-8
|
Python
| false
| false
| 5,905
|
py
|
test_migrate.py
|
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import TYPE_CHECKING
from argilla.server.models import User, UserRole, Workspace, WorkspaceUser
from argilla.server.security.auth_provider.local.settings import settings
from click.testing import CliRunner
from typer import Typer
if TYPE_CHECKING:
from sqlalchemy.orm import Session
def test_migrate(monkeypatch, sync_db: "Session", cli_runner: CliRunner, cli: Typer):
monkeypatch.setattr(
settings, "users_db_file", os.path.join(os.path.dirname(__file__), "test_user_files", "users.yml")
)
result = cli_runner.invoke(cli, "database users migrate")
assert result.exit_code == 0
assert sync_db.query(User).count() == 5
assert sync_db.query(Workspace).count() == 9
assert sync_db.query(WorkspaceUser).count() == 11
user = sync_db.query(User).filter_by(username="john").first()
assert user.first_name == "John Doe"
assert user.username == "john"
assert user.role == UserRole.owner
assert user.api_key == "a14427ea-9197-11ec-b909-0242ac120002"
assert user.password_hash == "$2y$05$xtl7iy3bpqchUwiQMjEHe.tY7OaIjDrg43W3TB4EHQ7izvdjvGtPS"
assert [ws.name for ws in user.workspaces] == ["john"]
user = sync_db.query(User).filter_by(username="tanya").first()
assert user.first_name == "Tanya Franklin"
assert user.username == "tanya"
assert user.role == UserRole.annotator
assert user.api_key == "78a10b53-8db7-4ab5-9e9e-fbd4b7e76551"
assert user.password_hash == "$2y$05$aqNyXcXRXddNj5toZwT0HugHqKZypvqlBAkZviAGGbsAC8oTj/P5K"
assert [ws.name for ws in user.workspaces] == ["tanya", "argilla", "team"]
user = sync_db.query(User).filter_by(username="daisy").first()
assert user.first_name == "Daisy Gonzalez"
assert user.username == "daisy"
assert user.role == UserRole.annotator
assert user.api_key == "a8168929-8668-494c-b7a5-98cd35740d9b"
assert user.password_hash == "$2y$05$l83IhUs4ZDaxsgZ/P12FO.RFTi2wKQ2AxMK2vYtLx//yKramuCcZG"
assert set([ws.name for ws in user.workspaces]) == {"daisy", "argilla", "team", "latam"}
user = sync_db.query(User).filter_by(username="macleod").first()
assert user.first_name == ""
assert user.username == "macleod"
assert user.role == UserRole.annotator
assert user.api_key == "7c3b4d6e-1898-4c42-84c8-e1758cea1ce0"
assert user.password_hash == "$2y$05$Fb3iv7AGv8k.o5cl9qdCtuwkrLcDcSYKWyJk1QNl6RXKUecvP.Ium"
assert [ws.name for ws in user.workspaces] == ["macleod", "highlands"]
user = sync_db.query(User).filter_by(username="sanchez").first()
assert user.first_name == "Juan Sánchez Villalobos Ramírez"
assert user.username == "sanchez"
assert user.role == UserRole.annotator
assert user.api_key == "ac7b6b86-7d63-45ce-a76a-08f64e0d5fd6"
assert user.password_hash == "$2y$05$wMvfoz2TwrRFRZhNELHjbOcqEucVYImNORuRvh7Vp26.dIqvo9tY2"
assert [ws.name for ws in user.workspaces] == ["sanchez"]
def test_migrate_with_one_user_file(monkeypatch, sync_db: "Session", cli_runner: CliRunner, cli: Typer):
monkeypatch.setattr(
settings, "users_db_file", os.path.join(os.path.dirname(__file__), "test_user_files", "users_one.yml")
)
result = cli_runner.invoke(cli, "database users migrate")
assert result.exit_code == 0
assert sync_db.query(User).count() == 1
assert sync_db.query(Workspace).count() == 3
assert sync_db.query(WorkspaceUser).count() == 3
user = sync_db.query(User).filter_by(username="john").first()
assert user.first_name == "John Doe"
assert user.username == "john"
assert user.role == UserRole.annotator
assert user.api_key == "a14427ea-9197-11ec-b909-0242ac120002"
assert user.password_hash == "$2y$05$xtl7iy3bpqchUwiQMjEHe.tY7OaIjDrg43W3TB4EHQ7izvdjvGtPS"
assert [ws.name for ws in user.workspaces] == ["john", "argilla", "team"]
def test_migrate_with_invalid_user(monkeypatch, sync_db: "Session", cli_runner: CliRunner, cli: Typer):
monkeypatch.setattr(
settings, "users_db_file", os.path.join(os.path.dirname(__file__), "test_user_files", "users_invalid_user.yml")
)
result = cli_runner.invoke(cli, "database users migrate")
assert result.exit_code == 1
assert sync_db.query(User).count() == 0
assert sync_db.query(Workspace).count() == 0
assert sync_db.query(WorkspaceUser).count() == 0
def test_migrate_with_invalid_workspace(monkeypatch, sync_db: "Session", cli_runner: CliRunner, cli: Typer):
monkeypatch.setattr(
settings,
"users_db_file",
os.path.join(os.path.dirname(__file__), "test_user_files", "users_invalid_workspace.yml"),
)
result = cli_runner.invoke(cli, "database users migrate")
assert result.exit_code == 1
assert sync_db.query(User).count() == 0
assert sync_db.query(Workspace).count() == 0
assert sync_db.query(WorkspaceUser).count() == 0
def test_migrate_with_nonexistent_file(monkeypatch, sync_db: "Session", cli_runner: CliRunner, cli: Typer):
monkeypatch.setattr(settings, "users_db_file", "nonexistent.yml")
result = cli_runner.invoke(cli, "database users migrate")
assert result.exit_code == 1
assert sync_db.query(User).count() == 0
assert sync_db.query(Workspace).count() == 0
assert sync_db.query(WorkspaceUser).count() == 0
|
32af1e4ed2fb20136a20118b1021b328ef55336d
|
4bcc9806152542ab43fc2cf47c499424f200896c
|
/tensorflow/python/autograph/pyct/transformer_test.py
|
8708200210a2ed2afa581d114e5c2887659dfd50
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
tensorflow/tensorflow
|
906276dbafcc70a941026aa5dc50425ef71ee282
|
a7f3934a67900720af3d3b15389551483bee50b8
|
refs/heads/master
| 2023-08-25T04:24:41.611870
| 2023-08-25T04:06:24
| 2023-08-25T04:14:08
| 45,717,250
| 208,740
| 109,943
|
Apache-2.0
| 2023-09-14T20:55:50
| 2015-11-07T01:19:20
|
C++
|
UTF-8
|
Python
| false
| false
| 11,306
|
py
|
transformer_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for templates module."""
import re
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.platform import test
class TransformerTest(test.TestCase):
def _simple_context(self):
entity_info = transformer.EntityInfo(
name='Test_fn',
source_code=None,
source_file=None,
future_features=(),
namespace=None)
return transformer.Context(entity_info, None, None)
def assertSameAnno(self, first, second, key):
self.assertIs(anno.getanno(first, key), anno.getanno(second, key))
def assertDifferentAnno(self, first, second, key):
self.assertIsNot(anno.getanno(first, key), anno.getanno(second, key))
def test_state_tracking(self):
class LoopState(object):
pass
class CondState(object):
pass
class TestTransformer(transformer.Base):
def visit(self, node):
anno.setanno(node, 'loop_state', self.state[LoopState].value)
anno.setanno(node, 'cond_state', self.state[CondState].value)
return super(TestTransformer, self).visit(node)
def visit_While(self, node):
self.state[LoopState].enter()
node = self.generic_visit(node)
self.state[LoopState].exit()
return node
def visit_If(self, node):
self.state[CondState].enter()
node = self.generic_visit(node)
self.state[CondState].exit()
return node
tr = TestTransformer(self._simple_context())
def test_function(a):
a = 1
while a:
_ = 'a'
if a > 2:
_ = 'b'
while True:
raise '1'
if a > 3:
_ = 'c'
while True:
raise '1'
node, _ = parser.parse_entity(test_function, future_features=())
node = tr.visit(node)
fn_body = node.body
outer_while_body = fn_body[1].body
self.assertSameAnno(fn_body[0], outer_while_body[0], 'cond_state')
self.assertDifferentAnno(fn_body[0], outer_while_body[0], 'loop_state')
first_if_body = outer_while_body[1].body
self.assertDifferentAnno(outer_while_body[0], first_if_body[0],
'cond_state')
self.assertSameAnno(outer_while_body[0], first_if_body[0], 'loop_state')
first_inner_while_body = first_if_body[1].body
self.assertSameAnno(first_if_body[0], first_inner_while_body[0],
'cond_state')
self.assertDifferentAnno(first_if_body[0], first_inner_while_body[0],
'loop_state')
second_if_body = outer_while_body[2].body
self.assertDifferentAnno(first_if_body[0], second_if_body[0], 'cond_state')
self.assertSameAnno(first_if_body[0], second_if_body[0], 'loop_state')
second_inner_while_body = second_if_body[1].body
self.assertDifferentAnno(first_inner_while_body[0],
second_inner_while_body[0], 'cond_state')
self.assertDifferentAnno(first_inner_while_body[0],
second_inner_while_body[0], 'loop_state')
def test_state_tracking_context_manager(self):
class CondState(object):
pass
class TestTransformer(transformer.Base):
def visit(self, node):
anno.setanno(node, 'cond_state', self.state[CondState].value)
return super(TestTransformer, self).visit(node)
def visit_If(self, node):
with self.state[CondState]:
return self.generic_visit(node)
tr = TestTransformer(self._simple_context())
def test_function(a):
a = 1
if a > 2:
_ = 'b'
if a < 5:
_ = 'c'
_ = 'd'
node, _ = parser.parse_entity(test_function, future_features=())
node = tr.visit(node)
fn_body = node.body
outer_if_body = fn_body[1].body
self.assertDifferentAnno(fn_body[0], outer_if_body[0], 'cond_state')
self.assertSameAnno(outer_if_body[0], outer_if_body[2], 'cond_state')
inner_if_body = outer_if_body[1].body
self.assertDifferentAnno(inner_if_body[0], outer_if_body[0], 'cond_state')
def test_visit_block_postprocessing(self):
class TestTransformer(transformer.Base):
def _process_body_item(self, node):
if isinstance(node, gast.Assign) and (node.value.id == 'y'):
if_node = gast.If(
gast.Name(
'x', ctx=gast.Load(), annotation=None, type_comment=None),
[node], [])
return if_node, if_node.body
return node, None
def visit_FunctionDef(self, node):
node.body = self.visit_block(
node.body, after_visit=self._process_body_item)
return node
def test_function(x, y):
z = x
z = y
return z
tr = TestTransformer(self._simple_context())
node, _ = parser.parse_entity(test_function, future_features=())
node = tr.visit(node)
self.assertEqual(len(node.body), 2)
self.assertIsInstance(node.body[0], gast.Assign)
self.assertIsInstance(node.body[1], gast.If)
self.assertIsInstance(node.body[1].body[0], gast.Assign)
self.assertIsInstance(node.body[1].body[1], gast.Return)
def test_robust_error_on_list_visit(self):
class BrokenTransformer(transformer.Base):
def visit_If(self, node):
# This is broken because visit expects a single node, not a list, and
# the body of an if is a list.
# Importantly, the default error handling in visit also expects a single
# node. Therefore, mistakes like this need to trigger a type error
# before the visit called here installs its error handler.
# That type error can then be caught by the enclosing call to visit,
# and correctly blame the If node.
self.visit(node.body)
return node
def test_function(x):
if x > 0:
return x
tr = BrokenTransformer(self._simple_context())
node, _ = parser.parse_entity(test_function, future_features=())
with self.assertRaises(ValueError) as cm:
node = tr.visit(node)
obtained_message = str(cm.exception)
expected_message = r'expected "ast.AST", got "\<(type|class) \'list\'\>"'
self.assertRegex(obtained_message, expected_message)
def test_robust_error_on_ast_corruption(self):
# A child class should not be able to be so broken that it causes the error
# handling in `transformer.Base` to raise an exception. Why not? Because
# then the original error location is dropped, and an error handler higher
# up in the call stack gives misleading information.
# Here we test that the error handling in `visit` completes, and blames the
# correct original exception, even if the AST gets corrupted.
class NotANode(object):
pass
class BrokenTransformer(transformer.Base):
def visit_If(self, node):
node.body = NotANode()
raise ValueError('I blew up')
def test_function(x):
if x > 0:
return x
tr = BrokenTransformer(self._simple_context())
node, _ = parser.parse_entity(test_function, future_features=())
with self.assertRaises(ValueError) as cm:
node = tr.visit(node)
obtained_message = str(cm.exception)
# The message should reference the exception actually raised, not anything
# from the exception handler.
expected_substring = 'I blew up'
self.assertIn(expected_substring, obtained_message)
def test_origin_info_propagated_to_new_nodes(self):
class TestTransformer(transformer.Base):
def visit_If(self, node):
return gast.Pass()
tr = TestTransformer(self._simple_context())
def test_fn():
x = 1
if x > 0:
x = 1
return x
node, source = parser.parse_entity(test_fn, future_features=())
origin_info.resolve(node, source, 'test_file', 100, 0)
node = tr.visit(node)
created_pass_node = node.body[1]
# Takes the line number of the if statement.
self.assertEqual(
anno.getanno(created_pass_node, anno.Basic.ORIGIN).loc.lineno, 102)
def test_origin_info_preserved_in_moved_nodes(self):
class TestTransformer(transformer.Base):
def visit_If(self, node):
return node.body
tr = TestTransformer(self._simple_context())
def test_fn():
x = 1
if x > 0:
x = 1
x += 3
return x
node, source = parser.parse_entity(test_fn, future_features=())
origin_info.resolve(node, source, 'test_file', 100, 0)
node = tr.visit(node)
assign_node = node.body[1]
aug_assign_node = node.body[2]
# Keep their original line numbers.
self.assertEqual(
anno.getanno(assign_node, anno.Basic.ORIGIN).loc.lineno, 103)
self.assertEqual(
anno.getanno(aug_assign_node, anno.Basic.ORIGIN).loc.lineno, 104)
class CodeGeneratorTest(test.TestCase):
def _simple_context(self):
entity_info = transformer.EntityInfo(
name='test_fn',
source_code=None,
source_file=None,
future_features=(),
namespace=None)
return transformer.Context(entity_info, None, None)
def test_basic_codegen(self):
class TestCodegen(transformer.CodeGenerator):
def visit_Assign(self, node):
self.emit(parser.unparse(node, include_encoding_marker=False))
self.emit('\n')
def visit_Return(self, node):
self.emit(parser.unparse(node, include_encoding_marker=False))
self.emit('\n')
def visit_If(self, node):
self.emit('if ')
        # This is just for simplicity. A real generator will walk the tree and
# emit proper code.
self.emit(parser.unparse(node.test, include_encoding_marker=False))
self.emit(' {\n')
self.visit_block(node.body)
self.emit('} else {\n')
self.visit_block(node.orelse)
self.emit('}\n')
tg = TestCodegen(self._simple_context())
def test_fn():
x = 1
if x > 0:
x = 2
if x > 1:
x = 3
return x
node, source = parser.parse_entity(test_fn, future_features=())
origin_info.resolve(node, source, 'test_file', 100, 0)
tg.visit(node)
r = re.compile('.*'.join([
r'x = 1',
r'if \(?x > 0\)? {',
r'x = 2',
r'if \(?x > 1\)? {',
r'x = 3',
r'} else {',
r'}',
r'} else {',
r'}',
r'return x']), re.DOTALL)
self.assertRegex(tg.code_buffer, r)
# TODO(mdan): Test the source map.
if __name__ == '__main__':
test.main()
|
cdc782cd0087fbea5b6c57022e3d12d765d5f5fd
|
6b27c39edc10b1353104043b7a523f4981c99ef2
|
/pytype/test_data/tokenerror1.py
|
a81d29a63092a3e42beafb8ec2e3d4bf46dad09e
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
google/pytype
|
ad0ff0b6c1083b4f0a1af1747869d422f2b5f4d8
|
bda0b9547af9a084bb2bd1427f58dcde968e48b5
|
refs/heads/main
| 2023-08-26T17:52:23.546035
| 2023-08-24T22:48:00
| 2023-08-24T22:48:00
| 32,483,713
| 4,595
| 367
|
NOASSERTION
| 2023-09-13T04:40:45
| 2015-03-18T20:52:08
|
Python
|
UTF-8
|
Python
| false
| false
| 108
|
py
|
tokenerror1.py
|
# pylint: skip-file
"""Test program with a token error.
The error is this triple-quoted string doesn't end.
|
676e957eab1c20624391d6116d84fa9078b8132c
|
eda6e7b8f399dedcdb960f4b48a2134b978f8d83
|
/bnpy/allocmodel/relational/FiniteSMSB.py
|
90cea6c6df07051ffc30c8188679d01b1ddfc7b2
|
[
"BSD-3-Clause"
] |
permissive
|
bnpy/bnpy
|
8ed61bc4fe2f0ed99e0254c11a21c27c0cee59b2
|
ffc2242427451aa6a61dcac1473c47577a5ade6f
|
refs/heads/master
| 2023-08-16T06:49:58.716279
| 2022-10-15T15:59:12
| 2022-10-15T15:59:12
| 75,731,181
| 197
| 54
|
NOASSERTION
| 2023-07-21T20:59:10
| 2016-12-06T12:56:07
|
Python
|
UTF-8
|
Python
| false
| false
| 5,971
|
py
|
FiniteSMSB.py
|
'''
SMSB.py
Finite single membership stochastic block model. Follows generative process:
For all nodes i:
Draw pi_i ~ Dirichlet(gamma)
Draw z_i ~ Categorical(pi_i)
For all other nodes j:
Draw x_{ij} ~ Bernouli(w_{z_i, z_j})
For l,m = 1,...,K:
w_{lm} ~ Beta(tau_1, tau_0)
'''
import numpy as np
from bnpy.util import logsumexp
from bnpy.util import gammaln, digamma, EPS
from bnpy.suffstats import SuffStatBag
from bnpy.util import StateSeqUtil
from bnpy.allocmodel.mix.FiniteMixtureModel import FiniteMixtureModel
from bnpy.allocmodel import AllocModel
class FiniteSMSB(FiniteMixtureModel):
''' Single membership stochastic block model, with K components.
Attributes
-------
* inferType : string {'EM', 'VB', 'moVB', 'soVB'}
indicates which updates to perform for local/global steps
* K : int
number of components
* alpha : float
scalar symmetric Dirichlet prior on mixture weights
Attributes for VB
---------
TODO
'''
def __init__(self, inferType, priorDict=dict()):
self.EStepLaps = 25
super(FiniteSMSB, self).__init__(inferType, priorDict)
self.set_prior(**priorDict)
self.K = 0
self.Npair = 0
self.hamming = 0
self.inferType = inferType
self.estZ = 0
def getSSDims(self):
''' Get dimensions of interactions between components.
Overrides default of ('K',), as we need E_log_soft_ev to be
dimension E x K x K
'''
return ('K', 'K',)
def calc_local_params(self, Data, LP, **kwargs):
''' Calculate local parameters for each data item and each component.
This is part of the E-step.
Note that this is the main place we differ from FiniteMixtureModel.py
Args
-------
Data : bnpy data object with Data.nObs observations
LP : local param dict with fields
E_log_soft_ev : Data.nObs x K x K array
E_log_soft_ev[n,l,m] = log p(data obs n | comps l, m)
Returns
-------
LP : local param dict with fields
resp : 2D array, size Data.nObs x K array
resp[n,l,m] = posterior responsibility comps. l,m have for
item n
'''
if self.inferType.count('EM') > 0:
raise NotImplementedError(
'EM not implemented for FiniteSMSB (yet)')
N = Data.nNodes
K = self.K
logSoftEv = LP['E_log_soft_ev'] # E x K x K
logSoftEv[np.where(Data.sourceID == Data.destID), :, :] = 0
logSoftEv = np.reshape(logSoftEv, (N, N, K, K))
if 'respSingle' not in LP:
LP['respSingle'] = np.ones((N, K)) / K
resp = LP['respSingle']
Elogpi = digamma(self.theta) - digamma(np.sum(self.theta)) # Size K
respTerm = np.zeros(K)
for lap in range(self.EStepLaps):
for i in range(Data.nNodes):
respTerm = np.einsum(
'jlm,jm->l', logSoftEv[i, :, :, :], resp) + \
np.einsum('jlm,jl->m', logSoftEv[:, i, :, :], resp)
resp[i, :] = np.exp(Elogpi + respTerm)
resp[i, :] /= np.sum(resp[i, :])
# For now, do the stupid thing of building the N^2 x K resp matrix
# (soon to change when using sparse data)
# np.einsum makes fullResp[i,j,l,m] = resp[i,l]*resp[j,m]
fullResp = np.einsum('il,jm->ijlm', resp, resp)
fullResp = fullResp.reshape((N**2, K, K))
fullResp[np.where(Data.sourceID == Data.destID), :, :] = 0
LP['resp'] = fullResp
LP['respSingle'] = resp
self.make_hard_asgn_local_params(Data, LP)
return LP
def make_hard_asgn_local_params(self, Data, LP):
''' Convert soft assignments to hard assignments.
Returns
--------
LP : local params dict, with new fields
Z : 1D array, size N
Z[n] is an integer in range {0, 1, 2, ... K-1}
resp : 2D array, size N x K+1 (with final column empty)
resp[n,k] = 1 iff Z[n] == k
'''
Z = np.argmax(LP['respSingle'], axis=1)
self.estZ = Z
def get_global_suff_stats(self, Data, LP, doPrecompEntropy=None, **kwargs):
''' Calculate the sufficient statistics for global parameter updates
Only adds stats relevant for this allocModel.
Other stats are added by the obsModel.
Args
-------
Data : bnpy data object
LP : local param dict with fields
resp : Data.nObs x K array,
where resp[n,k] = posterior resp of comp k
doPrecompEntropy : boolean flag
indicates whether to precompute ELBO terms in advance
used for memoized learning algorithms (moVB)
Returns
-------
SS : SuffStats for K components, with field
N : vector of dimension K,
effective number of observations assigned to each comp
Npair : matrix of dimensions K x K, where Npair[l,m] =
effective # of obs x_{ij} with z_{il} and z_{jm}
'''
Npair = np.sum(LP['resp'], axis=0)
self.Npair = Npair
N = np.sum(LP['respSingle'], axis=0)
SS = SuffStatBag(K=N.shape[0], D=Data.dim)
SS.setField('Npair', Npair, dims=('K', 'K'))
SS.setField('N', N, dims=('K',))
if doPrecompEntropy is not None:
ElogqZ_vec = self.E_logqZ(LP)
SS.setELBOTerm('ElogqZ', ElogqZ_vec, dims=('K',))
return SS
def E_logqZ(self, LP):
return np.sum(
LP['respSingle'] * np.log(LP['respSingle'] + EPS), axis=0)
def to_dict(self):
myDict = super(FiniteSMSB, self).to_dict()
myDict['Npair'] = self.Npair
myDict['estZ'] = self.estZ
return myDict
|
f8560e2718d68cd6a0041614fdf6460c4127d389
|
7d0ec90fa17c202d71290a526fb11b1c5f6a7a92
|
/test/test_continuous.py
|
2174368a891df9b21edc2c8706c699a6e5a6c8a2
|
[
"BSD-3-Clause"
] |
permissive
|
airspeed-velocity/asv
|
511aeb5730bfb017fcb56dc626b356ba3255c9a6
|
880a237ab39e566383e93340a3c895616f553f1d
|
refs/heads/master
| 2023-08-28T09:13:26.092337
| 2023-08-23T22:37:04
| 2023-08-23T22:37:04
| 14,215,348
| 614
| 112
|
BSD-3-Clause
| 2023-09-11T04:46:14
| 2013-11-07T20:43:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,919
|
py
|
test_continuous.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
import re
from asv.results import iter_results_for_machine
from asv import util
from . import tools
from .tools import get_default_environment_type
def test_continuous(capfd, basic_conf_2):
tmpdir, local, conf, machine_file = basic_conf_2
python = "{0[0]}.{0[1]}".format(sys.version_info)
env_type = get_default_environment_type(conf, python)
env_spec = ("-E", env_type + ":" + python)
# Check that asv continuous runs
tools.run_asv_with_conf(conf, 'continuous',
f"{util.git_default_branch()}^", '--show-stderr',
'--bench=params_examples.track_find_test',
'--bench=params_examples.track_param',
'--bench=time_examples.TimeSuite.time_example_benchmark_1',
'--attribute=repeat=1', '--attribute=number=1',
'--attribute=warmup_time=0',
*env_spec, _machine_file=machine_file)
text, err = capfd.readouterr()
assert "SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY" in text
assert "PERFORMANCE INCREASED" in text or "PERFORMANCE DECREASED" in text
# Check output, but not the whole row
pattern = r"params_examples\.track_find_test\(2\) \[orangutan\/\w+-py"
assert re.search(pattern, text) is not None
assert "params_examples.ClassOne" in text
# Check rounds were interleaved (timing benchmark was run twice)
assert re.search(r"For.*commit [a-f0-9]+ (<[a-z0-9~^]+> )?\(round 1/2\)", text, re.M), text
result_found = False
for results in iter_results_for_machine(conf.results_dir, "orangutan"):
result_found = True
stats = results.get_result_stats('time_examples.TimeSuite.time_example_benchmark_1', [])
assert stats[0]['repeat'] == 2
assert result_found
|
ed947b6e63c40879b9b77662339d28d4673cb062
|
fa3f6d4e9169fb95f828013d179d03accdff381b
|
/grr/server/grr_response_server/authorization/groups.py
|
e05d7f331d6ab2aa68c7723244c3d8b0da501f16
|
[
"Apache-2.0"
] |
permissive
|
google/grr
|
c51a2bd251ed2f7adae538541990a2cc01fdcc8c
|
44c0eb8c938302098ef7efae8cfd6b90bcfbb2d6
|
refs/heads/master
| 2023-09-05T20:02:36.823914
| 2023-07-26T09:34:09
| 2023-07-26T09:34:09
| 14,909,673
| 4,683
| 927
|
Apache-2.0
| 2023-07-26T09:34:10
| 2013-12-04T00:17:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,250
|
py
|
groups.py
|
#!/usr/bin/env python
"""Group authorization checking."""
import logging
from grr_response_core import config
from grr_response_core.lib.registry import MetaclassRegistry
class GroupAccessManager(metaclass=MetaclassRegistry):
__abstract = True # pylint: disable=g-bad-name
def AuthorizeGroup(self, group, subject):
raise NotImplementedError()
def MemberOfAuthorizedGroup(self, unused_username, unused_subject):
raise NotImplementedError()
class NoGroupAccess(GroupAccessManager):
"""Placeholder class for enabling group ACLs.
By default GRR doesn't have the concept of groups. To add it, override this
class with a module in lib/local/groups.py that inherits from the same
superclass. This class should be able to check group membership in whatever
system you use: LDAP/AD/etc.
"""
def AuthorizeGroup(self, group, subject):
raise NotImplementedError("Replace this class to use group authorizations.")
def MemberOfAuthorizedGroup(self, unused_username, unused_subject):
return False
def CreateGroupAccessManager():
group_mgr_cls = config.CONFIG["ACL.group_access_manager_class"]
logging.debug("Using group access manager: %s", group_mgr_cls)
return GroupAccessManager.classes[group_mgr_cls]()
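# Hedged sketch: the NoGroupAccess docstring above says to override
# GroupAccessManager from a local module. The class below is a hypothetical,
# in-memory illustration of that contract (its name and the backing dict are
# assumptions, not part of the GRR codebase).
class StaticGroupAccess(GroupAccessManager):
  """Toy group manager keeping authorizations in a dict (illustrative only)."""
  def __init__(self):
    self._authorized = {}  # subject -> set of authorized group names
  def AuthorizeGroup(self, group, subject):
    self._authorized.setdefault(subject, set()).add(group)
  def MemberOfAuthorizedGroup(self, username, subject):
    # A real implementation would resolve the user's groups via LDAP/AD and
    # intersect them with the authorized set for this subject; here the
    # username is naively treated as a group name purely for demonstration.
    return username in self._authorized.get(subject, set())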
|
0420d1874cab56a9ac8170b461142996ad98f898
|
6416b746ee71d897789eab1e450000831674dbd0
|
/tests/unit/cli/manager/test_config_manager.py
|
eefb8e6e3a656c9f8c1df5dff2be714a6c46beb0
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/training_extensions
|
c921f83ad52311af96ff45ae0b88d0aecddd855b
|
80454808b38727e358e8b880043eeac0f18152fb
|
refs/heads/develop
| 2023-08-31T06:29:07.229339
| 2023-08-31T01:57:26
| 2023-08-31T01:57:26
| 154,843,614
| 397
| 230
|
Apache-2.0
| 2023-09-14T06:17:01
| 2018-10-26T14:02:29
|
Python
|
UTF-8
|
Python
| false
| false
| 28,758
|
py
|
test_config_manager.py
|
import argparse
import os
import tempfile
from pathlib import Path
import os
import shutil
import pytest
from omegaconf import DictConfig, OmegaConf
from otx.cli.manager.config_manager import (
DEFAULT_MODEL_TEMPLATE_ID,
ConfigManager,
set_workspace,
)
from otx.cli.registry import Registry
from otx.cli.utils.errors import (
CliException,
ConfigValueError,
FileNotExistError,
NotSupportedError,
)
from tests.test_suite.e2e_test_system import e2e_pytest_unit
def test_set_workspace():
task = "CLASSIFICATION"
root = "/home/user/data"
name = "otx-workspace"
expected_path = f"{root}/{name}-{task}"
assert set_workspace(task, root, name) == expected_path
expected_path = f"./{name}-{task}"
assert set_workspace(task, name=name) == expected_path
@pytest.fixture
def config_manager(mocker):
args = mocker.MagicMock()
args.template = "."
args.config_path = "path/to/config.yaml"
args.workspace_path = "path/to/workspace"
args.mode = "train"
args.task_type = "classification"
args.train_type = "incremental"
return ConfigManager(args)
class TestConfigManager:
def get_default_template(self, otx_root, task_type):
otx_registry = Registry(otx_root).filter(task_type=task_type)
return otx_registry.get(DEFAULT_MODEL_TEMPLATE_ID[task_type.upper()])
@e2e_pytest_unit
def test_check_workspace(self, mocker, config_manager):
mock_exists = mocker.patch("otx.cli.manager.config_manager.Path.exists")
        # Define the return value of the mocked `Path.exists`
mock_exists.return_value = True
# Call the function and check the returned value
assert config_manager.check_workspace()
mock_exists.return_value = False
assert not config_manager.check_workspace()
@e2e_pytest_unit
def test_get_arg_data_yaml(self, mocker):
# Call the function and check the returned value
args = mocker.MagicMock()
args.template = "."
args.train_data_roots = "path/to/data/train"
args.train_ann_files = None
args.val_data_roots = "path/to/data/val"
args.val_ann_files = None
args.test_data_roots = "path/to/data/test"
args.test_ann_files = None
args.unlabeled_data_roots = None
args.unlabeled_file_list = None
args.mode = "train"
config_manager = ConfigManager(args)
assert config_manager._get_arg_data_yaml() == {
"data": {
"train": {"ann-files": None, "data-roots": "path/to/data/train"},
"val": {"ann-files": None, "data-roots": "path/to/data/val"},
"test": {"ann-files": None, "data-roots": None},
"unlabeled": {"file-list": None, "data-roots": None},
}
}
config_manager.mode = "test"
assert config_manager._get_arg_data_yaml() == {
"data": {
"train": {"ann-files": None, "data-roots": None},
"val": {"ann-files": None, "data-roots": None},
"test": {"ann-files": None, "data-roots": "path/to/data/test"},
"unlabeled": {"file-list": None, "data-roots": None},
}
}
args.unlabeled_data_roots = "path/to/data/unlabeled"
config_manager = ConfigManager(args)
assert config_manager._get_arg_data_yaml() == {
"data": {
"train": {"ann-files": None, "data-roots": "path/to/data/train"},
"val": {"ann-files": None, "data-roots": "path/to/data/val"},
"test": {"ann-files": None, "data-roots": None},
"unlabeled": {"file-list": None, "data-roots": "path/to/data/unlabeled"},
}
}
@e2e_pytest_unit
def test_create_empty_data_cfg(self, config_manager):
# Call the function and check the returned value
assert config_manager._create_empty_data_cfg() == {
"data": {
"train": {"ann-files": None, "data-roots": None},
"val": {"ann-files": None, "data-roots": None},
"test": {"ann-files": None, "data-roots": None},
"unlabeled": {"file-list": None, "data-roots": None},
}
}
@e2e_pytest_unit
def test_export_data_cfg(self, mocker, config_manager):
# Mock data
data_cfg = {
"data": {
"train": {"ann-files": "path/to/train/ann", "data-roots": "path/to/train/images"},
"val": {"ann-files": "path/to/val/ann", "data-roots": "path/to/val/images"},
"test": {"ann-files": "path/to/test/ann", "data-roots": "path/to/test/images"},
"unlabeled": {"file-list": "path/to/unlabeled/files", "data-roots": "path/to/unlabeled/images"},
}
}
# Create temporary file
with tempfile.NamedTemporaryFile(mode="w", delete=False) as temp_file:
output_path = temp_file.name
# Mock write_text function
mock_write_text = mocker.patch("pathlib.Path.write_text")
# Test the function
config_manager._export_data_cfg(data_cfg, output_path)
# Assertions
mock_write_text.assert_called_once_with(OmegaConf.to_yaml(data_cfg), encoding="utf-8")
@e2e_pytest_unit
def test_build_workspace(self, mocker):
# Setup
task_type = "CLASSIFICATION"
train_type = "Semisupervised"
workspace_path = "./otx-workspace"
args = mocker.Mock()
args.autosplit = None
args.workspace = workspace_path
config_manager = ConfigManager(args)
template = self.get_default_template(config_manager.otx_root, task_type)
config_manager.template = template
config_manager.train_type = train_type
config_manager.task_type = task_type
pathlib_mkdir_mock = mocker.patch("pathlib.Path.mkdir")
pathlib_exists_mock = mocker.patch("pathlib.Path.exists", return_value=True)
set_workspace_mock = mocker.patch("otx.cli.manager.config_manager.set_workspace", return_value=workspace_path)
parse_model_template_mock = mocker.patch(
"otx.cli.manager.config_manager.parse_model_template", return_value=template
)
gen_params_dict_from_args_mock = mocker.patch(
"otx.cli.manager.config_manager.gen_params_dict_from_args", return_value={}
)
omageconf_load_mock = mocker.patch("otx.cli.manager.config_manager.OmegaConf.load", return_value=template)
omageconf_merge_mock = mocker.patch("otx.cli.manager.config_manager.OmegaConf.merge", return_value=template)
path_write_text_mock = mocker.patch("otx.cli.manager.config_manager.Path.write_text")
config_manager_check_workspace_mock = mocker.patch(
"otx.cli.manager.config_manager.ConfigManager.check_workspace", return_value=False
)
config_manager_copy_config_files_mock = mocker.patch(
"otx.cli.manager.config_manager.ConfigManager._copy_config_files"
)
shutil_copyfile_mock = mocker.patch("shutil.copyfile")
# Run
config_manager.build_workspace()
# Check
set_workspace_mock.assert_called_once_with(task=task_type)
parse_model_template_mock.assert_called_once_with(str(config_manager.workspace_root / "template.yaml"))
# Calls
pathlib_mkdir_mock.assert_called()
pathlib_exists_mock.assert_called()
gen_params_dict_from_args_mock.assert_called_once_with(args)
omageconf_load_mock.assert_called()
omageconf_merge_mock.assert_called()
path_write_text_mock.assert_called()
config_manager_check_workspace_mock.assert_called()
config_manager_copy_config_files_mock.assert_called()
shutil_copyfile_mock.assert_called()
@e2e_pytest_unit
def test_update_data_config(self, config_manager, tmp_dir_path):
data_yaml = {
"data": {
"train": {"data-roots": "/path/to/train/data", "ann-files": None},
"val": {"data-roots": "/path/to/val/data", "ann-files": None},
"test": {"data-roots": "/path/to/test/data", "ann-files": None},
"unlabeled": {"data-roots": "/path/to/unlabeled/data", "file-list": "/path/to/unlabeled/filelist"},
}
}
data_yaml_path = tmp_dir_path / "data.yaml"
OmegaConf.save(data_yaml, str(data_yaml_path))
config_manager.update_data_config(OmegaConf.load(str(data_yaml_path)))
assert config_manager.data_config == {
"train_subset": {"data_roots": "/path/to/train/data", "ann_files": None},
"val_subset": {
"data_roots": "/path/to/val/data",
"ann_files": None,
},
"test_subset": {
"data_roots": "/path/to/test/data",
"ann_files": None,
},
"unlabeled_subset": {
"data_roots": "/path/to/unlabeled/data",
"file_list": "/path/to/unlabeled/filelist",
},
}
data_yaml["data"]["train"]["data-roots"] = "/path/to/train2/data"
data_yaml_path = tmp_dir_path / "data.yaml"
OmegaConf.save(data_yaml, str(data_yaml_path))
config_manager.update_data_config(OmegaConf.load(str(data_yaml_path)))
assert config_manager.data_config == {
"train_subset": {"data_roots": "/path/to/train2/data", "ann_files": None},
"val_subset": {"data_roots": "/path/to/val/data", "ann_files": None},
"test_subset": {"data_roots": "/path/to/test/data", "ann_files": None},
"unlabeled_subset": {
"data_roots": "/path/to/unlabeled/data",
"file_list": "/path/to/unlabeled/filelist",
},
}
data_yaml_path = tmp_dir_path / "data.yaml"
data_yaml["data"].pop("unlabeled")
OmegaConf.save(data_yaml, str(data_yaml_path))
config_manager.update_data_config(OmegaConf.load(str(data_yaml_path)))
assert config_manager.data_config == {
"train_subset": {"data_roots": "/path/to/train2/data", "ann_files": None},
"val_subset": {"data_roots": "/path/to/val/data", "ann_files": None},
"test_subset": {"data_roots": "/path/to/test/data", "ann_files": None},
"unlabeled_subset": {
"data_roots": "/path/to/unlabeled/data",
"file_list": "/path/to/unlabeled/filelist",
},
}
@e2e_pytest_unit
def test_get_hyparams_config(self, mocker):
mock_hyper_parameters = {
"learning_rate": {
"type": "FLOAT",
"default_value": "0.01",
"max_value": "1.0",
"min_value": "0.0",
"affects_outcome_of": ["TRAINING"],
},
"batch_size": {
"type": "INTEGER",
"default_value": "16",
"max_value": "128",
"min_value": "1",
"affects_outcome_of": ["TRAINING", "TESTING"],
},
}
mock_template = DictConfig({"hyper_parameters": DictConfig({"data": mock_hyper_parameters})})
parser = argparse.ArgumentParser()
parser.add_argument("--template")
parser.add_argument(
"--learning_rate",
dest="params.learning_rate",
)
parser.add_argument(
"--batch_size",
dest="params.batch_size",
)
mock_input = ["--learning_rate", "0.5", "--batch_size", "8"]
mock_args = parser.parse_args(mock_input)
expected_hyper_parameters = {
"learning_rate": {
"type": "FLOAT",
"default_value": "0.01",
"max_value": "1.0",
"min_value": "0.0",
"affects_outcome_of": ["TRAINING"],
"value": "0.5",
},
"batch_size": {
"type": "INTEGER",
"default_value": "16",
"max_value": "128",
"min_value": "1",
"affects_outcome_of": ["TRAINING", "TESTING"],
"value": "8",
},
}
mock_create = mocker.patch("otx.cli.manager.config_manager.create", return_value=expected_hyper_parameters)
config_manager = ConfigManager(mock_args)
config_manager.template = mock_template
config_manager.get_hyparams_config()
mock_create.assert_called_once_with(expected_hyper_parameters)
@e2e_pytest_unit
def test_data_config_file_path(self, mocker, tmp_dir_path):
parser = argparse.ArgumentParser()
parser.add_argument("--template")
parser.add_argument("--data")
args = parser.parse_args([])
config_manager = ConfigManager(args)
# set up test workspace
workspace_root = tmp_dir_path / "test_data_config"
config_manager.workspace_root = workspace_root
# workspace_root.mkdir(exist_ok=True, parents=True)
assert config_manager.data_config_file_path == workspace_root / "data.yaml"
# expected file path
mock_exists = mocker.patch("otx.cli.manager.config_manager.Path.exists", return_value=False)
expected_file_path = tmp_dir_path / "data.yaml"
args = parser.parse_args(["--data", str(expected_file_path)])
config_manager.args = args
with pytest.raises(FileNotExistError):
config_manager.data_config_file_path
mock_exists.return_value = True
assert config_manager.data_config_file_path == expected_file_path
@e2e_pytest_unit
def test_configure_template(self, mocker):
# Given
mock_args = mocker.MagicMock()
mock_args.train_data_roots = ["/path/to/train/data"]
mock_args.template = None
mock_workspace_root = mocker.MagicMock()
mock_workspace_root.exists.return_value = True
mock_template = DictConfig({"name": "template_name", "task_type": "CLASSIFICATION"})
mock_check_workspace = mocker.patch(
"otx.cli.manager.config_manager.ConfigManager.check_workspace", return_value=True
)
mocker.patch("otx.cli.manager.config_manager.ConfigManager._get_template", return_value=mock_template)
mocker.patch("otx.cli.manager.config_manager.ConfigManager._get_train_type", return_value="Incremental")
mock_parse_model_template = mocker.patch(
"otx.cli.manager.config_manager.parse_model_template", return_value=mock_template
)
config_manager = ConfigManager(args=mock_args, workspace_root=mock_workspace_root)
# When
config_manager.configure_template()
# Then
assert config_manager.task_type == "CLASSIFICATION"
assert config_manager.model == "template_name"
assert config_manager.train_type == "Incremental"
config_manager.mode = "build"
mocker.patch("otx.cli.manager.config_manager.ConfigManager._check_rebuild", return_value=True)
config_manager.configure_template()
assert config_manager.rebuild
assert config_manager.task_type == "CLASSIFICATION"
assert config_manager.model == "template_name"
assert config_manager.train_type == "Incremental"
mock_check_workspace.return_value = False
mocker.patch("pathlib.Path.exists", return_value=True)
config_manager.template = "test/template"
config_manager.configure_template()
mock_parse_model_template.assert_called_with("test/template")
config_manager.template = None
config_manager.task_type = None
mock_check_workspace = mocker.patch(
"otx.cli.manager.config_manager.ConfigManager.auto_task_detection", return_value="CLASSIFICATION"
)
config_manager.configure_template()
assert config_manager.task_type == "CLASSIFICATION"
assert config_manager.model == "template_name"
assert config_manager.train_type == "Incremental"
mocker.patch("otx.cli.manager.config_manager.ConfigManager.check_workspace", return_value=False)
mocker.patch("otx.cli.manager.config_manager.hasattr", return_value=False)
empty_args = mocker.MagicMock()
empty_args.template = None
config_manager = ConfigManager(args=empty_args, mode="train")
config_manager.template = None
config_manager.task_type = None
with pytest.raises(ConfigValueError, match="Can't find the argument 'train_data_roots'"):
config_manager.configure_template()
config_manager = ConfigManager(args=empty_args, mode="eval")
with pytest.raises(ConfigValueError, match="No appropriate template or task-type was found."):
config_manager.configure_template()
@e2e_pytest_unit
def test__check_rebuild(self, mocker):
mock_template = mocker.MagicMock()
mock_template.task_type = "CLASSIFICATION"
mock_args = mocker.MagicMock()
mock_args.mode = "build"
mock_args.task = "DETECTION"
mock_args.template = mock_template
config_manager = ConfigManager(mock_args)
with pytest.raises(NotSupportedError):
config_manager._check_rebuild()
config_manager.template.task_type = "DETECTION"
config_manager.args.model = None
config_manager.args.train_type = ""
assert not config_manager._check_rebuild()
config_manager.args.model = "SSD"
config_manager.template.name = "MobileNetV2-ATSS"
config_manager.args.train_type = "Semisupervised"
assert config_manager._check_rebuild()
@e2e_pytest_unit
@pytest.mark.parametrize("mode", ["build", "train", "eval"])
@pytest.mark.parametrize("task_type", ["", "VISUAL_PROMPTING"])
def test_configure_data_config(self, mocker, mode: str, task_type: str):
data_yaml = {
"data": {
"train": {"ann-files": None, "data-roots": "train/data/roots"},
"val": {"ann-files": None, "data-roots": None},
"test": {"ann-files": None, "data-roots": None},
"unlabeled": {"file-list": None, "data-roots": None},
}
}
mock_configure_dataset = mocker.patch(
"otx.cli.manager.config_manager.configure_dataset", return_value=data_yaml
)
mock_auto_split = mocker.patch("otx.cli.manager.config_manager.ConfigManager.auto_split_data", return_value={})
mock_get_data_yaml = mocker.patch(
"otx.cli.manager.config_manager.ConfigManager._get_arg_data_yaml", return_value=data_yaml
)
mock_save_data = mocker.patch("otx.cli.manager.config_manager.ConfigManager._save_data")
mock_export_data_cfg = mocker.patch("otx.cli.manager.config_manager.ConfigManager._export_data_cfg")
mock_update_data_config = mocker.patch("otx.cli.manager.config_manager.ConfigManager.update_data_config")
mock_args = mocker.MagicMock()
mock_args.mode = mode
config_manager = ConfigManager(mock_args)
config_manager.train_type = "Incremental"
config_manager.task_type = task_type
config_manager.mode = mode
config_manager.configure_data_config(update_data_yaml=True)
mock_configure_dataset.assert_called_once()
mock_export_data_cfg.assert_called_once()
mock_update_data_config.assert_called_once_with(data_yaml)
if mode in ("train", "build"):
mock_auto_split.assert_called_once()
mock_get_data_yaml.assert_called_once()
mock_save_data.assert_called_once()
else:
# mode == "eval"
mock_auto_split.assert_not_called()
mock_get_data_yaml.assert_not_called()
mock_save_data.assert_not_called()
if task_type == "VISUAL_PROMPTING" and mode == "train":
assert data_yaml.get("options", False)
@e2e_pytest_unit
def test__get_train_type_incremental(self, mocker):
"""General usage"""
mock_args = mocker.MagicMock()
config_manager = ConfigManager(args=mock_args)
config_manager.mode = "build"
config_manager.args.train_type = "Incremental"
assert config_manager._get_train_type() == "Incremental"
        # test train_type when the unlabeled root is None and the
        # train data root is an ordinary dataset folder
config_manager.args.train_type = None
config_manager.args.unlabeled_data_roots = None
config_manager.args.train_data_roots = "tests/assets/classification_dataset"
config_manager.args.val_data_roots = "tests/assets/classification_dataset"
assert config_manager._get_train_type(ignore_args=False) == "Incremental"
mock_template = mocker.MagicMock()
mock_template.hyper_parameters.parameter_overrides = {
"algo_backend": {"train_type": {"default_value": "Incremental"}}
}
config_manager.template = mock_template
assert config_manager._get_train_type(ignore_args=True) == "Incremental"
config_manager.template.hyper_parameters.parameter_overrides = {}
assert config_manager._get_train_type(ignore_args=True) == "Incremental"
        # train_data_roots does not exist
config_manager.args.train_type = None
config_manager.args.train_data_roots = "non_exist_dir"
with pytest.raises(ValueError):
config_manager._get_train_type(ignore_args=False)
# test val_data_roots is None, train-data-roots contains full dataset format
config_manager.args.val_data_roots = None
config_manager.args.train_data_roots = "tests/assets/classification_dataset"
# auto-split
assert config_manager._get_train_type(ignore_args=False) == "Incremental"
@e2e_pytest_unit
def test__get_train_type_semisuprvised(self, mocker):
"""Auto train type detection"""
mock_args = mocker.MagicMock()
config_manager = ConfigManager(args=mock_args)
config_manager.args.train_data_roots = "tests/assets/classification_dataset"
config_manager.args.train_type = "Semisupervised"
assert config_manager._get_train_type() == "Semisupervised"
# test train_type unlabeled root is not None
config_manager.args.train_type = None
config_manager.args.unlabeled_data_roots = "tests/assets/unlabeled_dataset/a"
assert config_manager._get_train_type(ignore_args=False) == "Semisupervised"
        # test train_type when the unlabeled root does not exist
config_manager.args.unlabeled_data_roots = "non_exist_dir"
with pytest.raises(ValueError):
config_manager._get_train_type(ignore_args=False)
tempdir = tempfile.mkdtemp()
# unlabeled root is empty
config_manager.args.unlabeled_data_roots = str(tempdir)
with pytest.raises(ValueError):
config_manager._get_train_type(ignore_args=False)
Path(f"{tempdir}/file.jpg").touch()
        # number of images in unlabeled root is insufficient
assert config_manager._get_train_type(ignore_args=False) == "Incremental"
Path(f"{tempdir}/file1.jpg").touch()
Path(f"{tempdir}/file2.jpg").touch()
assert config_manager._get_train_type(ignore_args=False) == "Semisupervised"
@e2e_pytest_unit
def test__get_train_type_selfsupervised(self, mocker):
"""Auto train type detection"""
mock_args = mocker.MagicMock()
config_manager = ConfigManager(args=mock_args)
config_manager.args.train_type = "Selfsupervised"
assert config_manager._get_train_type() == "Selfsupervised"
config_manager.args.train_type = None
config_manager.args.unlabeled_data_roots = None
# test folder with only images
config_manager.args.train_data_roots = "tests/assets/unlabeled_dataset/a"
config_manager.args.val_data_roots = None
assert config_manager._get_train_type(ignore_args=False) == "Selfsupervised"
# test val_data_roots is not None
config_manager.args.val_data_roots = "tests/assets/unlabeled_dataset"
assert config_manager._get_train_type(ignore_args=False) == "Selfsupervised"
@e2e_pytest_unit
def test_auto_task_detection(self, mocker):
mock_args = mocker.MagicMock()
config_manager = ConfigManager(args=mock_args)
with pytest.raises(CliException):
config_manager.auto_task_detection("")
mock_get_data_format = mocker.patch(
"otx.cli.manager.config_manager.DatasetManager.get_data_format", return_value="Unexpected"
)
with pytest.raises(ConfigValueError):
config_manager.auto_task_detection("data/roots")
mock_get_data_format.return_value = "coco"
assert config_manager.auto_task_detection("data/roots") == "DETECTION"
@e2e_pytest_unit
def test_auto_split_data(self, mocker):
mock_get_data_format = mocker.patch(
"otx.cli.manager.config_manager.DatasetManager.get_data_format", return_value="coco"
)
mock_import_dataset = mocker.patch(
"otx.cli.manager.config_manager.DatasetManager.import_dataset", return_value=None
)
mock_get_train_dataset = mocker.patch(
"otx.cli.manager.config_manager.DatasetManager.get_train_dataset", return_value="train_dataset"
)
mock_get_val_dataset = mocker.patch(
"otx.cli.manager.config_manager.DatasetManager.get_val_dataset", return_value="val_dataset"
)
mock_auto_split = mocker.patch(
"otx.cli.manager.config_manager.DatasetManager.auto_split",
return_value={"train": "auto_train", "val": "auto_val"},
)
mock_args = mocker.MagicMock()
config_manager = ConfigManager(args=mock_args)
assert config_manager.auto_split_data("test_data_root", task="DETECTION") == {
"train": "train_dataset",
"val": "val_dataset",
}
mock_get_val_dataset.return_value = None
assert config_manager.auto_split_data("test_data_root", task="DETECTION") == {
"train": "auto_train",
"val": "auto_val",
}
mock_get_data_format.return_value = "Unexpected"
assert config_manager.auto_split_data("test_data_root", task="DETECTION") is None
mock_get_data_format.assert_called()
mock_import_dataset.assert_called()
mock_get_train_dataset.assert_called()
mock_get_val_dataset.assert_called()
mock_auto_split.assert_called()
@e2e_pytest_unit
def test_get_dataset_config(self, mocker):
mock_args = mocker.MagicMock()
config_manager = ConfigManager(args=mock_args)
config_manager.task_type = "DETECTION"
config_manager.data_config = {
"train_subset": {"data_roots": "train_path"},
"val_subset": {"data_roots": "val_path"},
"test_subset": {"data_roots": "test_path"},
}
dataset_config = config_manager.get_dataset_config(["train", "val", "test"])
assert dataset_config["task_type"] == "DETECTION"
assert "train_data_roots" in dataset_config
assert "val_data_roots" in dataset_config
assert "test_data_roots" in dataset_config
class TestConfigManagerEncryptionKey:
encryption_key = "dummy_key"
@pytest.fixture(scope="function")
def fxt_with_args(self, config_manager):
config_manager.args.encryption_key = self.encryption_key
return config_manager
@pytest.fixture
def fxt_with_envs(self, config_manager, mocker):
config_manager.args.encryption_key = None
k = mocker.patch.dict(os.environ, {"ENCRYPTION_KEY": self.encryption_key})
yield config_manager
@pytest.fixture
def fxt_with_both(self, fxt_with_args, mocker):
k = mocker.patch.dict(os.environ, {"ENCRYPTION_KEY": self.encryption_key})
yield fxt_with_args
@e2e_pytest_unit
@pytest.mark.parametrize(
"testcase, expected",
[("fxt_with_args", True), ("fxt_with_envs", True), ("config_manager", False)],
)
def test_encryption_key(self, testcase, expected, request):
config_manager = request.getfixturevalue(testcase)
actual = config_manager.encryption_key == self.encryption_key
assert actual == expected
@e2e_pytest_unit
def test_encryption_key_error_raise(self, fxt_with_both):
with pytest.raises(ValueError):
assert fxt_with_both.encryption_key == self.encryption_key
|
d2186b7d41ab424d6ced64780edd84fe8ff75533
|
dec5a11d95f7b87da9985362dca4dc573a8bd805
|
/tdda/referencetest/examples/exercises-pytest/exercise2/conftest.py
|
cfd48b45f6acf33577741a986af16756498b9220
|
[
"MIT"
] |
permissive
|
tdda/tdda
|
37d37e51c13362e65af07fe81708bb126fa568eb
|
08e1ec6d7397f2b0f527ac59698180ba54e53814
|
refs/heads/master
| 2023-02-19T00:08:40.983473
| 2023-02-10T19:49:27
| 2023-02-10T19:49:27
| 58,143,323
| 275
| 34
|
MIT
| 2019-07-02T14:44:33
| 2016-05-05T16:00:57
|
Python
|
UTF-8
|
Python
| false
| false
| 476
|
py
|
conftest.py
|
# -*- coding: utf-8 -*-
"""
conftest.py: example pytest configuration for tdda.referencetest
Source repository: http://github.com/tdda/tdda
License: MIT
Copyright (c) Stochastic Solutions Limited 2016-2019
"""
from tdda.referencetest.pytestconfig import (pytest_addoption,
pytest_collection_modifyitems,
set_default_data_location,
ref)
|
55c277fc0639241a9e296f340ff03b809b90a623
|
77f85a550c28212071067cb122ebfd93eb705190
|
/pde/tools/cuboid.py
|
f3ed8e4d386181b796c105dbe5bbfaf1abc6639b
|
[
"MIT"
] |
permissive
|
zwicker-group/py-pde
|
baf215a733508fe86093ea9e818228bbb3b34c58
|
d9c931a8361eaf27bc3766daba26edc11756b5f5
|
refs/heads/master
| 2023-08-31T06:36:34.514617
| 2023-08-30T18:15:44
| 2023-08-30T18:15:44
| 242,093,001
| 327
| 45
|
MIT
| 2023-08-31T13:16:24
| 2020-02-21T08:42:23
|
Python
|
UTF-8
|
Python
| false
| false
| 8,385
|
py
|
cuboid.py
|
"""
An n-dimensional, axes-aligned cuboid
This module defines the :class:`Cuboid` class, which represents an n-dimensional
cuboid that is aligned with the axes of a Cartesian coordinate system.
.. codeauthor:: David Zwicker <david.zwicker@ds.mpg.de>
"""
import itertools
from typing import List, Tuple
import numpy as np
from .typing import FloatNumerical
class Cuboid:
"""class that represents a cuboid in :math:`n` dimensions"""
def __init__(self, pos, size, mutable: bool = True):
"""defines a cuboid from a position and a size vector
Args:
pos (list):
The position of the lower left corner. The length of this list
determines the dimensionality of space
size (list):
The size of the cuboid along each dimension.
mutable (bool):
Flag determining whether the cuboid parameters can be changed
"""
self._mutable = mutable
# set position and adjust mutable status later
self.pos = np.array(pos, copy=True)
self.size = size # implicitly sets correct shape
self.pos.flags.writeable = self.mutable
@property
def size(self) -> np.ndarray:
return self._size
@size.setter
def size(self, value: FloatNumerical):
self._size = np.array(value, self.pos.dtype) # make copy
if self.pos.shape != self._size.shape:
raise ValueError(
f"Size vector (dim={len(self._size)}) must have the same "
f"dimension as the position vector (dim={len(self.pos)})"
)
# flip Cuboid with negative size
neg = self._size < 0
self.pos[neg] += self._size[neg]
self._size = np.abs(self._size)
self._size.flags.writeable = self.mutable
@property
def corners(self) -> Tuple[np.ndarray, np.ndarray]:
"""return coordinates of two extreme corners defining the cuboid"""
return np.copy(self.pos), self.pos + self.size
@property
def mutable(self) -> bool:
return self._mutable
@mutable.setter
def mutable(self, value: bool):
self._mutable = bool(value)
self.pos.flags.writeable = self._mutable
self._size.flags.writeable = self._mutable
@classmethod
def from_points(cls, p1: np.ndarray, p2: np.ndarray, **kwargs) -> "Cuboid":
"""create cuboid from two points
Args:
p1 (list): Coordinates of first corner point
p2 (list): Coordinates of second corner point
Returns:
Cuboid: cuboid with positive size
"""
p1 = np.asarray(p1)
p2 = np.asarray(p2)
return cls(p1, p2 - p1, **kwargs)
@classmethod
def from_bounds(cls, bounds: np.ndarray, **kwargs) -> "Cuboid":
"""create cuboid from bounds
Args:
bounds (list): Two dimensional array of axes bounds
Returns:
Cuboid: cuboid with positive size
"""
bounds = np.asarray(bounds).reshape(-1, 2)
return cls(bounds[:, 0], bounds[:, 1] - bounds[:, 0], **kwargs)
@classmethod
def from_centerpoint(
cls, centerpoint: np.ndarray, size: np.ndarray, **kwargs
) -> "Cuboid":
"""create cuboid from two points
Args:
centerpoint (list): Coordinates of the center
size (list): Size of the cuboid
Returns:
Cuboid: cuboid with positive size
"""
centerpoint = np.asarray(centerpoint)
size = np.asarray(size)
return cls(centerpoint - size / 2, size, **kwargs)
def copy(self) -> "Cuboid":
return self.__class__(self.pos, self.size)
def __repr__(self):
return "{cls}(pos={pos}, size={size})".format(
cls=self.__class__.__name__, pos=self.pos, size=self.size
)
def __add__(self, other: "Cuboid") -> "Cuboid":
"""The sum of two cuboids is the minimal cuboid enclosing both"""
if isinstance(other, Cuboid):
if self.dim != other.dim:
raise RuntimeError("Incompatible dimensions")
a1, a2 = self.corners
b1, b2 = other.corners
return self.__class__.from_points(np.minimum(a1, b1), np.maximum(a2, b2))
else:
return NotImplemented
def __eq__(self, other) -> bool:
"""override the default equality test"""
if not isinstance(other, self.__class__):
return NotImplemented
return np.all(self.pos == other.pos) and np.all(self.size == other.size) # type: ignore
@property
def dim(self) -> int:
return len(self.pos)
@property
def bounds(self) -> Tuple[Tuple[float, float], ...]:
return tuple((p, p + s) for p, s in zip(self.pos, self.size))
@property
def vertices(self) -> List[List[float]]:
"""return the coordinates of all the corners"""
return list(itertools.product(*self.bounds)) # type: ignore
@property
def diagonal(self) -> float:
"""returns the length of the diagonal"""
return np.linalg.norm(self.size) # type: ignore
@property
def surface_area(self) -> float:
"""surface area of a cuboid in :math:`n` dimensions.
The surface area is the volume of the (:math:`n-1`)-dimensional
hypercubes that bound the current cuboid:
* :math:`n=1`: the number of end points (2)
* :math:`n=2`: the perimeter of the rectangle
* :math:`n=3`: the surface area of the cuboid
"""
sides = self.size
null = sides == 0
null_count = null.sum()
if null_count == 0:
return 2 * np.sum(np.prod(sides) / sides) # type: ignore
elif null_count == 1:
return 2 * np.prod(sides[~null]) # type: ignore
else:
return 0
@property
def centroid(self):
return self.pos + self.size / 2
@centroid.setter
def centroid(self, center):
self.pos[:] = np.asanyarray(center) - self.size / 2
@property
def volume(self) -> float:
return np.prod(self.size) # type: ignore
def buffer(self, amount: FloatNumerical = 0, inplace=False) -> "Cuboid":
"""dilate the cuboid by a certain amount in all directions"""
amount = np.asarray(amount)
if inplace:
self.pos -= amount
self.size += 2 * amount
return self
else:
return self.__class__(self.pos - amount, self.size + 2 * amount)
def contains_point(self, points: np.ndarray) -> np.ndarray:
"""returns a True when `points` are within the Cuboid
Args:
points (:class:`~numpy.ndarray`): List of point coordinates
Returns:
:class:`~numpy.ndarray`: list of booleans indicating which points are inside
"""
points = np.asarray(points)
if len(points) == 0:
return points
if points.shape[-1] != self.dim:
raise ValueError(
"Last dimension of `points` must agree with "
f"cuboid dimension {self.dim}"
)
c1, c2 = self.corners
return np.all(c1 <= points, axis=-1) & np.all(points <= c2, axis=-1) # type: ignore
def asanyarray_flags(data: np.ndarray, dtype=None, writeable: bool = True):
"""turns data into an array and sets the respective flags.
A copy is only made if necessary
Args:
        data (:class:`~numpy.ndarray`): numpy array whose flags are adjusted
        dtype: the resulting dtype
        writeable (bool): Flag determining whether the result is writable
Returns:
:class:`~numpy.ndarray`:
array with same data as `data` but with flags adjusted.
"""
try:
data_writeable = data.flags.writeable
except AttributeError:
# `data` did not have the writeable flag => it's not a numpy array
result = np.array(data, dtype)
else:
if data_writeable != writeable:
# need to make a copy because the flags differ
result = np.array(data, dtype)
else:
# might have to make a copy to adjust the dtype
result = np.asanyarray(data, dtype)
# set the flags and return the array
result.flags.writeable = writeable
return result
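# Illustrative usage (assumed, not part of the module's own tests): build a
# unit square in 2D, dilate it, and query points; only methods defined above
# are used.
if __name__ == "__main__":
    c = Cuboid.from_points([0.0, 0.0], [1.0, 1.0])
    assert c.volume == 1.0 and c.surface_area == 4.0        # area 1, perimeter 4
    grown = c.buffer(0.5)                                    # dilate by 0.5 per side
    assert np.all(grown.size == 2.0)
    inside = grown.contains_point([[0.0, 0.0], [5.0, 5.0]])
    assert inside.tolist() == [True, False]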
|
91502432fffccd0900a0ff3e913adb7d874d643e
|
730430ba3b45d5728ef044863598199bfa33aaaa
|
/parl/utils/np_utils.py
|
f83c8c42f24aa11e68b234ba7c95a86681254a6b
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PARL
|
062d1b4a5335553be6cdfc33ad12f07ebbcd7310
|
3bb5fe36d245f4d69bae0710dc1dc9d1a172f64d
|
refs/heads/develop
| 2023-08-09T02:12:39.741551
| 2023-05-19T17:52:25
| 2023-05-19T17:52:25
| 131,044,128
| 3,818
| 988
|
Apache-2.0
| 2023-07-28T03:59:20
| 2018-04-25T17:54:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,137
|
py
|
np_utils.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
__all__ = ['np_softmax', 'np_cross_entropy']
def np_softmax(logits):
return np.exp(logits) / np.sum(np.exp(logits), axis=-1, keepdims=True)
def np_cross_entropy(probs, labels):
if labels.shape[-1] == 1:
# sparse label
n_classes = probs.shape[-1]
result_shape = list(labels.shape[:-1]) + [n_classes]
labels = np.eye(n_classes)[labels.reshape(-1)]
labels = labels.reshape(result_shape)
return -np.sum(labels * np.log(probs), axis=-1, keepdims=True)
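# Illustrative check (assumption: `probs` is batch x n_classes and sparse
# labels are batch x 1, following the conventions above):
if __name__ == "__main__":
    logits = np.array([[2.0, 1.0, 0.1]])
    probs = np_softmax(logits)                       # rows sum to 1
    assert np.allclose(probs.sum(axis=-1), 1.0)
    loss = np_cross_entropy(probs, np.array([[0]]))  # sparse label for class 0
    assert loss.shape == (1, 1) and loss[0, 0] > 0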
|
2fcc1c943336c62275cd4768d2caecb4d2d4789a
|
8319c9859bde5e21eba2ba60219ebe496646470b
|
/tests/whitebox/integration/_misc.py
|
7323fb547dbc18f4da87eb5584ce4f91baa33b87
|
[
"Apache-2.0"
] |
permissive
|
stratis-storage/stratis-cli
|
0be83c0903c1050ac3cf75a19121ba19be97c4a6
|
399c95edd7c37e5fb9494f7829d5355c011fb7d7
|
refs/heads/master
| 2023-08-31T23:24:02.710481
| 2023-08-30T20:18:02
| 2023-08-30T20:18:02
| 66,956,943
| 107
| 44
|
Apache-2.0
| 2023-09-08T18:25:33
| 2016-08-30T16:09:39
|
Python
|
UTF-8
|
Python
| false
| false
| 11
|
py
|
_misc.py
|
../_misc.py
|
aa229ccda567b095cb676ae6ade374d9cc761903
|
7c1f157acafec729d060847469a5cf36cd88e792
|
/cords/utils/data/datasets/SSL/augmentation/augmentation_pool.py
|
a231b5051bf856dfb2469536064a3b4439812ebf
|
[
"MIT"
] |
permissive
|
decile-team/cords
|
025415cc7b4577e01acba312908b9e12da27da9b
|
8d10c7f5d96e071f98c20e4e9ff4c41c2c4ea2af
|
refs/heads/main
| 2023-05-25T01:57:51.429546
| 2023-05-24T19:46:54
| 2023-05-24T19:46:54
| 330,041,216
| 289
| 54
|
MIT
| 2023-05-24T17:25:40
| 2021-01-15T23:02:58
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,397
|
py
|
augmentation_pool.py
|
import random
import torch
import torch.nn.functional as F
import numpy as np
from PIL import ImageOps, ImageEnhance, ImageFilter, Image
"""
For PIL.Image
"""
def autocontrast(x, *args, **kwargs):
return ImageOps.autocontrast(x.convert("RGB")).convert("RGBA")
def brightness(x, level, magnitude=10, max_level=1.8, *args, **kwargs):
level = (level / magnitude) * max_level + 0.1
return ImageEnhance.Brightness(x).enhance(level)
def color(x, level, magnitude=10, max_level=1.8, *args, **kwargs):
level = (level / magnitude) * max_level + 0.1
return ImageEnhance.Color(x).enhance(level)
def contrast(x, level, magnitude=10, max_level=1.8, *args, **kwargs):
level = (level / magnitude) * max_level + 0.1
return ImageEnhance.Contrast(x).enhance(level)
def equalize(x, *args, **kwargs):
return ImageOps.equalize(x.convert("RGB")).convert("RGBA")
def identity(x, *args, **kwargs):
return x
def invert(x, *args, **kwargs):
return ImageOps.invert(x.convert("RGB")).convert("RGBA")
def posterize(x, level, magnitude=10, max_level=4, *args, **kwargs):
level = int((level / magnitude) * max_level)
return ImageOps.posterize(x.convert("RGB"), 4 - level).convert("RGBA")
def rotate(x, level, magnitude=10, max_level=30, *args, **kwargs):
degree = int((level / magnitude) * max_level)
if random.random() > 0.5:
degree = -degree
return x.rotate(degree)
def sharpness(x, level, magnitude=10, max_level=1.8, *args, **kwargs):
level = (level / magnitude) * max_level + 0.1
return ImageEnhance.Sharpness(x).enhance(level)
def shear_x(x, level, magnitude=10, max_level=0.3, *args, **kwargs):
level = (level / magnitude) * max_level
if random.random() > 0.5:
level = -level
return x.transform(x.size, Image.AFFINE, (1, level, 0, 0, 1, 0))
def shear_y(x, level, magnitude=10, max_level=0.3, *args, **kwargs):
level = (level / magnitude) * max_level
if random.random() > 0.5:
level = -level
return x.transform(x.size, Image.AFFINE, (1, 0, 0, level, 1, 0))
def solarize(x, level, magnitude=10, max_level=256, *args, **kwargs):
level = int((level / magnitude) * max_level)
return ImageOps.solarize(x.convert("RGB"), 256 - level).convert("RGBA")
def translate_x(x, level, magnitude=10, max_level=10, *args, **kwargs):
level = int((level / magnitude) * max_level)
if random.random() > 0.5:
level = -level
return x.transform(x.size, Image.AFFINE, (1, 0, level, 0, 1, 0))
def translate_y(x, level, magnitude=10, max_level=10, *args, **kwargs):
level = int((level / magnitude) * max_level)
if random.random() > 0.5:
level = -level
return x.transform(x.size, Image.AFFINE, (1, 0, 0, 0, 1, level))
def cutout(x, level, magnitude=10, max_level=20, *args, **kwargs):
size = int((level / magnitude) * max_level)
if size <= 0:
return x
w, h = x.size
upper_coord, lower_coord = _gen_cutout_coord(h, w, size)
pixels = x.load()
for i in range(upper_coord[0], lower_coord[0]):
for j in range(upper_coord[1], lower_coord[1]):
pixels[i, j] = (127, 127, 127, 0)
return x
def _gen_cutout_coord(height, width, size):
height_loc = random.randint(0, height - 1)
width_loc = random.randint(0, width - 1)
upper_coord = (max(0, height_loc - size // 2),
max(0, width_loc - size // 2))
lower_coord = (min(height, height_loc + size // 2),
min(width, width_loc + size // 2))
return upper_coord, lower_coord
"""
For torch.Tensor
"""
class TorchCutout:
def __init__(self, size=16):
self.size = size
def __call__(self, img):
h, w = img.shape[-2:]
upper_coord, lower_coord = _gen_cutout_coord(h, w, self.size)
mask_height = lower_coord[0] - upper_coord[0]
mask_width = lower_coord[1] - upper_coord[1]
assert mask_height > 0
assert mask_width > 0
mask = torch.ones_like(img)
zeros = torch.zeros((img.shape[0], mask_height, mask_width))
mask[:, upper_coord[0]:lower_coord[0], upper_coord[1]:lower_coord[1]] = zeros
return img * mask
def __repr__(self):
return f"TorchCutout(size={self.size})"
class GaussianNoise:
def __init__(self, std=0.15):
self.std = std
def __call__(self, x):
with torch.no_grad():
return x + torch.randn_like(x) * self.std
def __repr__(self):
return f"GaussianNoise(std={self.std})"
class BatchRandomFlip:
def __init__(self, flip_prob=0.5):
self.p = flip_prob
def __call__(self, x):
with torch.no_grad():
return torch.stack([
torch.flip(img, (-1,))
if random.random() > self.p
else img
for img in x
], 0)
def __repr__(self):
return f"BatchRandomFlip(flip_prob={self.p})"
class RandomFlip:
def __init__(self, flip_prob=0.5):
self.p = flip_prob
def __call__(self, x):
if random.random() > self.p:
return torch.flip(x, (-1,))
return x
def __repr__(self):
return f"RandomFlip(flip_prob={self.p})"
class BatchRandomCrop:
def __init__(self, padding=4):
self.pad = padding
def __call__(self, x):
with torch.no_grad():
b, _, h, w = x.shape
x = F.pad(x, [self.pad for _ in range(4)], mode="reflect")
left, top = torch.randint(0, 1+self.pad*2, (b,)), torch.randint(0, 1+self.pad*2, (b,))
return torch.stack([
img[..., t:t+h, l:l+w]
for img, t, l in zip(x, left, top)
], 0)
def __repr__(self):
return f"BatchRandomCrop(padding={self.pad})"
class RandomCrop:
def __init__(self, padding=4):
self.pad = padding
def __call__(self, x):
with torch.no_grad():
_, h, w = x.shape
x = F.pad(x[None], [self.pad for _ in range(4)], mode="reflect")
left, top = random.randint(0, self.pad*2), random.randint(0, self.pad*2)
return x[0, :, top:top+h, left:left+w]
def __repr__(self):
return f"RandomCrop(padding={self.pad})"
class ZCA:
def __init__(self, mean, scale):
self.mean = torch.from_numpy(mean).float()
self.scale = torch.from_numpy(scale).float()
def __call__(self, x):
c, h, w = x.shape
x = x.reshape(-1)
x = (x - self.mean) @ self.scale
return x.reshape(c, h, w)
def __repr__(self):
return f"ZCA()"
class GCN:
"""global contrast normalization"""
def __init__(self, multiplier=55, eps=1e-10):
self.multiplier = multiplier
self.eps = eps
def __call__(self, x):
x -= x.mean()
norm = x.norm(2)
norm[norm < self.eps] = 1
return self.multiplier * x / norm
def __repr__(self):
return f"GCN(multiplier={self.multiplier}, eps={self.eps})"
"""
For numpy.array
"""
def numpy_batch_gcn(images, multiplier=55, eps=1e-10):
# global contrast normalization
    images = images.astype(float)
images -= images.mean(axis=(1,2,3), keepdims=True)
per_image_norm = np.sqrt(np.square(images).sum((1,2,3), keepdims=True))
per_image_norm[per_image_norm < eps] = 1
return multiplier * images / per_image_norm
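# Illustrative usage (assumed; the 32x32 size and RGBA mode mirror the
# conventions of the PIL helpers above):
if __name__ == "__main__":
    pil_img = Image.new("RGBA", (32, 32), (128, 64, 32, 255))
    out = rotate(shear_x(pil_img, level=5), level=5)   # chain two PIL ops
    assert out.size == (32, 32)
    tensor_img = torch.rand(3, 32, 32)
    cut = TorchCutout(size=8)(tensor_img)              # zero out an 8x8 patch
    assert cut.shape == tensor_img.shape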
|
5c22e1475c92a02f72fa90ec04f7b486917e37d9
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/cloud/alexa_config.py
|
e85c6dd277a19724ae93b392e711747aecb2ff94
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 17,679
|
py
|
alexa_config.py
|
"""Alexa configuration for Home Assistant Cloud."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
from contextlib import suppress
from datetime import datetime, timedelta
from http import HTTPStatus
import logging
from typing import TYPE_CHECKING, Any
import aiohttp
from hass_nabucasa import Cloud, cloud_api
from yarl import URL
from homeassistant.components import persistent_notification
from homeassistant.components.alexa import (
DOMAIN as ALEXA_DOMAIN,
config as alexa_config,
entities as alexa_entities,
errors as alexa_errors,
state_report as alexa_state_report,
)
from homeassistant.components.binary_sensor import BinarySensorDeviceClass
from homeassistant.components.homeassistant.exposed_entities import (
async_expose_entity,
async_get_assistant_settings,
async_listen_entity_updates,
async_should_expose,
)
from homeassistant.components.sensor import SensorDeviceClass
from homeassistant.const import CLOUD_NEVER_EXPOSED_ENTITIES
from homeassistant.core import Event, HomeAssistant, callback, split_entity_id
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import entity_registry as er, start
from homeassistant.helpers.entity import get_device_class
from homeassistant.helpers.entityfilter import EntityFilter
from homeassistant.helpers.event import async_call_later
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from .const import (
CONF_ENTITY_CONFIG,
CONF_FILTER,
DOMAIN as CLOUD_DOMAIN,
PREF_ALEXA_REPORT_STATE,
PREF_ENABLE_ALEXA,
PREF_SHOULD_EXPOSE,
)
from .prefs import ALEXA_SETTINGS_VERSION, CloudPreferences
if TYPE_CHECKING:
from .client import CloudClient
_LOGGER = logging.getLogger(__name__)
CLOUD_ALEXA = f"{CLOUD_DOMAIN}.{ALEXA_DOMAIN}"
# Time to wait when entity preferences have changed before syncing it to
# the cloud.
SYNC_DELAY = 1
SUPPORTED_DOMAINS = {
"alarm_control_panel",
"alert",
"automation",
"button",
"camera",
"climate",
"cover",
"fan",
"group",
"humidifier",
"image_processing",
"input_boolean",
"input_button",
"input_number",
"light",
"lock",
"media_player",
"number",
"scene",
"script",
"switch",
"timer",
"vacuum",
}
SUPPORTED_BINARY_SENSOR_DEVICE_CLASSES = {
BinarySensorDeviceClass.DOOR,
BinarySensorDeviceClass.GARAGE_DOOR,
BinarySensorDeviceClass.MOTION,
BinarySensorDeviceClass.OPENING,
BinarySensorDeviceClass.PRESENCE,
BinarySensorDeviceClass.WINDOW,
}
SUPPORTED_SENSOR_DEVICE_CLASSES = {
SensorDeviceClass.TEMPERATURE,
}
def entity_supported(hass: HomeAssistant, entity_id: str) -> bool:
"""Return if the entity is supported.
This is called when migrating from legacy config format to avoid exposing
all binary sensors and sensors.
"""
domain = split_entity_id(entity_id)[0]
if domain in SUPPORTED_DOMAINS:
return True
try:
device_class = get_device_class(hass, entity_id)
except HomeAssistantError:
# The entity no longer exists
return False
if (
domain == "binary_sensor"
and device_class in SUPPORTED_BINARY_SENSOR_DEVICE_CLASSES
):
return True
if domain == "sensor" and device_class in SUPPORTED_SENSOR_DEVICE_CLASSES:
return True
return False
class CloudAlexaConfig(alexa_config.AbstractConfig):
"""Alexa Configuration."""
def __init__(
self,
hass: HomeAssistant,
config: dict,
cloud_user: str,
prefs: CloudPreferences,
cloud: Cloud[CloudClient],
) -> None:
"""Initialize the Alexa config."""
super().__init__(hass)
self._config = config
self._cloud_user = cloud_user
self._prefs = prefs
self._cloud = cloud
self._token = None
self._token_valid: datetime | None = None
self._cur_entity_prefs = async_get_assistant_settings(hass, CLOUD_ALEXA)
self._alexa_sync_unsub: Callable[[], None] | None = None
self._endpoint: str | URL | None = None
@property
def enabled(self) -> bool:
"""Return if Alexa is enabled."""
return (
self._cloud.is_logged_in
and not self._cloud.subscription_expired
and self._prefs.alexa_enabled
)
@property
def supports_auth(self) -> bool:
"""Return if config supports auth."""
return True
@property
def should_report_state(self) -> bool:
"""Return if states should be proactively reported."""
return (
self._prefs.alexa_enabled
and self._prefs.alexa_report_state
and self.authorized
)
@property
def endpoint(self) -> str | URL | None:
"""Endpoint for report state."""
if self._endpoint is None:
raise ValueError("No endpoint available. Fetch access token first")
return self._endpoint
@property
def locale(self) -> str:
"""Return config locale."""
# Not clear how to determine locale atm.
return "en-US"
@property
def entity_config(self) -> dict[str, Any]:
"""Return entity config."""
return self._config.get(CONF_ENTITY_CONFIG) or {}
@callback
def user_identifier(self) -> str:
"""Return an identifier for the user that represents this config."""
return self._cloud_user
def _migrate_alexa_entity_settings_v1(self) -> None:
"""Migrate alexa entity settings to entity registry options."""
if not self._config[CONF_FILTER].empty_filter:
# Don't migrate if there's a YAML config
return
for entity_id in {
*self.hass.states.async_entity_ids(),
*self._prefs.alexa_entity_configs,
}:
async_expose_entity(
self.hass,
CLOUD_ALEXA,
entity_id,
self._should_expose_legacy(entity_id),
)
async def async_initialize(self) -> None:
"""Initialize the Alexa config."""
await super().async_initialize()
async def on_hass_started(hass: HomeAssistant) -> None:
if self._prefs.alexa_settings_version != ALEXA_SETTINGS_VERSION:
_LOGGER.info(
"Start migration of Alexa settings from v%s to v%s",
self._prefs.alexa_settings_version,
ALEXA_SETTINGS_VERSION,
)
if self._prefs.alexa_settings_version < 2 or (
# Recover from a bug we had in 2023.5.0 where entities didn't get exposed
self._prefs.alexa_settings_version < 3
and not any(
settings.get("should_expose", False)
for settings in async_get_assistant_settings(
hass, CLOUD_ALEXA
).values()
)
):
self._migrate_alexa_entity_settings_v1()
_LOGGER.info(
"Finished migration of Alexa settings from v%s to v%s",
self._prefs.alexa_settings_version,
ALEXA_SETTINGS_VERSION,
)
await self._prefs.async_update(
alexa_settings_version=ALEXA_SETTINGS_VERSION
)
async_listen_entity_updates(
self.hass, CLOUD_ALEXA, self._async_exposed_entities_updated
)
async def on_hass_start(hass: HomeAssistant) -> None:
if self.enabled and ALEXA_DOMAIN not in self.hass.config.components:
await async_setup_component(self.hass, ALEXA_DOMAIN, {})
start.async_at_start(self.hass, on_hass_start)
start.async_at_started(self.hass, on_hass_started)
self._prefs.async_listen_updates(self._async_prefs_updated)
self.hass.bus.async_listen(
er.EVENT_ENTITY_REGISTRY_UPDATED,
self._handle_entity_registry_updated,
)
def _should_expose_legacy(self, entity_id: str) -> bool:
"""If an entity should be exposed."""
if entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
return False
entity_configs = self._prefs.alexa_entity_configs
entity_config = entity_configs.get(entity_id, {})
entity_expose: bool | None = entity_config.get(PREF_SHOULD_EXPOSE)
if entity_expose is not None:
return entity_expose
entity_registry = er.async_get(self.hass)
if registry_entry := entity_registry.async_get(entity_id):
auxiliary_entity = (
registry_entry.entity_category is not None
or registry_entry.hidden_by is not None
)
else:
auxiliary_entity = False
# Backwards compat
if (default_expose := self._prefs.alexa_default_expose) is None:
return not auxiliary_entity and entity_supported(self.hass, entity_id)
return (
not auxiliary_entity
and split_entity_id(entity_id)[0] in default_expose
and entity_supported(self.hass, entity_id)
)
@callback
def should_expose(self, entity_id: str) -> bool:
"""If an entity should be exposed."""
entity_filter: EntityFilter = self._config[CONF_FILTER]
if not entity_filter.empty_filter:
if entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
return False
return entity_filter(entity_id)
return async_should_expose(self.hass, CLOUD_ALEXA, entity_id)
@callback
def async_invalidate_access_token(self) -> None:
"""Invalidate access token."""
self._token_valid = None
async def async_get_access_token(self) -> str | None:
"""Get an access token."""
if self._token_valid is not None and self._token_valid > utcnow():
return self._token
resp = await cloud_api.async_alexa_access_token(self._cloud)
body = await resp.json()
if resp.status == HTTPStatus.BAD_REQUEST:
if body["reason"] in ("RefreshTokenNotFound", "UnknownRegion"):
if self.should_report_state:
persistent_notification.async_create(
self.hass,
(
"There was an error reporting state to Alexa"
f" ({body['reason']}). Please re-link your Alexa skill via"
" the Alexa app to continue using it."
),
"Alexa state reporting disabled",
"cloud_alexa_report",
)
raise alexa_errors.RequireRelink
raise alexa_errors.NoTokenAvailable
self._token = body["access_token"]
self._endpoint = body["event_endpoint"]
self._token_valid = utcnow() + timedelta(seconds=body["expires_in"])
return self._token
async def _async_prefs_updated(self, prefs: CloudPreferences) -> None:
"""Handle updated preferences."""
if not self._cloud.is_logged_in:
if self.is_reporting_states:
await self.async_disable_proactive_mode()
if self._alexa_sync_unsub:
self._alexa_sync_unsub()
self._alexa_sync_unsub = None
return
updated_prefs = prefs.last_updated
if (
ALEXA_DOMAIN not in self.hass.config.components
and self.enabled
and self.hass.is_running
):
await async_setup_component(self.hass, ALEXA_DOMAIN, {})
if self.should_report_state != self.is_reporting_states:
if self.should_report_state:
try:
await self.async_enable_proactive_mode()
except (alexa_errors.NoTokenAvailable, alexa_errors.RequireRelink):
await self.set_authorized(False)
else:
await self.async_disable_proactive_mode()
            # State reporting is exposed as a property on entities.
# So when we change it, we need to sync all entities.
await self.async_sync_entities()
return
# Nothing to do if no Alexa related things have changed
if not any(
key in updated_prefs
for key in (
PREF_ALEXA_REPORT_STATE,
PREF_ENABLE_ALEXA,
)
):
return
await self.async_sync_entities()
@callback
def _async_exposed_entities_updated(self) -> None:
"""Handle updated preferences."""
# Delay updating as we might update more
if self._alexa_sync_unsub:
self._alexa_sync_unsub()
self._alexa_sync_unsub = async_call_later(
self.hass, SYNC_DELAY, self._sync_prefs
)
async def _sync_prefs(self, _now: datetime) -> None:
"""Sync the updated preferences to Alexa."""
self._alexa_sync_unsub = None
old_prefs = self._cur_entity_prefs
new_prefs = async_get_assistant_settings(self.hass, CLOUD_ALEXA)
seen = set()
to_update = []
to_remove = []
is_enabled = self.enabled
for entity_id, info in old_prefs.items():
seen.add(entity_id)
if not is_enabled:
to_remove.append(entity_id)
old_expose = info.get(PREF_SHOULD_EXPOSE)
if entity_id in new_prefs:
new_expose = new_prefs[entity_id].get(PREF_SHOULD_EXPOSE)
else:
new_expose = None
if old_expose == new_expose:
continue
if new_expose:
to_update.append(entity_id)
else:
to_remove.append(entity_id)
# Now all the ones that are in new prefs but never were in old prefs
for entity_id, info in new_prefs.items():
if entity_id in seen:
continue
new_expose = info.get(PREF_SHOULD_EXPOSE)
if new_expose is None:
continue
# Only test if we should expose. It can never be a remove action,
# as it didn't exist in old prefs object.
if new_expose:
to_update.append(entity_id)
# We only set the prefs when update is successful, that way we will
# retry when next change comes in.
if await self._sync_helper(to_update, to_remove):
self._cur_entity_prefs = new_prefs
async def async_sync_entities(self) -> bool:
"""Sync all entities to Alexa."""
# Remove any pending sync
if self._alexa_sync_unsub:
self._alexa_sync_unsub()
self._alexa_sync_unsub = None
to_update = []
to_remove = []
is_enabled = self.enabled
for entity in alexa_entities.async_get_entities(self.hass, self):
if is_enabled and self.should_expose(entity.entity_id):
to_update.append(entity.entity_id)
else:
to_remove.append(entity.entity_id)
return await self._sync_helper(to_update, to_remove)
async def _sync_helper(self, to_update: list[str], to_remove: list[str]) -> bool:
"""Sync entities to Alexa.
Return boolean if it was successful.
"""
if not to_update and not to_remove:
return True
# Make sure it's valid.
await self.async_get_access_token()
tasks = []
if to_update:
tasks.append(
asyncio.create_task(
alexa_state_report.async_send_add_or_update_message(
self.hass, self, to_update
)
)
)
if to_remove:
tasks.append(
asyncio.create_task(
alexa_state_report.async_send_delete_message(
self.hass, self, to_remove
)
)
)
try:
async with asyncio.timeout(10):
await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
return True
except asyncio.TimeoutError:
_LOGGER.warning("Timeout trying to sync entities to Alexa")
return False
except aiohttp.ClientError as err:
_LOGGER.warning("Error trying to sync entities to Alexa: %s", err)
return False
async def _handle_entity_registry_updated(self, event: Event) -> None:
"""Handle when entity registry updated."""
if not self.enabled or not self._cloud.is_logged_in:
return
entity_id = event.data["entity_id"]
if not self.should_expose(entity_id):
return
action = event.data["action"]
to_update = []
to_remove = []
if action == "create":
to_update.append(entity_id)
elif action == "remove":
to_remove.append(entity_id)
elif action == "update" and bool(
set(event.data["changes"]) & er.ENTITY_DESCRIBING_ATTRIBUTES
):
to_update.append(entity_id)
if "old_entity_id" in event.data:
to_remove.append(event.data["old_entity_id"])
with suppress(alexa_errors.NoTokenAvailable):
await self._sync_helper(to_update, to_remove)
|
460cbc0bdb7be4d3bb15b4e671b5aecd2dc9a111
|
ef884ae6ca9c293bd73432f40794a05815299142
|
/tests/conftest.py
|
f2de231215d45e2ab25c169ea0d3cee5689afea4
|
[
"MIT"
] |
permissive
|
9seconds/concierge
|
4574fa04f72cc4f7e0cdefcdfed6ac37f47287ea
|
40b0de3e68354cd06461763b228d8901bc4c2d12
|
refs/heads/master
| 2020-12-25T00:39:35.017152
| 2020-12-16T12:30:50
| 2020-12-16T12:30:50
| 52,885,382
| 126
| 15
|
MIT
| 2020-12-16T12:30:51
| 2016-03-01T15:02:48
|
Python
|
UTF-8
|
Python
| false
| false
| 5,515
|
py
|
conftest.py
|
# -*- coding: utf-8 -*-
import os
import os.path
import shutil
import sys
import unittest.mock
import inotify_simple
import pytest
import concierge
import concierge.notifications
import concierge.templater
def have_mocked(request, *mock_args, **mock_kwargs):
if len(mock_args) > 1:
method = unittest.mock.patch.object
else:
method = unittest.mock.patch
patch = method(*mock_args, **mock_kwargs)
mocked = patch.start()
request.addfinalizer(patch.stop)
return mocked
@pytest.fixture
def no_sleep(monkeypatch):
monkeypatch.setattr("time.sleep", lambda arg: arg)
@pytest.fixture
def mock_get_content(request):
return have_mocked(request, "concierge.utils.get_content")
@pytest.fixture(scope="session", autouse=True)
def mock_logger(request):
return have_mocked(request, "concierge.utils.logger")
@pytest.fixture(autouse=True)
def mock_log_configuration(request):
have_mocked(request, "socket.socket") # required for SysLogHandler
marker = request.node.get_marker("no_mock_log_configuration")
if not marker:
return have_mocked(request, "concierge.utils.configure_logging")
@pytest.fixture(autouse=True)
def mock_notificatior(request, monkeypatch):
marker = request.node.get_marker("no_mock_notificatior")
if not marker:
monkeypatch.setattr(
concierge.notifications,
"notifier",
concierge.notifications.dummy_notifier)
@pytest.fixture
def ptmpdir(request, tmpdir):
    for key in "TMP", "TMPDIR", "TEMP":
os.environ[key] = tmpdir.strpath
request.addfinalizer(lambda: shutil.rmtree(tmpdir.strpath))
return tmpdir
@pytest.fixture
def sysargv(monkeypatch):
argv = ["concierge"]
monkeypatch.setattr(sys, "argv", argv)
return argv
@pytest.fixture
def inotifier(request):
mock = have_mocked(request, "inotify_simple.INotify")
mock.return_value = mock
mock.__enter__.return_value = mock
values = [inotify_simple.Event(0, 0, 0,
os.path.basename(concierge.DEFAULT_RC))]
values *= 3
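    # Each read() call pops one queued event; once the queue is empty the mock
    # raises KeyboardInterrupt so the watch loop under test exits instead of blocking.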
def side_effect():
if values:
return [values.pop()]
raise KeyboardInterrupt
mock.read.side_effect = side_effect
mock.v = values
return mock
@pytest.fixture
def template_render(request):
return have_mocked(request, concierge.templater.Templater, "render")
@pytest.fixture(params=(None, "-d", "--debug"))
def cliparam_debug(request):
return request.param
@pytest.fixture(params=(None, "-v", "--verbose"))
def cliparam_verbose(request):
return request.param
@pytest.fixture(params=(None, "-s", "--source-path"))
def cliparam_source_path(request):
return request.param
@pytest.fixture(params=(None, "-o", "--destination-path"))
def cliparam_destination_path(request):
return request.param
@pytest.fixture(params=(None, "-b", "--boring-syntax"))
def cliparam_boring_syntax(request):
return request.param
@pytest.fixture(params=(None, "-a", "--add-header"))
def cliparam_add_header(request):
return request.param
@pytest.fixture(params=(None, "-t", "--no-templater"))
def cliparam_no_templater(request):
return request.param
@pytest.fixture(params=(None, "--systemd"))
def cliparam_systemd(request):
return request.param
@pytest.fixture(params=(None, "--curlsh"))
def cliparam_curlsh(request):
return request.param
@pytest.fixture(params=(None, "-n", "--no-desktop-notifications"))
def cliparam_no_desktop_notifications(request):
return request.param
@pytest.fixture
def cliargs_default(sysargv):
return sysargv
@pytest.fixture
def cliargs_fullset(sysargv, cliparam_debug, cliparam_verbose,
cliparam_source_path, cliparam_destination_path,
cliparam_boring_syntax, cliparam_add_header,
cliparam_no_templater, cliparam_no_desktop_notifications):
options = {
"debug": cliparam_debug,
"verbose": cliparam_verbose,
"source_path": cliparam_source_path,
"destination_path": cliparam_destination_path,
"add_header": cliparam_add_header,
"boring_syntax": cliparam_boring_syntax,
"no_templater": cliparam_no_templater,
"no_desktop_notifications": cliparam_no_desktop_notifications}
bool_params = (
cliparam_debug, cliparam_verbose, cliparam_boring_syntax,
cliparam_add_header, cliparam_no_desktop_notifications)
value_params = (
cliparam_source_path, cliparam_destination_path)
for param in bool_params:
if param:
sysargv.append(param)
for param in value_params:
if param:
sysargv.append(param)
sysargv.append("/path/to")
if cliparam_no_templater:
sysargv.append(cliparam_no_templater)
return sysargv, options
@pytest.fixture
def cliargs_concierge_fullset(cliargs_fullset, cliparam_systemd,
cliparam_curlsh):
sysargv, options = cliargs_fullset
for param in cliparam_systemd, cliparam_curlsh:
if param:
sysargv.append(param)
options["systemd"] = cliparam_systemd
options["curlsh"] = cliparam_curlsh
return sysargv, options
@pytest.fixture
def mock_mainfunc(cliargs_default, mock_get_content, inotifier):
mock_get_content.return_value = """\
Compression yes
Host q
HostName e
Host b
HostName lalala
"""
return cliargs_default, mock_get_content, inotifier
|
54ae45564d18588dbafc533a224e5fbe2e3c3d07
|
94dbd40525692416ea8100d13b03ece73ee33f7f
|
/kgtk/cli/__dummy.py
|
43cbcdefce64e0bab78aafe423a160ef64e01b76
|
[
"MIT"
] |
permissive
|
usc-isi-i2/kgtk
|
7101129ce1dde646095803429d3c751bf87ae071
|
c31ba4c33d5f925fdb66a487ba2e1184c9ca4254
|
refs/heads/main
| 2023-08-22T06:58:22.301262
| 2023-06-29T19:55:28
| 2023-06-29T19:55:28
| 234,676,361
| 325
| 53
|
MIT
| 2023-06-29T19:55:30
| 2020-01-18T03:34:48
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,575
|
py
|
__dummy.py
|
"""
Example CLI module
Please DON'T import specific modules globally, import them in `run`.
Please DON'T initialize resource (e.g., variable) globally.
"""
from kgtk.cli_argparse import KGTKArgumentParser
def parser():
"""
Initialize sub-parser.
Parameters: https://docs.python.org/3/library/argparse.html#argparse.ArgumentParser
"""
return {
'help': 'this is example',
'description': 'this is a basic example'
}
def add_arguments(parser: KGTKArgumentParser):
"""
Parse arguments
Args:
parser (kgtk.cli_argparse.KGTKArgumentParser)
"""
parser.add_argument(action="store", type=str, metavar="name", dest="name")
parser.add_argument("-i", "--info", action="store", type=str, dest="info")
parser.add_argument("-e", "--error", action="store_true", help="raise an error")
parser.accept_shared_argument('_debug')
def run(name, info, error, _debug):
"""
Arguments here should be defined in `add_arguments` first.
The return value (integer) will be the return code in shell. It will set to 0 if no value returns.
Though you can return a non-zero value to indicate error, raise exceptions defined in kgtk.exceptions is preferred
since this gives user an unified error code and message.
"""
# import modules locally
import socket
from kgtk.exceptions import KGTKException
if _debug:
print('DEBUG MODE')
if error:
raise KGTKException('An error here\n')
print('name: {}, info: {}\nhost: {}'.format(name, info, socket.gethostname()))
|
f7ba334a4b3e4f3018eba5229ce6b07b9339fac6
|
0760fb4901a75766921a205b55686d6d6f049b30
|
/python/ray/train/examples/horovod/horovod_tune_example.py
|
05cb5b220414a23d14115b5439e3c34ade810c1b
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ray-project/ray
|
a4bb6940b08b59a61ef0b8e755a52d8563a2f867
|
edba68c3e7cf255d1d6479329f305adb7fa4c3ed
|
refs/heads/master
| 2023-08-31T03:36:48.164405
| 2023-08-31T03:20:38
| 2023-08-31T03:20:38
| 71,932,349
| 29,482
| 5,669
|
Apache-2.0
| 2023-09-14T21:48:14
| 2016-10-25T19:38:30
|
Python
|
UTF-8
|
Python
| false
| false
| 3,945
|
py
|
horovod_tune_example.py
|
import numpy as np
import time
import torch
import ray
from ray import train, tune
import ray.train.torch
from ray.train.horovod import HorovodTrainer
from ray.train import ScalingConfig
from ray.tune.tune_config import TuneConfig
from ray.tune.tuner import Tuner
def sq(x):
m2 = 1.0
m1 = -20.0
m0 = 50.0
return m2 * x * x + m1 * x + m0
def qu(x):
m3 = 10.0
m2 = 5.0
m1 = -20.0
m0 = -5.0
return m3 * x * x * x + m2 * x * x + m1 * x + m0
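# sq and qu above define the quadratic and cubic targets that the small Net below
# is trained to regress against (see the label computation in train_loop_per_worker).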
class Net(torch.nn.Module):
def __init__(self, mode="sq"):
super(Net, self).__init__()
if mode == "square":
self.mode = 0
self.param = torch.nn.Parameter(torch.FloatTensor([1.0, -1.0]))
else:
self.mode = 1
self.param = torch.nn.Parameter(torch.FloatTensor([1.0, -1.0, 1.0]))
def forward(self, x):
        if self.mode == 0:
return x * x + self.param[0] * x + self.param[1]
else:
return_val = 10 * x * x * x
return_val += self.param[0] * x * x
return_val += self.param[1] * x + self.param[2]
return return_val
def train_loop_per_worker(config):
import torch
import horovod.torch as hvd
hvd.init()
device = ray.train.torch.get_device()
mode = config["mode"]
net = Net(mode).to(device)
optimizer = torch.optim.SGD(
net.parameters(),
lr=config["lr"],
)
optimizer = hvd.DistributedOptimizer(optimizer)
num_steps = 5
print(hvd.size())
np.random.seed(1 + hvd.rank())
torch.manual_seed(1234)
# To ensure consistent initialization across workers,
hvd.broadcast_parameters(net.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
start = time.time()
x_max = config["x_max"]
for step in range(1, num_steps + 1):
features = torch.Tensor(np.random.rand(1) * 2 * x_max - x_max).to(device)
if mode == "square":
labels = sq(features)
else:
labels = qu(features)
optimizer.zero_grad()
outputs = net(features)
loss = torch.nn.MSELoss()(outputs, labels)
loss.backward()
optimizer.step()
time.sleep(0.1)
train.report(dict(loss=loss.item()))
total = time.time() - start
print(f"Took {total:0.3f} s. Avg: {total / num_steps:0.3f} s.")
def tune_horovod(num_workers, num_samples, use_gpu, mode="square", x_max=1.0):
horovod_trainer = HorovodTrainer(
train_loop_per_worker=train_loop_per_worker,
scaling_config=ScalingConfig(num_workers=num_workers, use_gpu=use_gpu),
train_loop_config={"mode": mode, "x_max": x_max},
)
tuner = Tuner(
horovod_trainer,
param_space={"train_loop_config": {"lr": tune.uniform(0.1, 1)}},
tune_config=TuneConfig(mode="min", metric="loss", num_samples=num_samples),
_tuner_kwargs={"fail_fast": True},
)
result_grid = tuner.fit()
print("Best hyperparameters found were: ", result_grid.get_best_result().config)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--mode", type=str, default="square", choices=["square", "cubic"]
)
parser.add_argument(
"--learning_rate", type=float, default=0.1, dest="learning_rate"
)
parser.add_argument("--x_max", type=float, default=1.0, dest="x_max")
parser.add_argument("--gpu", action="store_true")
parser.add_argument(
"--smoke-test", action="store_true", help=("Finish quickly for testing.")
)
parser.add_argument("--num-workers", type=int, default=2)
args, _ = parser.parse_known_args()
if args.smoke_test:
ray.init(num_cpus=3)
tune_horovod(
num_workers=args.num_workers,
num_samples=2 if args.smoke_test else 10,
use_gpu=args.gpu,
mode=args.mode,
x_max=args.x_max,
)
|
41a9fe234cccc4cdfe4f7ebdb4efef2af98ac56a
|
0276068b108b0bde776bd73b4cd75ab37b63f319
|
/ur_robot_driver/test/integration_test.py
|
c15a8cdf45b0d31f68750bd3bd54e7a2759f0e77
|
[
"Apache-2.0"
] |
permissive
|
UniversalRobots/Universal_Robots_ROS_Driver
|
86c1e80cf2814d0a5c5ad1c6d3195b859742ef99
|
395c0541b20d0da2cd480e2ad85b2100410fb043
|
refs/heads/master
| 2023-08-31T07:28:19.585562
| 2023-01-23T14:31:27
| 2023-01-23T14:36:07
| 212,798,234
| 666
| 417
|
Apache-2.0
| 2023-08-31T13:38:28
| 2019-10-04T11:20:45
|
C++
|
UTF-8
|
Python
| false
| false
| 18,637
|
py
|
integration_test.py
|
#!/usr/bin/env python
import sys
import time
import unittest
import rospy
import actionlib
import std_msgs.msg
from control_msgs.msg import (
FollowJointTrajectoryAction,
FollowJointTrajectoryGoal,
FollowJointTrajectoryResult,
JointTolerance)
from ur_dashboard_msgs.msg import SetModeAction, SetModeGoal, RobotMode
from std_srvs.srv import Trigger, TriggerRequest
import tf
from trajectory_msgs.msg import JointTrajectoryPoint
from ur_msgs.srv import SetIO, SetIORequest
from ur_msgs.msg import IOStates
from cartesian_control_msgs.msg import (
FollowCartesianTrajectoryAction,
FollowCartesianTrajectoryGoal,
FollowCartesianTrajectoryResult,
CartesianTrajectoryPoint)
import geometry_msgs.msg
from controller_manager_msgs.srv import SwitchControllerRequest, SwitchController
PKG = 'ur_robot_driver'
NAME = 'integration_test'
ALL_CONTROLLERS = [
"scaled_pos_joint_traj_controller",
"pos_joint_traj_controller",
"scaled_vel_joint_traj_controller",
"vel_joint_traj_controller",
"joint_group_vel_controller",
"forward_joint_traj_controller",
"forward_cartesian_traj_controller",
"twist_controller",
"pose_based_cartesian_traj_controller",
"joint_based_cartesian_traj_controller",
]
class IntegrationTest(unittest.TestCase):
def __init__(self, *args):
super(IntegrationTest, self).__init__(*args)
self.init_robot()
def init_robot(self):
"""Make sure the robot is booted and ready to receive commands"""
rospy.init_node('ur_robot_driver_integration_test')
timeout = rospy.Duration(30)
self.set_mode_client = actionlib.SimpleActionClient(
'/ur_hardware_interface/set_mode', SetModeAction)
if not self.set_mode_client.wait_for_server(timeout):
            self.fail(
                "Could not reach set_mode action. Make sure that the driver is actually running.")
self.trajectory_client = actionlib.SimpleActionClient(
'follow_joint_trajectory', FollowJointTrajectoryAction)
if not self.trajectory_client.wait_for_server(timeout):
            self.fail(
                "Could not reach controller action. Make sure that the driver is actually running.")
self.cartesian_passthrough_trajectory_client = actionlib.SimpleActionClient(
'forward_cartesian_trajectory', FollowCartesianTrajectoryAction)
if not self.cartesian_passthrough_trajectory_client.wait_for_server(timeout):
            self.fail(
                "Could not reach cartesian passthrough controller action. Make sure that the driver is actually running.")
self.joint_passthrough_trajectory_client = actionlib.SimpleActionClient(
'forward_joint_trajectory', FollowJointTrajectoryAction)
if not self.joint_passthrough_trajectory_client.wait_for_server(timeout):
            self.fail(
                "Could not reach joint passthrough controller action. Make sure that the driver is actually running.")
self.cartesian_trajectory_client = actionlib.SimpleActionClient(
'follow_cartesian_trajectory', FollowCartesianTrajectoryAction)
if not self.cartesian_trajectory_client.wait_for_server(timeout):
            self.fail(
                "Could not reach cartesian controller action. Make sure that the driver is actually running.")
self.set_io_client = rospy.ServiceProxy('/ur_hardware_interface/set_io', SetIO)
try:
self.set_io_client.wait_for_service(timeout)
except rospy.exceptions.ROSException as err:
self.fail(
"Could not reach SetIO service. Make sure that the driver is actually running."
" Msg: {}".format(err))
self.switch_controllers_client = rospy.ServiceProxy('/controller_manager/switch_controller',
SwitchController)
try:
self.switch_controllers_client.wait_for_service(timeout)
except rospy.exceptions.ROSException as err:
self.fail(
"Could not reach controller switch service. Make sure that the driver is actually running."
" Msg: {}".format(err))
self.send_program_srv = rospy.ServiceProxy('/ur_hardware_interface/resend_robot_program',
Trigger)
try:
self.send_program_srv.wait_for_service(timeout)
except rospy.exceptions.ROSException as err:
self.fail(
"Could not reach resend_robot_program service. Make sure that the driver is "
"actually running in headless mode."
" Msg: {}".format(err))
self.script_publisher = rospy.Publisher("/ur_hardware_interface/script_command", std_msgs.msg.String, queue_size=1)
self.tf_listener = tf.TransformListener()
self.twist_pub = rospy.Publisher("/twist_controller/command", geometry_msgs.msg.Twist, queue_size=1)
def set_robot_to_mode(self, target_mode):
goal = SetModeGoal()
        goal.target_robot_mode.mode = target_mode
goal.play_program = False # we use headless mode during tests
# This might be a bug to hunt down. We have to reset the program before calling `resend_robot_program`
goal.stop_program = True
self.set_mode_client.send_goal(goal)
self.set_mode_client.wait_for_result()
return self.set_mode_client.get_result().success
def test_joint_trajectory_position_interface(self):
"""Test robot movement"""
#### Power cycle the robot in order to make sure it is running correctly####
self.assertTrue(self.set_robot_to_mode(RobotMode.POWER_OFF))
rospy.sleep(0.5)
self.assertTrue(self.set_robot_to_mode(RobotMode.RUNNING))
rospy.sleep(0.5)
self.send_program_srv.call()
rospy.sleep(0.5) # TODO properly wait until the controller is running
self.switch_on_controller("scaled_pos_joint_traj_controller")
goal = FollowJointTrajectoryGoal()
goal.trajectory.joint_names = ["elbow_joint", "shoulder_lift_joint", "shoulder_pan_joint",
"wrist_1_joint", "wrist_2_joint", "wrist_3_joint"]
position_list = [[0.0 for i in range(6)]]
position_list.append([-0.5 for i in range(6)])
position_list.append([-1.0 for i in range(6)])
duration_list = [6.0, 9.0, 12.0]
for i, position in enumerate(position_list):
point = JointTrajectoryPoint()
point.positions = position
point.time_from_start = rospy.Duration(duration_list[i])
goal.trajectory.points.append(point)
rospy.loginfo("Sending simple goal")
self.trajectory_client.send_goal(goal)
self.trajectory_client.wait_for_result()
self.assertEqual(self.trajectory_client.get_result().error_code,
FollowJointTrajectoryResult.SUCCESSFUL)
rospy.loginfo("Received result SUCCESSFUL")
"""Test trajectory server. This is more of a validation test that the testing suite does the
right thing."""
goal = FollowJointTrajectoryGoal()
goal.trajectory.joint_names = ["elbow_joint", "shoulder_lift_joint", "shoulder_pan_joint",
"wrist_1_joint", "wrist_2_joint", "wrist_3_joint"]
position_list = [[0.0 for i in range(6)]]
position_list.append([-0.5 for i in range(6)])
# Create illegal goal by making the second point come earlier than the first
duration_list = [6.0, 3.0]
for i, position in enumerate(position_list):
point = JointTrajectoryPoint()
point.positions = position
point.time_from_start = rospy.Duration(duration_list[i])
goal.trajectory.points.append(point)
rospy.loginfo("Sending illegal goal")
self.trajectory_client.send_goal(goal)
self.trajectory_client.wait_for_result()
# As timings are illegal, we expect the result to be INVALID_GOAL
self.assertEqual(self.trajectory_client.get_result().error_code,
FollowJointTrajectoryResult.INVALID_GOAL)
rospy.loginfo("Received result INVALID_GOAL")
"""Test robot movement"""
goal = FollowJointTrajectoryGoal()
goal.trajectory.joint_names = ["elbow_joint", "shoulder_lift_joint", "shoulder_pan_joint",
"wrist_1_joint", "wrist_2_joint", "wrist_3_joint"]
position_list = [[0.0 for i in range(6)]]
position_list.append([-1.0 for i in range(6)])
duration_list = [6.0, 6.5]
for i, position in enumerate(position_list):
point = JointTrajectoryPoint()
point.positions = position
point.time_from_start = rospy.Duration(duration_list[i])
goal.trajectory.points.append(point)
rospy.loginfo("Sending scaled goal without time restrictions")
self.trajectory_client.send_goal(goal)
self.trajectory_client.wait_for_result()
self.assertEqual(self.trajectory_client.get_result().error_code,
FollowJointTrajectoryResult.SUCCESSFUL)
rospy.loginfo("Received result SUCCESSFUL")
# Now do the same again, but with a goal time constraint
rospy.loginfo("Sending scaled goal with time restrictions")
goal.goal_time_tolerance = rospy.Duration(0.01)
self.trajectory_client.send_goal(goal)
self.trajectory_client.wait_for_result()
self.assertEqual(self.trajectory_client.get_result().error_code,
FollowJointTrajectoryResult.GOAL_TOLERANCE_VIOLATED)
rospy.loginfo("Received result GOAL_TOLERANCE_VIOLATED")
def test_set_io(self):
"""Test to set an IO and check whether it has been set."""
maximum_messages = 5
pin = 0
self.assertEqual(maximum_messages, 5)
        self.set_io_client(SetIORequest.FUN_SET_DIGITAL_OUT, pin, 0)
messages = 0
pin_state = True
while(pin_state):
if messages >= maximum_messages:
self.fail("Could not read desired state after {} messages.".format(maximum_messages))
io_state = rospy.wait_for_message('/ur_hardware_interface/io_states', IOStates)
pin_state = io_state.digital_out_states[pin].state
messages += 1
self.assertEqual(pin_state, 0)
self.set_io_client(SetIORequest.FUN_SET_DIGITAL_OUT, pin, 1)
messages = 0
pin_state = False
while(not pin_state):
if messages >= maximum_messages:
self.fail("Could not read desired state after {} messages.".format(maximum_messages))
io_state = rospy.wait_for_message('/ur_hardware_interface/io_states', IOStates)
pin_state = io_state.digital_out_states[pin].state
messages += 1
self.assertEqual(pin_state, 1)
def test_cartesian_passthrough(self):
#### Power cycle the robot in order to make sure it is running correctly####
self.assertTrue(self.set_robot_to_mode(RobotMode.POWER_OFF))
rospy.sleep(0.5)
self.assertTrue(self.set_robot_to_mode(RobotMode.RUNNING))
rospy.sleep(0.5)
# Make sure the robot is at a valid start position for our cartesian motions
self.script_publisher.publish("movej([1, -1.7, -1.7, -1, -1.57, -2])")
# As we don't have any feedback from that interface, sleep for a while
rospy.sleep(5)
self.send_program_srv.call()
rospy.sleep(0.5) # TODO properly wait until the controller is running
self.switch_on_controller("forward_cartesian_traj_controller")
position_list = [geometry_msgs.msg.Vector3(0.4,0.4,0.4)]
position_list.append(geometry_msgs.msg.Vector3(0.5,0.5,0.5))
duration_list = [3.0, 6.0]
goal = FollowCartesianTrajectoryGoal()
for i, position in enumerate(position_list):
point = CartesianTrajectoryPoint()
point.pose = geometry_msgs.msg.Pose(position, geometry_msgs.msg.Quaternion(0,0,0,1))
point.time_from_start = rospy.Duration(duration_list[i])
goal.trajectory.points.append(point)
self.cartesian_passthrough_trajectory_client.send_goal(goal)
self.cartesian_passthrough_trajectory_client.wait_for_result()
self.assertEqual(self.cartesian_passthrough_trajectory_client.get_result().error_code,
FollowCartesianTrajectoryResult.SUCCESSFUL)
rospy.loginfo("Received result SUCCESSFUL")
def test_joint_passthrough(self):
#### Power cycle the robot in order to make sure it is running correctly####
self.assertTrue(self.set_robot_to_mode(RobotMode.POWER_OFF))
rospy.sleep(0.5)
self.assertTrue(self.set_robot_to_mode(RobotMode.RUNNING))
rospy.sleep(0.5)
self.send_program_srv.call()
rospy.sleep(0.5) # TODO properly wait until the controller is running
self.switch_on_controller("forward_joint_traj_controller")
goal = FollowJointTrajectoryGoal()
goal.trajectory.joint_names = ["elbow_joint", "shoulder_lift_joint", "shoulder_pan_joint",
"wrist_1_joint", "wrist_2_joint", "wrist_3_joint"]
position_list = [[0,-1.57,-1.57,0,0,0]]
position_list.append([0.2,-1.57,-1.57,0,0,0])
position_list.append([-0.5,-1.57,-1.2,0,0,0])
duration_list = [3.0, 7.0, 10.0]
for i, position in enumerate(position_list):
point = JointTrajectoryPoint()
point.positions = position
point.time_from_start = rospy.Duration(duration_list[i])
goal.trajectory.points.append(point)
for i, joint_name in enumerate(goal.trajectory.joint_names):
goal.goal_tolerance.append(JointTolerance(joint_name, 0.2, 0.2, 0.2))
goal.goal_time_tolerance = rospy.Duration(0.6)
self.joint_passthrough_trajectory_client.send_goal(goal)
self.joint_passthrough_trajectory_client.wait_for_result()
self.assertEqual(self.joint_passthrough_trajectory_client.get_result().error_code,
FollowJointTrajectoryResult.SUCCESSFUL)
rospy.loginfo("Received result SUCCESSFUL")
def test_cartesian_trajectory_pose_interface(self):
#### Power cycle the robot in order to make sure it is running correctly####
self.assertTrue(self.set_robot_to_mode(RobotMode.POWER_OFF))
rospy.sleep(0.5)
self.assertTrue(self.set_robot_to_mode(RobotMode.RUNNING))
rospy.sleep(0.5)
# Make sure the robot is at a valid start position for our cartesian motions
self.script_publisher.publish("movej([1, -1.7, -1.7, -1, -1.57, -2])")
# As we don't have any feedback from that interface, sleep for a while
rospy.sleep(5)
self.send_program_srv.call()
rospy.sleep(0.5) # TODO properly wait until the controller is running
self.switch_on_controller("pose_based_cartesian_traj_controller")
position_list = [geometry_msgs.msg.Vector3(0.4,0.4,0.4)]
position_list.append(geometry_msgs.msg.Vector3(0.5,0.5,0.5))
duration_list = [3.0, 6.0]
goal = FollowCartesianTrajectoryGoal()
for i, position in enumerate(position_list):
point = CartesianTrajectoryPoint()
point.pose = geometry_msgs.msg.Pose(position, geometry_msgs.msg.Quaternion(0,0,0,1))
point.time_from_start = rospy.Duration(duration_list[i])
goal.trajectory.points.append(point)
self.cartesian_trajectory_client.send_goal(goal)
self.cartesian_trajectory_client.wait_for_result()
self.assertEqual(self.cartesian_trajectory_client.get_result().error_code,
FollowCartesianTrajectoryResult.SUCCESSFUL)
rospy.loginfo("Received result SUCCESSFUL")
def test_twist_interface(self):
#### Power cycle the robot in order to make sure it is running correctly####
self.assertTrue(self.set_robot_to_mode(RobotMode.POWER_OFF))
rospy.sleep(0.5)
self.assertTrue(self.set_robot_to_mode(RobotMode.RUNNING))
rospy.sleep(0.5)
# Make sure the robot is at a valid start position for our cartesian motions
self.script_publisher.publish("movej([1, -1.7, -1.7, -1, -1.57, -2])")
# As we don't have any feedback from that interface, sleep for a while
rospy.sleep(5)
self.send_program_srv.call()
rospy.sleep(0.5) # TODO properly wait until the controller is running
self.switch_on_controller("twist_controller")
# Lookup tcp in base_frame
(trans_start, rot_start) = self.tf_listener.lookupTransform('base', 'tool0_controller', rospy.Time(0))
twist = geometry_msgs.msg.Twist()
twist.linear.x = 0.1
twist.linear.y = 0.0
twist.linear.z = 0.0
twist.angular.x = 0.0
twist.angular.y = 0.0
twist.angular.z = 0.0
# publish twist
self.twist_pub.publish(twist)
# wait 1 sec
rospy.sleep(1)
# stop robot
twist.linear.x = 0.0
self.twist_pub.publish(twist)
(trans_end, rot_end) = self.tf_listener.lookupTransform('base', 'tool0_controller', rospy.Time(0))
self.assertAlmostEqual(rot_start[0], rot_end[0], delta=3e-6)
self.assertAlmostEqual(rot_start[1], rot_end[1], delta=1e-6)
self.assertAlmostEqual(rot_start[2], rot_end[2], delta=1e-6)
self.assertAlmostEqual(trans_start[1], trans_end[1], delta=1e-6)
self.assertAlmostEqual(trans_start[2], trans_end[2], delta=1e-6)
self.assertTrue(trans_end[0] > trans_start[0])
def switch_on_controller(self, controller_name):
"""Switches on the given controller stopping all other known controllers with best_effort
strategy."""
srv = SwitchControllerRequest()
srv.stop_controllers = ALL_CONTROLLERS
srv.start_controllers = [controller_name]
srv.strictness = SwitchControllerRequest.BEST_EFFORT
result = self.switch_controllers_client(srv)
self.assertTrue(result.ok)
if __name__ == '__main__':
import rostest
rostest.run(PKG, NAME, IntegrationTest, sys.argv)
|
8e33ef5585e3d50de86426979e6df40036de272c
|
5b6ba0f288b1e2ac236af846a9bf546a63228476
|
/libtbx/tst_program_template.py
|
573d2b25aa76810c9af31c8c7db06584e563b478
|
[
"BSD-3-Clause-LBNL"
] |
permissive
|
cctbx/cctbx_project
|
5b547b416cadbdf95cca21dace9f54272a08d98a
|
7f4dfb6c873fd560920f697cbfd8a5ff6eed82fa
|
refs/heads/master
| 2023-08-17T17:44:05.077010
| 2023-08-16T22:40:22
| 2023-08-16T22:40:22
| 39,508,026
| 206
| 131
|
NOASSERTION
| 2023-09-14T17:12:55
| 2015-07-22T13:36:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,950
|
py
|
tst_program_template.py
|
from __future__ import absolute_import, division, print_function
import sys
import libtbx.phil
from libtbx.program_template import ProgramTemplate
from libtbx.utils import multi_out
from libtbx.version import get_version
# =============================================================================
class TestProgram(ProgramTemplate):
master_phil = """
parameter_a = None
.type = str
parameter_b = 0
.type = int
parameter_c = None
.type = float
"""
class TestVersionProgram(ProgramTemplate):
version = 'abc'
master_phil = """
parameter_a = None
.type = str
parameter_b = 0
.type = int
parameter_c = None
.type = float
"""
working_phil = libtbx.phil.parse("parameter_a = not None\nparameter_b = 5")
# -----------------------------------------------------------------------------
def test_phil():
master_phil = libtbx.phil.parse(TestProgram.master_phil)
required_output_phil = libtbx.phil.parse(ProgramTemplate.output_phil_str)
master_phil.adopt_scope(required_output_phil)
params = master_phil.fetch(working_phil).extract()
logger = multi_out()
logger.register('stdout', sys.stdout)
test_program = TestProgram(None, params, master_phil, logger)
full_phil = libtbx.phil.parse(test_program.get_program_phil_str())
full = master_phil.fetch(full_phil).extract()
assert full.parameter_a == 'not None'
assert full.parameter_b == 5
assert full.parameter_c is None
assert 'parameter_c' in test_program.get_program_phil_str()
assert 'parameter_c' not in test_program.get_program_phil_str(True)
assert test_program.get_default_output_filename() == 'cctbx_program_000'
assert test_program.get_default_output_filename(prefix='abc') == 'abc_000'
assert test_program.get_default_output_filename(suffix='abc') == 'cctbx_programabc_000'
assert test_program.get_default_output_filename(serial=999) == 'cctbx_program_999'
assert test_program.get_default_output_filename(prefix='abc', suffix='def', serial=123) == 'abcdef_123'
test_program.params.output.prefix = 'prefix'
test_program.params.output.suffix = 'suffix'
test_program.params.output.serial = 7
assert test_program.get_default_output_filename() == 'prefixsuffix_007'
assert test_program.get_default_output_filename(prefix='abc') == 'abcsuffix_007'
assert test_program.get_default_output_filename(suffix='abc') == 'prefixabc_007'
assert test_program.get_default_output_filename(serial=999) == 'prefixsuffix_999'
try:
test_program.get_default_output_filename(serial='abc')
except ValueError as e:
if str(e) != 'The serial argument should be an integer.':
raise
# -----------------------------------------------------------------------------
def test_version():
assert TestProgram.get_version() == get_version()
assert TestVersionProgram.get_version() == TestVersionProgram.version
# =============================================================================
if __name__ == '__main__':
test_phil()
|
9b7e7d62c6d951ff3adf7f5174707caa9b2dc883
|
fe48b44c467d828346f7de4918dd5bd3fa5f5872
|
/gcp-cloud-functions/python/handled_exception/main.py
|
0e820f9a8d7c36037082e1343d5c9bd1103b03aa
|
[
"MIT"
] |
permissive
|
getsentry/examples
|
5b54ec6108c713537d146fbe3fb4eda3a1b02502
|
3e780087adf95ae7b83f5825451520a6af39d396
|
refs/heads/master
| 2023-08-22T11:24:19.709461
| 2023-06-21T14:20:32
| 2023-06-21T14:20:32
| 97,693,091
| 125
| 125
|
MIT
| 2023-09-08T17:26:46
| 2017-07-19T08:35:33
|
C#
|
UTF-8
|
Python
| false
| false
| 866
|
py
|
main.py
|
# /**
# *******************************************************************************
# * File name : main.py
# * Description : This file contains code that instruments handled exception
# *******************************************************************************
# **/
# Import Sentry library
import sentry_sdk
from sentry_sdk.integrations.gcp import GcpIntegration
# Configure Sentry SDK
sentry_sdk.init(
dsn="<your DSN>",
integrations=[GcpIntegration()],
traces_sample_rate=1.0
)
def cloud_handler(event, context):
"""Cloud function which raises an exception
Args:
event (dict): Event payload.
context (google.cloud.functions.Context): Metadata for the event.
"""
try:
division_by_zero = 1/0
except Exception as e:
# handle ZeroDivisionError exception
print(e)
raise
|
180bbea68ac9cd45a7b2db07c4962f5b9e8b6ffb
|
9be0819eef9acd194ba1f3687072d3dbc239c29a
|
/tests/fixtures/__init__.py
|
dc2eb04183ba62c551f1b94f00c08949a2bb6d4d
|
[
"Apache-2.0"
] |
permissive
|
mailgun/talon
|
2a438aa2cac36e645c360dedf31f6455f94ee2c1
|
71d9b6eb78e985bcdfbf99b69c20c001b4b818c4
|
refs/heads/master
| 2023-09-04T15:34:08.602748
| 2022-02-07T07:43:25
| 2022-02-07T07:43:25
| 22,198,589
| 886
| 308
|
Apache-2.0
| 2023-07-27T16:26:43
| 2014-07-24T04:12:04
|
Python
|
UTF-8
|
Python
| false
| false
| 361
|
py
|
__init__.py
|
STANDARD_REPLIES = "tests/fixtures/standard_replies"
with open("tests/fixtures/reply-quotations-share-block.eml") as f:
REPLY_QUOTATIONS_SHARE_BLOCK = f.read()
with open("tests/fixtures/OLK_SRC_BODY_SECTION.html") as f:
OLK_SRC_BODY_SECTION = f.read()
with open("tests/fixtures/reply-separated-by-hr.html") as f:
REPLY_SEPARATED_BY_HR = f.read()
|
2be308f4e578f069697fc5b716cfeb5041101c58
|
46201552303331f68418e67231cce5f4688b85e3
|
/awesome_gans/magan/magan_train.py
|
d35779c9904b86c649556f35506e8c03f2bfd0e2
|
[
"MIT"
] |
permissive
|
kozistr/Awesome-GANs
|
6c8d7a62eefe4f60b3b8e4261d073c74f22dd52c
|
6548b49d8c05459f7b252c17d0959b5825d2fc69
|
refs/heads/master
| 2023-07-19T09:37:55.311749
| 2022-06-25T12:03:10
| 2022-06-25T12:03:10
| 92,664,599
| 820
| 195
|
MIT
| 2021-03-30T01:46:01
| 2017-05-28T14:00:08
|
Python
|
UTF-8
|
Python
| false
| false
| 7,449
|
py
|
magan_train.py
|
import time
import numpy as np
import tensorflow as tf
import awesome_gans.image_utils as iu
import awesome_gans.magan.magan_model as magan
from awesome_gans.datasets import CelebADataSet as DataSet
from awesome_gans.datasets import DataIterator
results = {'output': './gen_img/', 'model': './model/MAGAN-model.ckpt'}
train_step = {
'epochs': 50,
'batch_size': 64,
'global_step': 200001,
'logging_interval': 1000,
}
def main():
start_time = time.time() # Clocking start
# loading CelebA DataSet
ds = DataSet(
height=64,
width=64,
channel=3,
ds_image_path="D:/DataSet/CelebA/CelebA-64.h5",
ds_label_path="D:/DataSet/CelebA/Anno/list_attr_celeba.txt",
# ds_image_path="D:/DataSet/CelebA/Img/img_align_celeba/",
ds_type="CelebA",
use_save=False,
save_file_name="D:/DataSet/CelebA/CelebA-64.h5",
save_type="to_h5",
use_img_scale=False,
img_scale="-1,1",
)
# saving sample images
test_images = np.reshape(iu.transform(ds.images[:100], inv_type='127'), (100, 64, 64, 3))
iu.save_images(test_images, size=[10, 10], image_path=results['output'] + 'sample.png', inv_type='127')
ds_iter = DataIterator(x=ds.images, y=None, batch_size=train_step['batch_size'], label_off=True)
# GPU configure
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as s:
# MAGAN Model
model = magan.MAGAN(s)
# Initializing
s.run(tf.global_variables_initializer())
# Load model & Graph & Weights
saved_global_step = 0
ckpt = tf.train.get_checkpoint_state('./model/')
if ckpt and ckpt.model_checkpoint_path:
model.saver.restore(s, ckpt.model_checkpoint_path)
saved_global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
print("[+] global step : %s" % saved_global_step, " successfully loaded")
else:
print('[-] No checkpoint file found')
n_steps = ds.num_images // model.batch_size # training set size
# Pre-Train
print("[*] pre-training - getting proper Margin")
margin = 0 # 3.0585415484215974
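        # margin == 0 triggers the pre-training pass below; replacing it with a
        # previously measured value (like the one in the comment) skips that pass.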
if margin == 0:
sum_d_loss = 0.0
for i in range(2):
for batch_x in ds_iter.iterate():
batch_x = np.reshape(
iu.transform(batch_x, inv_type='127'),
(model.batch_size, model.height, model.width, model.channel),
)
batch_z = np.random.uniform(-1.0, 1.0, [model.batch_size, model.z_dim]).astype(np.float32)
_, d_real_loss = s.run(
[model.d_op, model.d_real_loss],
feed_dict={
model.x: batch_x,
model.z: batch_z,
model.m: 0.0,
},
)
sum_d_loss += d_real_loss
print("[*] Epoch {:1d} Sum of d_real_loss : {:.8f}".format(i + 1, sum_d_loss))
# Initial margin value
margin = sum_d_loss / n_steps
print("[+] Margin : {0}".format(margin))
old_margin = margin
s_g_0 = np.inf # Sg_0 = infinite
global_step = saved_global_step
start_epoch = global_step // (ds.num_images // model.batch_size) # recover n_epoch
ds_iter.pointer = saved_global_step % (ds.num_images // model.batch_size) # recover n_iter
for epoch in range(start_epoch, train_step['epochs']):
s_d, s_g = 0.0, 0.0
for batch_x in ds_iter.iterate():
batch_x = iu.transform(batch_x, inv_type='127')
batch_x = np.reshape(batch_x, (model.batch_size, model.height, model.width, model.channel))
batch_z = np.random.uniform(-1.0, 1.0, [model.batch_size, model.z_dim]).astype(np.float32)
# Update D network
_, d_loss, d_real_loss = s.run(
[model.d_op, model.d_loss, model.d_real_loss],
feed_dict={
model.x: batch_x,
model.z: batch_z,
model.m: margin,
},
)
# Update D real sample
s_d += np.sum(d_real_loss)
# Update G network
_, g_loss, d_fake_loss = s.run(
[model.g_op, model.g_loss, model.d_fake_loss],
feed_dict={
model.x: batch_x,
model.z: batch_z,
model.m: margin,
},
)
# Update G fake sample
s_g += np.sum(d_fake_loss)
# Logging
if global_step % train_step['logging_interval'] == 0:
summary = s.run(
model.merged,
feed_dict={
model.x: batch_x,
model.z: batch_z,
model.m: margin,
},
)
# Print loss
print(
"[+] Epoch %03d Global Step %05d => " % (epoch, global_step),
" D loss : {:.8f}".format(d_loss),
" G loss : {:.8f}".format(g_loss),
)
# Training G model with sample image and noise
sample_z = np.random.uniform(-1.0, 1.0, [model.sample_num, model.z_dim]).astype(np.float32)
samples = s.run(
model.g,
feed_dict={
model.z: sample_z,
model.m: margin,
},
)
# Summary saver
model.writer.add_summary(summary, global_step)
# Export image generated by model G
sample_image_height = model.sample_size
sample_image_width = model.sample_size
sample_dir = results['output'] + 'train_{:08d}.png'.format(global_step)
# Generated image save
iu.save_images(
samples, size=[sample_image_height, sample_image_width], image_path=sample_dir, inv_type='127'
)
# Model save
model.saver.save(s, results['model'], global_step)
global_step += 1
# Update margin
if s_d / n_steps < margin and s_d < s_g and s_g_0 <= s_g:
margin = s_d / n_steps
print("[*] Margin updated from {:8f} to {:8f}".format(old_margin, margin))
old_margin = margin
s_g_0 = s_g
# Convergence Measure
e_d = s_d / n_steps
e_g = s_g / n_steps
l_ = e_d + np.abs(e_d - e_g)
print("[+] Epoch %03d " % epoch, " L : {:.8f}".format(l_))
end_time = time.time() - start_time # Clocking end
# Elapsed time
print("[+] Elapsed time {:.8f}s".format(end_time))
# Close tf.Session
s.close()
if __name__ == '__main__':
main()
|
c737e19010600a926898713b5925661e122bc37b
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/19_数学/数论/快速幂/等差等比数列求和.py
|
e120a07110bbce2924dd844210e8c5e864340705
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 81
|
py
|
等差等比数列求和.py
|
# Summation of arithmetic and geometric series (等差等比数列求和)
# https://atcoder.jp/contests/abc293/editorial/5955
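# Illustrative sketch (added for clarity, not from the linked editorial): a standard
# O(log n) evaluation of the geometric part S(n) = 1 + r + r^2 + ... + r^(n-1) (mod p)
# that does not require r - 1 to be invertible.
def geometric_sum(r: int, n: int, mod: int) -> int:
    """Return (1 + r + ... + r**(n - 1)) % mod in O(log n) time."""
    if n == 0:
        return 0
    if n % 2 == 1:
        # Peel off the last term r^(n-1).
        return (geometric_sum(r, n - 1, mod) + pow(r, n - 1, mod)) % mod
    half = geometric_sum(r, n // 2, mod)
    # S(n) = S(n/2) + r^(n/2) * S(n/2) = S(n/2) * (1 + r^(n/2)).
    return half * (1 + pow(r, n // 2, mod)) % mod
# Example: geometric_sum(2, 10, 10**9 + 7) == 1023 (= 2**10 - 1).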
|
74fb328ab45c8faba585de3b60236a1c2423329c
|
4658aa41017b2e6da830f1e879774e4a7296c314
|
/holoviews/tests/plotting/matplotlib/test_plot.py
|
4b946f338c4413bcdcc8fe13b1d2033ff9b38841
|
[
"BSD-3-Clause"
] |
permissive
|
holoviz/holoviews
|
3f133e572933c94cedad7bae6fb6d071152842fc
|
e3dee5443dad84b507734c0a3d2bba8ec44f5653
|
refs/heads/main
| 2023-09-03T05:08:42.682432
| 2023-08-28T20:40:36
| 2023-08-28T20:40:36
| 19,542,768
| 1,223
| 223
|
BSD-3-Clause
| 2023-09-14T18:15:53
| 2014-05-07T16:59:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,257
|
py
|
test_plot.py
|
import matplotlib.pyplot as plt
import pyviz_comms as comms
from holoviews.core.options import Store
from holoviews.element.comparison import ComparisonTestCase
from holoviews.plotting.mpl.element import ElementPlot
from param import concrete_descendents
from .. import option_intersections
mpl_renderer = Store.renderers['matplotlib']
class TestPlotDefinitions(ComparisonTestCase):
known_clashes = [(('Arrow',), {'fontsize'})]
def test_matplotlib_plot_definitions(self):
self.assertEqual(option_intersections('matplotlib'), self.known_clashes)
class TestMPLPlot(ComparisonTestCase):
def setUp(self):
self.previous_backend = Store.current_backend
self.comm_manager = mpl_renderer.comm_manager
mpl_renderer.comm_manager = comms.CommManager
Store.set_current_backend('matplotlib')
self._padding = {}
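        # Save each ElementPlot subclass's padding and zero it for the test run;
        # tearDown() puts the original values back.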
for plot in concrete_descendents(ElementPlot).values():
self._padding[plot] = plot.padding
plot.padding = 0
def tearDown(self):
Store.current_backend = self.previous_backend
mpl_renderer.comm_manager = self.comm_manager
plt.close(plt.gcf())
for plot, padding in self._padding.items():
plot.padding = padding
|
ed21333469657666ca701eb03852d852233b58ea
|
407d194b52fe9cf75cca9d6f3c162a565549a1ae
|
/VMBackup/main/backuplogger.py
|
6f83a1d55148bbc21732e639ac0e32b39ae1ac5d
|
[
"Apache-2.0"
] |
permissive
|
Azure/azure-linux-extensions
|
808761f927045f00548aa68e38d4bec8651c0eba
|
3cea1567fc4f4eb5beea9884153e92d70610394d
|
refs/heads/master
| 2023-08-27T14:06:05.775617
| 2023-08-23T01:56:05
| 2023-08-23T01:56:05
| 19,841,123
| 300
| 314
|
Apache-2.0
| 2023-09-14T04:21:26
| 2014-05-16T01:38:49
|
Python
|
UTF-8
|
Python
| false
| false
| 5,890
|
py
|
backuplogger.py
|
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import string
import time
import traceback
from blobwriter import BlobWriter
from Utils.WAAgentUtil import waagent
import sys
class Backuplogger(object):
def __init__(self, hutil):
self.msg = ''
self.con_path = '/dev/console'
self.enforced_local_flag_value = True
self.hutil = hutil
self.prev_log = ''
self.logging_off = False
def enforce_local_flag(self, enforced_local):
#Pause file logging during I/O freeze period by setting Enforced_local_flag_value to False
#Enforced_local_flag_value is turned to False from True when Freeze Starts
#Enforced_local_flag_value is turned to True from False when Freeze Ends
if (self.hutil.get_intvalue_from_configfile('LoggingOff', 0) == 1):
self.logging_off = True
if (self.enforced_local_flag_value != False and enforced_local == False and self.logging_off == True):
pass
elif (self.enforced_local_flag_value != False and enforced_local == False):
self.msg = self.msg + "================== Logs during Freeze Start ==============" + "\n"
elif (self.enforced_local_flag_value == False and enforced_local == True):
self.msg = self.msg + "================== Logs during Freeze End ==============" + "\n"
self.commit_to_local()
self.enforced_local_flag_value = enforced_local
"""description of class"""
def log(self, msg, local=False, level='Info'):
if(self.enforced_local_flag_value == False and self.logging_off == True):
return
WriteLog = self.hutil.get_strvalue_from_configfile('WriteLog','True')
if (WriteLog == None or WriteLog == 'True'):
log_msg = ""
if sys.version_info > (3,):
log_msg = self.log_to_con_py3(msg, level)
else:
log_msg = "{0} {1} {2} \n".format(str(datetime.datetime.utcnow()) , level , msg)
if(self.enforced_local_flag_value != False):
self.log_to_con(log_msg)
if(self.enforced_local_flag_value == False):
self.msg += log_msg
else:
self.hutil.log(str(msg),level)
def log_to_con(self, msg):
try:
with open(self.con_path, "wb") as C :
message = "".join(list(filter(lambda x : x in string.printable, msg)))
C.write(message.encode('ascii','ignore'))
except IOError as e:
pass
except Exception as e:
pass
def log_to_con_py3(self, msg, level='Info'):
log_msg = ""
try:
if type(msg) is not str:
msg = str(msg, errors="backslashreplace")
time = datetime.datetime.utcnow().strftime(u'%Y/%m/%d %H:%M:%S.%f')
log_msg = u"{0} {1} {2} \n".format(time , level , msg)
log_msg= str(log_msg.encode('ascii', "backslashreplace"),
encoding="ascii")
if(self.enforced_local_flag_value != False):
with open(self.con_path, "w") as C :
C.write(log_msg)
except IOError:
pass
except Exception as e:
log_msg = "###### Exception in log_to_con_py3"
return log_msg
def commit(self, logbloburi):
#commit to local file system first, then commit to the network.
try:
self.hutil.log(self.msg)
self.msg = ''
except Exception as e:
pass
try:
self.commit_to_blob(logbloburi)
except Exception as e:
self.hutil.log('commit to blob failed')
def commit_to_local(self):
self.hutil.log(self.msg)
self.msg = ''
def commit_to_blob(self, logbloburi):
UploadStatusAndLog = self.hutil.get_strvalue_from_configfile('UploadStatusAndLog','True')
if (UploadStatusAndLog == None or UploadStatusAndLog == 'True'):
log_to_blob = ""
blobWriter = BlobWriter(self.hutil)
# append the wala log at the end.
try:
# distro information
if(self.hutil is not None and self.hutil.patching is not None and self.hutil.patching.distro_info is not None):
distro_str = ""
if(len(self.hutil.patching.distro_info)>1):
distro_str = self.hutil.patching.distro_info[0] + " " + self.hutil.patching.distro_info[1]
else:
distro_str = self.hutil.patching.distro_info[0]
self.msg = "Distro Info:" + distro_str + "\n" + self.msg
self.msg = "Guest Agent Version is :" + waagent.GuestAgentVersion + "\n" + self.msg
log_to_blob = str(self.hutil.fetch_log_message()) + "Tail of shell script log:" + str(self.hutil.get_shell_script_log())
except Exception as e:
errMsg = 'Failed to get the waagent log with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.hutil.log(errMsg)
blobWriter.WriteBlob(log_to_blob, logbloburi)
def set_prev_log(self):
self.prev_log = self.hutil.get_prev_log()
|
67d7cd00329329e16d3b7d952eab1fe5081cb43d
|
60d6b8501d0be546437b26a6ee1f9fab97ec3897
|
/platypush/backend/http/app/ws/requests.py
|
6fec50f76f4d1289c9f619fe5bf5cf0127fda96c
|
[
"MIT"
] |
permissive
|
BlackLight/platypush
|
68284a85b2f9eef303d26b04530f075927b5834a
|
446bc2f67493d3554c5422242ff91d5b5c76d78a
|
refs/heads/master
| 2023-08-31T21:01:53.519960
| 2023-08-29T22:05:38
| 2023-08-29T22:05:38
| 109,421,017
| 265
| 25
|
MIT
| 2023-09-01T23:15:49
| 2017-11-03T16:56:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,517
|
py
|
requests.py
|
from threading import Thread, current_thread
from typing import Set
from typing_extensions import override
from platypush.backend.http.app.utils import send_message
from platypush.message.request import Request
from . import WSRoute, logger
class WSRequestsProxy(WSRoute):
"""
Websocket event proxy mapped to ``/ws/requests``.
"""
_max_concurrent_requests: int = 10
""" Maximum number of concurrent requests allowed on the same connection. """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._requests: Set[Thread] = set()
@classmethod
@override
def app_name(cls) -> str:
return 'requests'
def _handle_request(self, request: Request):
self._requests.add(current_thread())
try:
response = send_message(request, wait_for_response=True)
self.send(str(response))
finally:
self._requests.remove(current_thread())
def on_message(self, message):
if len(self._requests) > self._max_concurrent_requests:
logger.info('Too many concurrent requests on %s', self)
return
try:
msg = Request.build(message)
assert isinstance(msg, Request), f'Expected {Request}, got {type(msg)}'
except Exception as e:
logger.info('Could not build request from %s: %s', message, e)
logger.exception(e)
return
Thread(target=self._handle_request, args=(msg,)).start()
|
d38a90a007172072fbc3bdb7a44faa4889404b88
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/CondCore/Utilities/scripts/o2oRun.py
|
dd6e68a71bc3134299be02e24f15fe0c7cb338f7
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 865
|
py
|
o2oRun.py
|
#!/usr/bin/env python3
'''
'''
__author__ = 'Giacomo Govi'
import sys
import os
import CondCore.Utilities.o2olib as o2olib
import optparse
import argparse
def main( argv ):
parser = argparse.ArgumentParser()
parser.add_argument("executable", type=str, help="wrapper for O2O jobs")
parser.add_argument("-n","--name", type=str, help="the O2O job name" )
parser.add_argument("--db", type=str, help="the target database: pro ( for prod ) or dev ( for prep ). default=pro")
parser.add_argument("-a","--auth", type=str, help="path of the authentication file")
parser.add_argument("-v","--verbose", action="count", help="job output mirrored to screen (default=logfile only)")
args = parser.parse_args()
tool = o2olib.O2OTool()
tool.setup(args)
return tool.run()
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
99f5a111b39289a6d71c1e985f5c381c26ed0cc2
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/inv/models/platform.py
|
266e3ea3fd8d2153da3ed37a1f951ea649b000f8
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 8,144
|
py
|
platform.py
|
# ---------------------------------------------------------------------
# Platform
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import os
import threading
import operator
import uuid
import datetime
# Third-party modules
from mongoengine.document import Document
from mongoengine.fields import StringField, LongField, UUIDField, ListField
from mongoengine.queryset import Q
from mongoengine import signals
from pymongo.collection import ReturnDocument
import cachetools
from bson.int64 import Int64
# NOC modules
from noc.core.mongo.fields import PlainReferenceField, DateField
from noc.core.model.decorator import on_delete_check
from noc.core.bi.decorator import bi_sync, new_bi_id
from noc.core.prettyjson import to_json
from noc.core.change.decorator import change
from noc.models import get_model
from noc.main.models.label import Label
from .vendor import Vendor
id_lock = threading.Lock()
@Label.model
@bi_sync
@change
@on_delete_check(
check=[
("sa.ManagedObject", "platform"),
("inv.FirmwarePolicy", "platform"),
],
clean_lazy_labels="platform",
)
class Platform(Document):
meta = {
"collection": "noc.platforms",
"strict": False,
"auto_create_index": False,
"json_collection": "inv.platforms",
"json_unique_fields": [("vendor", "name")],
"indexes": [
{"fields": ["vendor", "name"], "unique": True},
("vendor", "aliases"),
"labels",
],
}
vendor = PlainReferenceField(Vendor)
name = StringField()
description = StringField(required=False)
# Full name, combined from vendor platform
full_name = StringField(unique=True)
# Platform start of sale date
start_of_sale = DateField()
# Platform end of sale date
end_of_sale = DateField()
# Platform end of support date
end_of_support = DateField()
# End of extended support date (installation local)
end_of_xsupport = DateField()
# SNMP OID value
# sysObjectID.0
snmp_sysobjectid = StringField(regex=r"^1.3.6(\.\d+)+$")
# Global ID
uuid = UUIDField(binary=True)
# Platform aliases
aliases = ListField(StringField())
# Labels
labels = ListField(StringField())
# Object id in BI
bi_id = LongField(unique=True)
_id_cache = cachetools.TTLCache(1000, ttl=60)
_bi_id_cache = cachetools.TTLCache(1000, ttl=60)
_ensure_cache = cachetools.TTLCache(1000, ttl=60)
def __str__(self):
return self.full_name
def clean(self):
self.full_name = "%s %s" % (self.vendor.name, self.name)
if self.aliases:
self.aliases = sorted(a for a in self.aliases if a != self.name)
super().clean()
def save(self, *args, **kwargs):
to_merge_aliases = not hasattr(self, "_changed_fields") or "aliases" in self._changed_fields
super().save(*args, **kwargs)
if to_merge_aliases:
for a in self.aliases:
if a == self.name:
continue
self.merge_platform(a)
@classmethod
@cachetools.cachedmethod(operator.attrgetter("_id_cache"), lock=lambda _: id_lock)
def get_by_id(cls, id):
return Platform.objects.filter(id=id).first()
@classmethod
@cachetools.cachedmethod(operator.attrgetter("_bi_id_cache"), lock=lambda _: id_lock)
def get_by_bi_id(cls, id):
return Platform.objects.filter(bi_id=id).first()
def to_json(self) -> str:
r = {
"$collection": self._meta["json_collection"],
"vendor__code": self.vendor.code[0],
"name": self.name,
"uuid": self.uuid,
}
if self.aliases:
r["aliases"] = [str(x) for x in self.aliases]
if self.description:
r["description"] = self.description
if self.start_of_sale:
r["start_of_sale"] = self.start_of_sale.strftime("%Y-%m-%d")
if self.end_of_sale:
r["end_of_sale"] = self.end_of_sale.strftime("%Y-%m-%d")
if self.end_of_support:
r["end_of_support"] = self.end_of_support.strftime("%Y-%m-%d")
if self.snmp_sysobjectid:
r["snmp_sysobjectid"] = self.snmp_sysobjectid
if self.labels:
r["labels"] = self.labels
return to_json(
r,
order=[
"vendor__code",
"name",
"$collection",
"uuid",
"aliases",
"description",
"start_of_sale",
"end_of_sale",
"end_of_support",
"snmp_sysobjectid",
"labels",
],
)
def get_json_path(self) -> str:
return os.path.join(self.vendor.code[0], "%s.json" % self.name.replace("/", "_"))
@classmethod
@cachetools.cachedmethod(
operator.attrgetter("_ensure_cache"),
key=lambda c, v, n, strict=False, labels=None: f"{v.id}-{n}",
lock=lambda _: id_lock,
)
def ensure_platform(cls, vendor, name, strict=False, labels=None):
"""
Get or create platform by vendor and code
:param vendor:
:param name:
:param strict: Return None if platform is not found
:param labels: List of platform labels
:return:
"""
# Try to find platform
q = Q(vendor=vendor.id, name=name) | Q(vendor=vendor.id, aliases=name)
platform = Platform.objects.filter(q).first()
if platform or strict:
return platform
# Try to create
labels = labels or []
pu = uuid.uuid4()
d = Platform._get_collection().find_one_and_update(
{"vendor": vendor.id, "name": name},
{
"$setOnInsert": {
"uuid": pu,
"full_name": "%s %s" % (vendor.name, name),
"bi_id": Int64(new_bi_id()),
"aliases": [],
"labels": labels,
}
},
upsert=True,
return_document=ReturnDocument.AFTER,
)
d["id"] = d["_id"]
del d["_id"]
p = Platform(**d)
signals.post_save.send(cls, document=p, created=True)
p._clear_changed_fields()
p._created = False
return p
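    # Illustrative usage (hypothetical values): given an inv.Vendor document ``vendor``,
    # ``Platform.ensure_platform(vendor, "MX480")`` returns the platform matched by
    # name or alias and creates it when missing; with ``strict=True`` a missing
    # platform yields None instead.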
@property
def is_end_of_sale(self):
"""
Check if platform reached end-of-sale mark
:return:
"""
if not self.end_of_sale:
return False
return datetime.date.today() > self.end_of_sale
@property
def is_end_of_support(self):
"""
Check if platform reached end-of-support mark
:return:
"""
deadline = []
if self.end_of_support:
deadline += [self.end_of_support]
if self.end_of_xsupport:
deadline += [self.end_of_xsupport]
if deadline:
return datetime.date.today() > max(deadline)
else:
return False
def merge_platform(self, alias):
"""
Merge *alias* platform
:param alias: platform name
:return:
"""
ap = Platform.objects.filter(vendor=self.vendor.id, name=alias).first()
if not ap:
return
# Replace references to the alias platform
refs = self._on_delete["check"] + self._on_delete["clean"] + self._on_delete["delete"]
for model_name, field in refs:
model = get_model(model_name)
for obj in model.objects.filter(**{field: ap.id}):
setattr(obj, field, self)
obj.save()
# Finally delete aliases platform
ap.delete()
@classmethod
def can_set_label(cls, label):
return Label.get_effective_setting(label, setting="enable_platform")
@classmethod
def iter_lazy_labels(cls, platform: "Platform"):
yield f"noc::platform::{platform.name}::="
|
c7495f9fda26a99e61075b68f1c6873f1b2ffa48
|
a38bf459ae380f67e0de22f7106a8df4385a7076
|
/gapic/samplegen_utils/utils.py
|
27ccb50efafc684dc40707bb761ffa4ada1f19c0
|
[
"Apache-2.0"
] |
permissive
|
googleapis/gapic-generator-python
|
73ce9d52f6f5bb2652d49b237b24263d6637b1da
|
4eee26181e8db9fb5144eef5a76f178c1594e48a
|
refs/heads/main
| 2023-09-04T11:12:14.728757
| 2023-09-02T10:34:44
| 2023-09-02T10:34:44
| 129,809,857
| 116
| 65
|
Apache-2.0
| 2023-09-12T18:57:01
| 2018-04-16T21:47:04
|
Python
|
UTF-8
|
Python
| false
| false
| 4,761
|
py
|
utils.py
|
# Copyright (C) 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing miscellaneous utilities
that will eventually move somewhere else (probably)."""
import os
import yaml
from typing import (Generator, Tuple, List, Union)
from gapic.samplegen_utils import types
MIN_SCHEMA_VERSION = (1, 2, 0)
VALID_CONFIG_TYPE = "com.google.api.codegen.samplegen.v1p2.SampleConfigProto"
def render_format_string(s: str, expressions: List[str] = []) -> str:
"""Given string s and a list of expressions, substitute each %s
in the string with {exp}.
Arguments:
s (str): The string literal.
expressions (Optional[List[str]]): A list of expressions.
"""
s = s.replace('\"', '\\\"')
for exp in expressions:
# some expressions will contain references to "$resp"
exp = coerce_response_name(exp)
s = s.replace("%s", f"{{{exp}}}", 1)
return s
def coerce_response_name(s: str) -> str:
# In the sample config, the "$resp" keyword is used to refer to the
# item of interest as received by the corresponding calling form.
# For a 'regular', i.e. unary, synchronous, non-long-running method,
# it's the return value; for a server-streaming method, it's the iteration
# variable in the for loop that iterates over the return value, and for
# a long running promise, the user calls result on the method return value to
# resolve the future.
#
# The sample schema uses '$resp' as the special variable,
# but in the samples the 'response' variable is used instead.
return s.replace("$resp", "response")
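# Illustrative sketch (editor's note, not part of the upstream module): combining the two
# helpers above,
#     render_format_string("name: %s, count: %s", ["$resp.name", "count"])
# yields 'name: {response.name}, count: {count}' -- each %s is substituted in order and
# the "$resp" keyword is rewritten to the "response" variable used in generated samples.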
def is_valid_sample_cfg(
doc,
min_version: Tuple[int, int, int] = MIN_SCHEMA_VERSION,
config_type: str = VALID_CONFIG_TYPE,
) -> bool:
"""Predicate that takes a parsed yaml doc checks if it is a valid sample config.
Arguments:
doc (Any): The yaml document to be assessed
min_version (Tuple[int, int, int]): (optional) The minimum valid version for
the sample config. Uses semantic version (major, minor, bugfix).
config_type (str): (optional) The valid type of the document.
Returns:
bool: True if doc is a valid sample config document.
"""
def parse_version(version_str: str) -> Tuple[int, ...]:
return tuple(int(tok) for tok in version_str.split("."))
version_token = "schema_version"
return bool(
# Yaml may return a dict, a list, or a str
isinstance(doc, dict)
and doc.get("type") == config_type
and parse_version(doc.get(version_token, "")) >= min_version
and doc.get("samples")
)
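# Illustrative sketch (hypothetical document, editor's note): a minimal mapping accepted
# by the predicate above would be
#     {"type": VALID_CONFIG_TYPE, "schema_version": "1.2.0", "samples": [{"id": "s1"}]}
# since its parsed version (1, 2, 0) meets MIN_SCHEMA_VERSION and "samples" is non-empty.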
def generate_all_sample_fpaths(path: str) -> Generator[str, None, None]:
"""Given file or directory path, yield all valid sample config fpaths recursively.
Arguments:
path (str): The file or directory path to check
for valid samplegen config files.
Directories are checked recursively.
Raises:
types.InvalidConfig: If 'path' is an invalid sampleconfig file
or 'path' is not a file or directory.
Returns:
Generator[str, None, None]: All valid samplegen config files
starting at 'path'.
"""
# If a user passes in a directory to search for sample configs,
# it is required to ignore any non-sample-config files so as to avoid
# being unhelpfully strict.
# Directly named files, however, should generate an error, because silently
# ignoring them is less helpful than failing loudly.
if os.path.isfile(path):
if not path.endswith('.yaml'):
raise types.InvalidConfig(f"Not a yaml file: {path}")
with open(path) as f:
if not any(is_valid_sample_cfg(doc)
for doc in yaml.safe_load_all(f.read())):
raise types.InvalidConfig(
f"No valid sample config in file: {path}")
yield path
# Note: if we ever need to recursively check directories for sample configs,
# add an "elif os.path.isdir(path)" yielding from os.walk right here.
else:
raise types.InvalidConfig(f"No such file: {path}")
|
892d55c68cd13be5e18eb4c012c454009bfdede9
|
8ed15d43652dbcab332c78923da416b91b139323
|
/python/fedml/cli/edge_deployment/client_constants.py
|
1c681aad95d6e836a4fa343f4f6467224a0c181d
|
[
"Apache-2.0"
] |
permissive
|
FedML-AI/FedML
|
74d144038c9de4a0621eb328d00987abac35e2d1
|
b436fbd95cbb62f6c58d2233d7affa0f62cb1817
|
refs/heads/master
| 2023-08-31T22:15:39.786371
| 2023-08-24T03:41:58
| 2023-08-24T03:41:58
| 281,519,510
| 3,197
| 807
|
Apache-2.0
| 2023-09-14T02:14:20
| 2020-07-21T22:41:25
|
Python
|
UTF-8
|
Python
| false
| false
| 19,804
|
py
|
client_constants.py
|
import os
import platform
import shutil
import signal
import subprocess
import sys
from os.path import expanduser
import psutil
import yaml
from ...cli.comm_utils.yaml_utils import load_yaml_config
class ClientConstants(object):
MSG_MLOPS_CLIENT_STATUS_OFFLINE = "OFFLINE"
MSG_MLOPS_CLIENT_STATUS_IDLE = "IDLE"
MSG_MLOPS_CLIENT_STATUS_UPGRADING = "UPGRADING"
MSG_MLOPS_CLIENT_STATUS_QUEUED = "QUEUED"
MSG_MLOPS_CLIENT_STATUS_INITIALIZING = "INITIALIZING"
MSG_MLOPS_CLIENT_STATUS_TRAINING = "TRAINING"
MSG_MLOPS_CLIENT_STATUS_STOPPING = "STOPPING"
MSG_MLOPS_CLIENT_STATUS_KILLED = "KILLED"
MSG_MLOPS_CLIENT_STATUS_FAILED = "FAILED"
MSG_MLOPS_CLIENT_STATUS_FINISHED = "FINISHED"
MSG_MLOPS_SERVER_DEVICE_STATUS_OFFLINE = "OFFLINE"
MSG_MLOPS_SERVER_DEVICE_STATUS_IDLE = "IDLE"
MSG_MLOPS_SERVER_DEVICE_STATUS_STARTING = "STARTING"
MSG_MLOPS_SERVER_DEVICE_STATUS_RUNNING = "RUNNING"
MSG_MLOPS_SERVER_DEVICE_STATUS_STOPPING = "STOPPING"
MSG_MLOPS_SERVER_DEVICE_STATUS_KILLED = "KILLED"
MSG_MLOPS_SERVER_DEVICE_STATUS_FAILED = "FAILED"
MSG_MLOPS_SERVER_DEVICE_STATUS_FINISHED = "FINISHED"
# Device Status
MSG_MLOPS_DEVICE_STATUS_IDLE = "IDLE"
MSG_MLOPS_DEVICE_STATUS_UPGRADING = "UPGRADING"
MSG_MLOPS_DEVICE_STATUS_RUNNING = "RUNNING"
MSG_MLOPS_DEVICE_STATUS_OFFLINE = "OFFLINE"
# Run Status
MSG_MLOPS_RUN_STATUS_QUEUED = "QUEUED"
MSG_MLOPS_RUN_STATUS_STARTING = "STARTING"
MSG_MLOPS_RUN_STATUS_RUNNING = "RUNNING"
MSG_MLOPS_RUN_STATUS_STOPPING = "STOPPING"
MSG_MLOPS_RUN_STATUS_KILLED = "KILLED"
MSG_MLOPS_RUN_STATUS_FAILED = "FAILED"
MSG_MLOPS_RUN_STATUS_FINISHED = "FINISHED"
LOCAL_HOME_RUNNER_DIR_NAME = 'fedml-client'
LOCAL_RUNNER_INFO_DIR_NAME = 'runner_infos'
LOCAL_PACKAGE_HOME_DIR_NAME = "fedml_packages"
CLIENT_LOGIN_PROGRAM = "client_login.py"
CLIENT_BOOTSTRAP_LINUX_PROGRAM = "bootstrap.sh"
CLIENT_BOOTSTRAP_WIN_PROGRAM = "bootstrap.bat"
FEDML_OTA_CMD_UPGRADE = "upgrade"
FEDML_OTA_CMD_RESTART = "restart"
LOCAL_CLIENT_API_PORT = 40800
LOGIN_MODE_CLIEN_INDEX = 0
LOGIN_MODE_EDGE_SIMULATOR_INDEX = 1
login_role_list = ["client", "edge_simulator"]
@staticmethod
def get_fedml_home_dir():
home_dir = expanduser("~")
fedml_home_dir = os.path.join(home_dir, ClientConstants.LOCAL_HOME_RUNNER_DIR_NAME)
if not os.path.exists(fedml_home_dir):
os.makedirs(fedml_home_dir, exist_ok=True)
return fedml_home_dir
@staticmethod
def get_log_file_dir():
log_file_dir = os.path.join(ClientConstants.get_fedml_home_dir(), "fedml", "logs")
if not os.path.exists(log_file_dir):
os.makedirs(log_file_dir, exist_ok=True)
return log_file_dir
@staticmethod
def get_data_dir():
data_dir = os.path.join(ClientConstants.get_fedml_home_dir(), "fedml", "data")
if not os.path.exists(data_dir):
os.makedirs(data_dir, exist_ok=True)
return data_dir
@staticmethod
def get_package_download_dir():
package_download_dir = os.path.join(ClientConstants.get_fedml_home_dir(),
ClientConstants.LOCAL_PACKAGE_HOME_DIR_NAME)
if not os.path.exists(package_download_dir):
os.makedirs(package_download_dir, exist_ok=True)
return package_download_dir
@staticmethod
def get_package_unzip_dir():
package_unzip_dir = ClientConstants.get_package_download_dir()
if not os.path.exists(package_unzip_dir):
os.makedirs(package_unzip_dir, exist_ok=True)
return package_unzip_dir
@staticmethod
def get_package_run_dir(package_name):
package_file_no_extension = str(package_name).split('.')[0]
package_run_dir = os.path.join(ClientConstants.get_package_unzip_dir(),
package_file_no_extension)
if not os.path.exists(package_run_dir):
os.makedirs(package_run_dir, exist_ok=True)
return package_run_dir
@staticmethod
def get_model_cache_dir():
model_cache_dir = os.path.join(ClientConstants.get_fedml_home_dir(), "fedml", "model_cache")
if not os.path.exists(model_cache_dir):
os.makedirs(model_cache_dir, exist_ok=True)
return model_cache_dir
@staticmethod
def get_database_dir():
database_dir = os.path.join(ClientConstants.get_data_dir(), "database")
if not os.path.exists(database_dir):
os.makedirs(database_dir, exist_ok=True)
return database_dir
@staticmethod
def cleanup_run_process(run_id):
try:
local_pkg_data_dir = ClientConstants.get_data_dir()
process_id_file = os.path.join(local_pkg_data_dir, ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME,
"runner-sub-process-v2.id")
if not os.path.exists(process_id_file):
return
process_info = load_yaml_config(process_id_file)
if run_id is None:
for run_id_key, process_id_value in process_info.items():
ClientConstants.cleanup_run_process(run_id_key)
return
process_id = process_info.get(str(run_id), None)
if process_id is not None:
try:
process = psutil.Process(process_id)
for sub_process in process.children():
if platform.system() == 'Windows':
os.system("taskkill /PID {} /T /F".format(sub_process.pid))
else:
os.kill(sub_process.pid, signal.SIGKILL)
if process is not None:
if platform.system() == 'Windows':
os.system("taskkill /PID {} /T /F".format(process.pid))
else:
os.kill(process.pid, signal.SIGKILL)
except Exception as e:
pass
process_info.pop(str(run_id))
ClientConstants.generate_yaml_doc(process_info, process_id_file)
except Exception as e:
pass
@staticmethod
def save_run_process(run_id, process_id):
try:
local_pkg_data_dir = ClientConstants.get_data_dir()
process_id_file = os.path.join(local_pkg_data_dir, ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME,
"runner-sub-process-v2.id")
if os.path.exists(process_id_file):
process_info = load_yaml_config(process_id_file)
else:
process_info = dict()
process_info[str(run_id)] = process_id
ClientConstants.generate_yaml_doc(process_info, process_id_file)
except Exception as e:
pass
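# Illustrative sketch (hypothetical values, editor's note): after
#     ClientConstants.save_run_process("1001", 4242)
# the runner-sub-process-v2.id YAML file simply maps run id to pid, e.g. {'1001': 4242},
# and cleanup_run_process("1001") later reads that mapping to terminate the process tree.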
@staticmethod
def cleanup_learning_process(run_id):
try:
local_pkg_data_dir = ClientConstants.get_data_dir()
process_id_file = os.path.join(local_pkg_data_dir, ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME,
"runner-learning-process-v2.id")
if not os.path.exists(process_id_file):
return
process_info = load_yaml_config(process_id_file)
if run_id is None:
for run_id_key, process_id_value in process_info.items():
ClientConstants.cleanup_learning_process(run_id_key)
return
process_id = process_info.get(str(run_id), None)
if process_id is not None:
try:
process = psutil.Process(process_id)
for sub_process in process.children():
if platform.system() == 'Windows':
os.system("taskkill /PID {} /T /F".format(sub_process.pid))
else:
os.kill(sub_process.pid, signal.SIGTERM)
if process is not None:
if platform.system() == 'Windows':
os.system("taskkill /PID {} /T /F".format(process.pid))
else:
os.kill(process.pid, signal.SIGTERM)
except Exception as e:
pass
process_info.pop(str(run_id))
ClientConstants.generate_yaml_doc(process_info, process_id_file)
except Exception as e:
pass
@staticmethod
def save_learning_process(run_id, learning_id):
try:
local_pkg_data_dir = ClientConstants.get_data_dir()
process_id_file = os.path.join(local_pkg_data_dir, ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME,
"runner-learning-process-v2.id")
if os.path.exists(process_id_file):
process_info = load_yaml_config(process_id_file)
else:
process_info = dict()
process_info[str(run_id)] = learning_id
ClientConstants.generate_yaml_doc(process_info, process_id_file)
except Exception as e:
pass
@staticmethod
def cleanup_bootstrap_process(run_id):
try:
local_pkg_data_dir = ClientConstants.get_data_dir()
process_id_file = os.path.join(local_pkg_data_dir, ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME,
"runner-bootstrap-process-v2.id")
if not os.path.exists(process_id_file):
return
process_info = load_yaml_config(process_id_file)
if run_id is None:
for run_id_key, process_id_value in process_info.items():
ClientConstants.cleanup_bootstrap_process(run_id_key)
return
process_id = process_info.get(str(run_id), None)
if process_id is not None:
try:
process = psutil.Process(process_id)
for sub_process in process.children():
if platform.system() == 'Windows':
os.system("taskkill /PID {} /T /F".format(sub_process.pid))
else:
os.kill(sub_process.pid, signal.SIGTERM)
if process is not None:
if platform.system() == 'Windows':
os.system("taskkill /PID {} /T /F".format(process.pid))
else:
os.kill(process.pid, signal.SIGTERM)
except Exception as e:
pass
process_info.pop(str(run_id))
ClientConstants.generate_yaml_doc(process_info, process_id_file)
except Exception as e:
pass
@staticmethod
def save_bootstrap_process(run_id, process_id):
try:
local_pkg_data_dir = ClientConstants.get_data_dir()
process_id_file = os.path.join(local_pkg_data_dir, ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME,
"runner-bootstrap-process-v2.id")
if os.path.exists(process_id_file):
process_info = load_yaml_config(process_id_file)
else:
process_info = dict()
process_info[str(run_id)] = process_id
ClientConstants.generate_yaml_doc(process_info, process_id_file)
except Exception as e:
pass
@staticmethod
def save_runner_infos(unique_device_id, edge_id, run_id=None):
local_pkg_data_dir = ClientConstants.get_data_dir()
os.makedirs(local_pkg_data_dir, exist_ok=True)
os.makedirs(os.path.join(local_pkg_data_dir, ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME), exist_ok=True)
runner_info_file = os.path.join(local_pkg_data_dir, ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME,
"runner_infos.yaml")
running_info = dict()
running_info["unique_device_id"] = str(unique_device_id)
running_info["edge_id"] = str(edge_id)
running_info["run_id"] = run_id
ClientConstants.generate_yaml_doc(running_info, runner_info_file)
@staticmethod
def save_training_infos(edge_id, training_status):
local_pkg_data_dir = ClientConstants.get_data_dir()
os.makedirs(local_pkg_data_dir, exist_ok=True)
os.makedirs(os.path.join(local_pkg_data_dir, ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME), exist_ok=True)
training_info_file = os.path.join(local_pkg_data_dir, ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME,
"training_infos.yaml")
training_info = dict()
training_info["edge_id"] = edge_id
training_info["training_status"] = str(training_status)
ClientConstants.generate_yaml_doc(training_info, training_info_file)
@staticmethod
def get_training_infos():
local_pkg_data_dir = ClientConstants.get_data_dir()
training_info_file = os.path.join(local_pkg_data_dir, ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME,
"training_infos.yaml")
training_info = dict()
training_info["edge_id"] = 0
training_info["training_status"] = "INITIALIZING"
try:
training_info = load_yaml_config(training_info_file)
except Exception as e:
pass
return training_info
@staticmethod
def get_docker_location_file():
dock_loc_path = os.path.join(ClientConstants.get_data_dir(), "docker-location.yml")
return dock_loc_path
@staticmethod
def generate_yaml_doc(run_config_object, yaml_file):
try:
file = open(yaml_file, 'w', encoding='utf-8')
yaml.dump(run_config_object, file)
file.close()
except Exception as e:
pass
@staticmethod
def exit_process(process):
if process is None:
return
try:
process.terminate()
process.join()
process = None
except Exception as e:
pass
@staticmethod
def exec_console_with_script(script_path, should_capture_stdout=False, should_capture_stderr=False):
stdout_flag = subprocess.PIPE if should_capture_stdout else sys.stdout
stderr_flag = subprocess.PIPE if should_capture_stderr else sys.stderr
if platform.system() == 'Windows':
script_process = subprocess.Popen(script_path, stdout=stdout_flag, stderr=stderr_flag,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
else:
script_process = subprocess.Popen(['bash', '-c', script_path], stdout=stdout_flag, stderr=stderr_flag,
preexec_fn=os.setsid)
return script_process
@staticmethod
def exec_console_with_shell(shell, script_path, should_capture_stdout=False, should_capture_stderr=False):
stdout_flag = subprocess.PIPE if should_capture_stdout else sys.stdout
stderr_flag = subprocess.PIPE if should_capture_stderr else sys.stderr
if platform.system() == 'Windows':
script_process = subprocess.Popen([shell, script_path], stdout=stdout_flag, stderr=stderr_flag,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
else:
script_process = subprocess.Popen([shell, script_path], stdout=stdout_flag, stderr=stderr_flag,
preexec_fn=os.setsid)
return script_process
@staticmethod
def exec_console_with_shell_script_list(shell_script_list, should_capture_stdout=False,
should_capture_stderr=False):
stdout_flag = subprocess.PIPE if should_capture_stdout else sys.stdout
stderr_flag = subprocess.PIPE if should_capture_stderr else sys.stderr
if platform.system() == 'Windows':
script_process = subprocess.Popen(shell_script_list, stdout=stdout_flag, stderr=stderr_flag,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
else:
script_process = subprocess.Popen(shell_script_list, stdout=stdout_flag, stderr=stderr_flag,
preexec_fn=os.setsid)
return script_process
@staticmethod
def get_console_pipe_out_err_results(script_process):
exec_out, exec_err = script_process.communicate()
return script_process.returncode, exec_out, exec_err
@staticmethod
def get_console_sys_out_pipe_err_results(script_process):
pipe_out, pipe_err = script_process.communicate()
exec_out, exec_err = sys.stdout, pipe_err
return script_process.returncode, exec_out, exec_err
@staticmethod
def print_console_output(script_process):
for info in iter(script_process.stdout.readline, ""):
print(info)
for info in iter(script_process.stderr.readline, ""):
print(info)
@staticmethod
def get_device_state_from_run_edge_state(run_edge_state):
ret_state = ClientConstants.MSG_MLOPS_DEVICE_STATUS_IDLE
if run_edge_state == ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE:
ret_state = ClientConstants.MSG_MLOPS_DEVICE_STATUS_OFFLINE
elif run_edge_state == ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING:
ret_state = ClientConstants.MSG_MLOPS_DEVICE_STATUS_UPGRADING
elif run_edge_state == ClientConstants.MSG_MLOPS_CLIENT_STATUS_QUEUED or \
run_edge_state == ClientConstants.MSG_MLOPS_CLIENT_STATUS_INITIALIZING or \
run_edge_state == ClientConstants.MSG_MLOPS_CLIENT_STATUS_TRAINING or \
run_edge_state == ClientConstants.MSG_MLOPS_CLIENT_STATUS_STOPPING:
ret_state = ClientConstants.MSG_MLOPS_DEVICE_STATUS_RUNNING
elif run_edge_state == ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE or \
run_edge_state == ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED or \
run_edge_state == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED or \
run_edge_state == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED:
ret_state = ClientConstants.MSG_MLOPS_DEVICE_STATUS_IDLE
return ret_state
@staticmethod
def is_client_running(status):
if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \
status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED or \
status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED or \
status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE or \
status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE:
return False
return True
if __name__ == "__main__":
ignore = "*test*,abc*"
ignore = tuple(ignore.split(','))
shutil.rmtree("/Users/alexliang/fedml-test/examples2", ignore_errors=True)
shutil.copytree("/Users/alexliang/fedml-test/examples",
"/Users/alexliang/fedml-test/examples2",
ignore=shutil.ignore_patterns(*ignore))
script_process = ClientConstants.exec_console_with_shell_script_list(
['sh', '-c', "while [ 1 = 1 ]; do echo 'hello'; sleep 1; done "])
ClientConstants.print_console_output(script_process)
ret_code, out, err = ClientConstants.get_console_pipe_out_err_results(script_process)
print("script process {}".format(script_process.pid))
|
aa919262486d210842e711a0d714398e17c85d6b
|
6d652aa802d90571a640ac0b538ff3055d0e34c5
|
/tests/test_uname.py
|
98c09d32b345a1f0c25fce3c70c7dc3c336a5d58
|
[
"MIT"
] |
permissive
|
enkore/i3pystatus
|
38eaea8203ed309ff90e1717bd3a9075f12590b0
|
0820dd4e3d479dddec7797b2ea9a83da0f62b7cf
|
refs/heads/current
| 2023-08-18T11:36:18.296269
| 2023-04-25T20:56:08
| 2023-04-25T20:56:08
| 8,130,605
| 438
| 244
|
MIT
| 2023-08-13T12:13:33
| 2013-02-11T01:01:15
|
Python
|
UTF-8
|
Python
| false
| false
| 301
|
py
|
test_uname.py
|
import os
from i3pystatus import uname
def test_uname():
KEYS = ("sysname", "nodename", "release", "version", "machine")
uref = os.uname()
for key in KEYS:
um = uname.Uname(format="{" + key + "}")
um.init()
assert um.output["full_text"] == getattr(uref, key)
|
03901987d7c71148ff799061904534842c87be20
|
753aafa747871f556600b28dbb867298132b1e6b
|
/supervisely/api/agent_api.py
|
deb93a60c025fc4fd71d53e13f7fe0823be654c4
|
[
"Apache-2.0"
] |
permissive
|
supervisely/supervisely
|
85dd63e5ccb590b2861271ef7bd5401aa2a99038
|
f0df756b8fb89364202fde54e6ef5fe89fca089d
|
refs/heads/master
| 2023-08-27T07:29:57.682377
| 2023-08-24T13:17:31
| 2023-08-24T13:17:31
| 140,302,908
| 447
| 91
|
Apache-2.0
| 2023-09-13T11:11:09
| 2018-07-09T15:09:32
|
Python
|
UTF-8
|
Python
| false
| false
| 4,754
|
py
|
agent_api.py
|
# coding: utf-8
"""api for working with agent"""
from __future__ import annotations
from typing import NamedTuple, Optional, Dict, List
from enum import Enum
from supervisely.api.module_api import ApiField, ModuleApi, ModuleWithStatus
class AgentNotFound(Exception):
"""class AgentNotFound"""
pass
class AgentNotRunning(Exception):
"""class AgentNotRunning"""
pass
class AgentApi(ModuleApi, ModuleWithStatus):
"""
API for working with agent. :class:`AgentApi<AgentApi>` object is immutable.
:param api: API connection to the server
:type api: Api
:Usage example:
.. code-block:: python
import os
from dotenv import load_dotenv
import supervisely as sly
# Load secrets and create API object from .env file (recommended)
# Learn more here: https://developer.supervisely.com/getting-started/basics-of-authentication
if sly.is_development():
load_dotenv(os.path.expanduser("~/supervisely.env"))
api = sly.Api.from_env()
# Pass values into the API constructor (optional, not recommended)
# api = sly.Api(server_address="https://app.supervise.ly", token="4r47N...xaTatb")
team_id = 8
agents = api.agent.get_list(team_id)
"""
class Status(Enum):
"""Agent status."""
WAITING = "waiting"
""""""
RUNNING = "running"
""""""
@staticmethod
def info_sequence():
"""
NamedTuple AgentInfo information about Agent.
:Example:
.. code-block:: python
AgentInfo("some info")
"""
return [
ApiField.ID,
ApiField.NAME,
ApiField.TOKEN,
ApiField.STATUS,
ApiField.USER_ID,
ApiField.TEAM_ID,
ApiField.CAPABILITIES,
ApiField.CREATED_AT,
ApiField.UPDATED_AT,
]
@staticmethod
def info_tuple_name():
"""
NamedTuple name - **AgentInfo**.
"""
return "AgentInfo"
def __init__(self, api):
ModuleApi.__init__(self, api)
ModuleWithStatus.__init__(self)
def get_list(
self, team_id: int, filters: Optional[List[Dict[str, str]]] = None
) -> List[NamedTuple]:
"""
List of all agents in the given Team.
:param team_id: Team ID in Supervisely.
:type team_id: int
:param filters: List of params to sort output Agents.
:type filters: List[dict], optional
:return: List of Agents with information. See :class:`info_sequence<info_sequence>`
:rtype: :class:`List[NamedTuple]`
:Usage example:
.. code-block:: python
import supervisely as sly
os.environ['SERVER_ADDRESS'] = 'https://app.supervise.ly'
os.environ['API_TOKEN'] = 'Your Supervisely API Token'
api = sly.Api.from_env()
team_id = 16087
agents = api.agent.get_list(team_id)
filter_agents = api.agent.get_list(team_id, filters=[{ 'field': 'name', 'operator': '=', 'value': 'Gorgeous Chicken' }])
"""
return self.get_list_all_pages("agents.list", {"teamId": team_id, "filter": filters or []})
def get_info_by_id(self, id: int) -> NamedTuple:
"""
Get Agent information by ID.
:param id: Agent ID in Supervisely.
:type id: int
:return: Information about Agent. See :class:`info_sequence<info_sequence>`
:rtype: :class:`NamedTuple`
:Usage example:
.. code-block:: python
import supervisely as sly
os.environ['SERVER_ADDRESS'] = 'https://app.supervise.ly'
os.environ['API_TOKEN'] = 'Your Supervisely API Token'
api = sly.Api.from_env()
agent = api.agent.get_info_by_id(7)
"""
return self._get_info_by_id(id, "agents.info")
def get_status(self, id: int) -> AgentApi.Status:
"""
Status object containing status of Agent: waiting or running.
:param id: Agent ID in Supervisely.
:type id: int
:return: Agent Status
:rtype: :class:`Status<supervisely.api.agent_api.AgentApi.Status>`
:Usage example:
.. code-block:: python
import supervisely as sly
os.environ['SERVER_ADDRESS'] = 'https://app.supervise.ly'
os.environ['API_TOKEN'] = 'Your Supervisely API Token'
api = sly.Api.from_env()
agent = api.agent.get_status(7)
"""
status_str = self.get_info_by_id(id).status
return self.Status(status_str)
def raise_for_status(self, status):
"""raise_for_status"""
pass
|
1f6e698c80d0d0444b3a8aae9232bacc7243abaf
|
cbdb4f017efda7feca4f46b5539265dd3c4f501d
|
/src/hypercorn/asyncio/worker_context.py
|
fe9ad1c74071ee3d971ab5e14b1b62264c291f06
|
[
"MIT"
] |
permissive
|
pgjones/hypercorn
|
7e02b94109f902fb43ff6fab54fc9d7282704300
|
86a04d1ad2f3af9946ed1669c9e92ffa75c2fa13
|
refs/heads/main
| 2023-07-21T00:43:34.596374
| 2023-07-08T17:36:47
| 2023-07-08T17:36:47
| 135,839,606
| 680
| 68
|
MIT
| 2023-09-03T11:13:18
| 2018-06-02T17:47:42
|
Python
|
UTF-8
|
Python
| false
| false
| 811
|
py
|
worker_context.py
|
from __future__ import annotations
import asyncio
from typing import Type, Union
from ..typing import Event
class EventWrapper:
def __init__(self) -> None:
self._event = asyncio.Event()
async def clear(self) -> None:
self._event.clear()
async def wait(self) -> None:
await self._event.wait()
async def set(self) -> None:
self._event.set()
def is_set(self) -> bool:
return self._event.is_set()
class WorkerContext:
event_class: Type[Event] = EventWrapper
def __init__(self) -> None:
self.terminated = self.event_class()
@staticmethod
async def sleep(wait: Union[float, int]) -> None:
return await asyncio.sleep(wait)
@staticmethod
def time() -> float:
return asyncio.get_event_loop().time()
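# Illustrative sketch (editor's note, not part of the upstream module): a worker task can
# block on shutdown roughly like this, assuming it runs inside an asyncio event loop:
#     ctx = WorkerContext()
#     ...
#     await ctx.terminated.wait()   # resumes once another task does `await ctx.terminated.set()`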
|
3db0906e609dcf44dd1b2c27c82b6a40808573d2
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/core/confdb/syntax/system/aaa/hints.py
|
f3dd3e0d0aa7c6964116fd9baf5f20a631a459e4
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,375
|
py
|
hints.py
|
# ----------------------------------------------------------------------
# ConfDB hints system aaa
# ----------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from ...defs import DEF
from ...patterns import IF_NAME, IP_ADDRESS, CHOICES
HINTS_SYSTEM_AAA = DEF(
"aaa",
[
DEF(
"service-type",
[
DEF(
CHOICES("local", "radius", "tacacs+", "ldap", "ad"),
[
DEF(
"default-address",
[DEF(IP_ADDRESS, name="ip", gen="make_system_aaa_default_address")],
),
DEF(
"default-interface",
[
DEF(
IF_NAME,
name="interface",
gen="make_system_aaa_default_interface",
)
],
),
],
name="type",
required=True,
)
],
),
],
)
|
ea0daece3a3f0cf159271489371ff690da8007ab
|
2012ff27cab53907b74e3212780331024b2ed2d5
|
/retriever/lib/engine.py
|
0f181ab462323571e5a902a20040a88e6d58700e
|
[
"MIT"
] |
permissive
|
weecology/retriever
|
7337ba658e5134e68afa1bc42f2c5023c061acc4
|
37982577eca010a03dd5b5e23fe30be8f42da9ed
|
refs/heads/main
| 2023-08-17T18:30:07.349214
| 2023-04-16T19:41:10
| 2023-04-16T19:41:10
| 1,968,190
| 289
| 166
|
NOASSERTION
| 2023-04-16T23:21:38
| 2011-06-28T19:01:15
|
Python
|
UTF-8
|
Python
| false
| false
| 45,760
|
py
|
engine.py
|
import csv
import getpass
import gzip
import os
import re
import shutil
import sys
import tarfile
import zipfile
from collections import OrderedDict
from math import ceil
from urllib.request import urlretrieve
from urllib.error import HTTPError
import requests
from requests.exceptions import InvalidSchema
from setuptools import archive_util
from tqdm import tqdm
from retriever.lib.cleanup import no_cleanup
from retriever.lib.defaults import DATA_DIR, DATA_SEARCH_PATHS, DATA_WRITE_PATH, ENCODING, KAGGLE_TOKEN_PATH
from retriever.lib.tools import (
open_fr,
open_fw,
open_csvw,
walk_relative_path,
excel_csv,
)
from retriever.lib.engine_tools import geojson2csv
from retriever.lib.engine_tools import sqlite2csv
from retriever.lib.engine_tools import json2csv
from retriever.lib.engine_tools import xml2csv
from retriever.lib.engine_tools import hdf2csv
from retriever.lib.warning import Warning
class Engine():
"""A generic database system. Specific database platforms will inherit
from this class."""
_connection = None
_cursor = None
datatypes = []
db = None
debug = False
instructions = "Enter your database connection information:"
name = ""
pkformat = "%s PRIMARY KEY %s "
placeholder = None
required_opts = []
script = None
spatial_support = False
table = None
use_cache = True
warnings = []
script_table_registry = OrderedDict()
encoding = None
data_path = None
def connect(self, force_reconnect=False):
"""Create a connection."""
if force_reconnect:
self.disconnect()
if self._connection is None:
self._connection = self.get_connection()
return self._connection
connection = property(connect)
def disconnect(self):
"""Disconnect a connection."""
if self._connection:
self.connection.close()
self._connection = None
self._cursor = None
def disconnect_files(self):
"""Files systems should override this method.
Enables commit per file object.
"""
def get_connection(self):
"""This method should be overridden by specific implementations
of Engine."""
raise NotImplementedError
def add_to_table(self, data_source):
"""Adds data to a table from one or more lines specified
in engine.table.source."""
print('Installing {}'.format(self.table_name()))
# If the number of records is known, avoid counting the lines
real_line_length = None
if self.table.number_of_records:
real_line_length = self.table.number_of_records
if self.table.columns[-1][1][0][:3] == "ct-":
# cross-tab data
if not real_line_length:
real_line_length = self.get_ct_line_length(gen_from_source(data_source))
real_lines = self.get_ct_data(gen_from_source(data_source))
else:
real_lines = gen_from_source(data_source)
if not real_line_length:
len_source = gen_from_source(data_source)
real_line_length = sum(1 for _ in len_source)
total = self.table.record_id + real_line_length
count_iter = 1
insert_limit = self.insert_limit
types = self.table.get_column_datatypes()
multiple_values = []
progress_bar = tqdm(desc='Progress', total=total, unit='rows')
line_values = None
for line in real_lines:
if line:
# Only process non empty lines
self.table.record_id += 1
line_values = self.table.values_from_line(line)
# Build insert statement with the correct number of values
try:
clean_values = [
self.format_insert_value(
self.table.cleanup.function(line_values[n],
self.table.cleanup.args),
types[n]) for n in range(len(line_values))
]
except Exception as e:
self.warning('Exception in line {}: {}'.format(
self.table.record_id, e))
continue
if line or count_iter == real_line_length:
if count_iter % insert_limit == 0 or count_iter == real_line_length:
# Add values to the list multiple_values
# if multiple_values list is full
# or we reached the last value in real_line_length
# execute the values in multiple_values
multiple_values.append(clean_values)
try:
insert_stmt = self.insert_statement(multiple_values)
except Exception as _:
if self.debug:
print(types)
if self.debug:
print(line_values)
if self.debug:
print(clean_values)
raise
try:
self.executemany(insert_stmt, multiple_values, commit=False)
except BaseException:
print(insert_stmt)
raise
multiple_values = []
else:
multiple_values.append(clean_values)
progress_bar.update()
count_iter += 1
progress_bar.close()
self.connection.commit()
def get_ct_line_length(self, lines):
"""Returns the number of real lines for cross-tab data"""
real_line_length = 0
for values in lines:
initial_cols = len(
self.table.columns) - (3 if hasattr(self.table, "ct_names") else 2)
# add one if auto increment is not
# set to get the right initial columns
if not self.table.columns[0][1][0] == "pk-auto":
initial_cols += 1
rest = values[initial_cols:]
real_line_length += len(rest)
return real_line_length
def get_ct_data(self, lines):
"""Create cross tab data."""
for values in lines:
initial_cols = len(
self.table.columns) - (3 if hasattr(self.table, "ct_names") else 2)
# add one if auto increment is not set to get the right initial columns
if not self.table.columns[0][1][0] == "pk-auto":
initial_cols += 1
begin = values[:initial_cols]
rest = values[initial_cols:]
n = 0
for item in rest:
if hasattr(self.table, "ct_names"):
name = [self.table.ct_names[n]]
n += 1
else:
name = []
yield begin + name + [item]
def auto_create_table(self, table, url=None, filename=None, pk=None, make=True):
"""Create table automatically by analyzing a data source and
predicting column names, data types, delimiter, etc."""
if url and not filename:
filename = filename_from_url(url)
self.table = table
if url and not self.find_file(filename):
# If the file doesn't exist, download it
self.download_file(url, filename)
file_path = self.find_file(filename)
if not self.table.delimiter:
self.set_table_delimiter(file_path)
if self.table.header_rows > 0 and not self.table.columns:
source = (skip_rows, (self.table.header_rows - 1, self.load_data(file_path)))
lines = gen_from_source(source)
header = next(lines)
lines.close()
source = (skip_rows, (self.table.header_rows, self.load_data(file_path)))
lines = gen_from_source(source)
columns, _ = self.table.auto_get_columns(header)
self.auto_get_datatypes(pk, lines, columns)
if (self.table.columns[-1][1][0][:3] == "ct-" and
hasattr(self.table, "ct_names") and
self.table.ct_column not in [c[0] for c in self.table.columns]):
self.table.columns = (self.table.columns[:-1] + [(self.table.ct_column,
("char", 50))] +
[self.table.columns[-1]])
if not make:
return self.table
return self.create_table()
def auto_get_datatypes(self, pk, source, columns):
"""Determine data types for each column.
For string columns adds an additional 100 characters to the maximum
observed value to provide extra space for cases where special characters
are counted differently by different engines.
"""
# Get all values for each column
lines_to_scan = source
# set default column data types as int
column_types = [('int',)] * len(columns)
max_lengths = [0] * len(columns)
# Check the values for each column to determine data type
for values in lines_to_scan:
if values:
for i in range(len(columns)):
try:
val = str(values[i]).strip()
if not val:
continue
if self.table.cleanup.function != no_cleanup: # pylint: disable=W0143
val = self.table.cleanup.function(val,
self.table.cleanup.args)
if val and val.strip():
# Find length using val.encode() to cater for various
# encoded char for `char` types
if len(val.encode()) > max_lengths[i]:
max_lengths[i] = len(val.encode())
if column_types[i][0] in ('int', 'bigint'):
try:
val = int(val)
if (column_types[i][0] == 'int' and
hasattr(self, 'max_int') and
val > self.max_int):
column_types[i] = [
'bigint',
]
except Exception as _:
column_types[i] = ('double',)
if column_types[i][0] == 'double':
try:
val = float(val)
if "e" in str(val) or ("." in str(val) and len(
str(val).split(".")[1]) > 10):
column_types[i] = ("decimal", "50,30")
except Exception as _:
column_types[i] = ('char', max_lengths[i])
if column_types[i][0] == 'char':
if len(val.encode()) > column_types[i][1]:
column_types[i] = ('char', max_lengths[i])
except IndexError:
continue
for i, value in enumerate(columns):
column = value
column[1] = column_types[i]
if pk == column[0]:
column[1][0] = "pk-" + column[1][0]
if pk is None and columns[0][1][0] == 'pk-auto':
self.table.columns = [("record_id", ("pk-auto",))]
self.table.contains_pk = True
else:
self.table.columns = []
for column in columns:
self.table.columns.append((column[0], tuple(column[1])))
def auto_get_delimiter(self, header):
"""Determine the delimiter.
Find out which of a set of common delimiters occurs most in the header
line and use this as the delimiter.
"""
self.table.delimiter = "\t"
for other_delimiter in [",", ";"]:
if header.count(other_delimiter) > header.count(self.table.delimiter):
self.table.delimiter = other_delimiter
def check_bulk_insert(self):
"""Check if a bulk insert could be performed on the data"""
# Determine if the dataset includes cross-tab data
ct = len([True for c in self.table.columns if c[1][0][:3] == "ct-"]) != 0
if (self.table.cleanup.function == no_cleanup # pylint: disable=W0143
and not self.table.fixed_width and not ct and
(not hasattr(self.table, "do_not_bulk_insert") or
not self.table.do_not_bulk_insert)):
return True
return False
def convert_data_type(self, datatype):
"""Convert Retriever generic data types to database platform specific
data types.
"""
# get the type from the dataset variables
key = datatype[0]
this_pk = False
if key[0:3] == "pk-":
key = key[3:]
this_pk = True
elif key[0:3] == "ct-":
key = key[3:]
# format the dataset type to match engine specific type
this_type = ""
if key in list(self.datatypes.keys()):
this_type = self.datatypes[key]
if isinstance(this_type, tuple):
if datatype[0] == 'pk-auto':
pass
elif len(datatype) > 1:
this_type = this_type[1] + "(" + str(datatype[1]) + ")"
else:
this_type = this_type[0]
else:
if len(datatype) > 1:
this_type += "(" + str(datatype[1]) + ")"
# set the PRIMARY KEY
if this_pk:
if isinstance(this_type, tuple):
this_type = self.pkformat % this_type
else:
this_type = self.pkformat % (this_type, "")
return this_type
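# Illustrative sketch (hypothetical engine mapping, editor's note): if a subclass defined
# datatypes = {"char": "VARCHAR", "int": "INT"}, then
#     convert_data_type(("char", 50))  ->  "VARCHAR(50)"
#     convert_data_type(("pk-int",))   ->  "INT PRIMARY KEY  "   (via pkformat)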
def create_db(self):
"""Create a new database based on settings supplied in Database object
engine.db."""
db_name = self.database_name()
if db_name:
print("Creating database " + db_name + "...\n")
# Create the database
create_stmt = self.create_db_statement()
if self.debug:
print(create_stmt)
try:
self.execute(create_stmt)
except Exception as e:
try:
self.connection.rollback()
except Exception as _:
pass
print(e)
print("Installing into existing database")
def create_db_statement(self):
"""Return SQL statement to create a database."""
create_stmt = "CREATE DATABASE " + self.database_name()
return create_stmt
def create_raw_data_dir(self, path=None):
"""Check to see if the archive directory exists and creates it if
necessary."""
if not path:
path = self.format_data_dir()
if not os.path.exists(path):
os.makedirs(path)
def create_table(self):
"""Create new database table based on settings supplied in Table
object engine.table."""
# Try to drop the table if it exists; this may cause an exception if it
# doesn't exist, so ignore exceptions
try:
self.execute(self.drop_statement("TABLE", self.table_name()))
except Exception as _:
pass
create_stmt = self.create_table_statement()
if self.debug:
print(create_stmt)
try:
self.execute(create_stmt)
self.register_tables()
if self.table.name not in self.script.tables:
self.script.tables[self.table.name] = self.table
except Exception as e:
try:
self.connection.rollback()
except Exception as _:
pass
print(e)
print("Replacing existing table")
def register_tables(self):
"""Register table names of scripts"""
if self.script.name not in self.script_table_registry:
self.script_table_registry[self.script.name] = []
self.script_table_registry[self.script.name].append(
(self.table_name(), self.table))
def create_table_statement(self):
"""Return SQL statement to create a table."""
create_stmt = "CREATE TABLE " + self.table_name() + " ("
columns = self.table.get_insert_columns(join=False, create=True)
types = []
for column in self.table.columns:
for column_name in columns:
if column[0] == column_name:
types.append(self.convert_data_type(column[1]))
if self.debug:
print(columns)
column_strings = []
for c, t in zip(columns, types):
column_strings.append(c + ' ' + t)
create_stmt += ', '.join(column_strings)
create_stmt += " );"
return create_stmt
def database_name(self, name=None):
"""Return name of the database."""
if not name:
try:
name = self.script.name
except AttributeError:
name = "{db}"
try:
db_name = self.opts["database_name"].format(db=name)
except KeyError:
db_name = name
return db_name.replace('-', '_')
def download_file(self, url, filename):
"""Download file to the raw data directory."""
if not self.find_file(filename) or not self.use_cache:
path = self.format_filename(filename)
self.create_raw_data_dir()
progbar = tqdm(
unit='B',
unit_scale=True,
unit_divisor=1024,
miniters=1,
desc='Downloading {}'.format(filename),
)
if hasattr(self.script, "socrata"):
return self.download_from_socrata(url, path, progbar)
else:
return self.download_response(url, path, progbar)
def download_from_socrata(self, url, path, progbar):
"""Download files from Socrata to the raw data directory"""
try:
filename = url.split('/')[-1]
row_count_url = url
row_count_url = row_count_url.replace(
url.split('/')[-1],
filename.partition('.')[0] +
"?$query=select%20count(*)%20as%20COLUMN_ALIAS_GUARD__count")
row_count = requests.get(row_count_url)
result = row_count.json()
rows = result[0]["COLUMN_ALIAS_GUARD__count"]
url = url + "?$limit=" + rows
return self.download_response(url, path, progbar)
except:
print("The download url {} is incorrect!!".format(url))
return False
def download_response(self, url, path, progbar):
"""Returns True|None according to the download GET response"""
try:
response = requests.get(
url,
allow_redirects=True,
stream=True,
headers={
'user-agent':
'Weecology/Data-Retriever Package Manager: http://www.data-retriever.org/'
},
hooks={'response': reporthook(progbar, path)})
if response.status_code == 404:
print("The data source or server may be redirected or not found")
except InvalidSchema:
try:
urlretrieve(url, path, reporthook=reporthook(progbar))
except HTTPError as e:
print("HTTPError :", e)
return None
self.use_cache = True
progbar.close()
return True
def download_from_kaggle(
self,
data_source,
dataset_name,
archive_dir,
archive_full_path,
):
"""Download files from Kaggle into the raw data directory"""
kaggle_token = os.path.isfile(KAGGLE_TOKEN_PATH)
kaggle_username = os.getenv('KAGGLE_USERNAME', "").strip()
kaggle_key = os.getenv('KAGGLE_KEY', "").strip()
if kaggle_token or (kaggle_username and kaggle_key):
from kaggle.api.kaggle_api_extended import KaggleApi
from kaggle.rest import ApiException
else:
print(f"Could not find kaggle.json. Make sure it's located at "
f"{KAGGLE_TOKEN_PATH}. Or available in the environment variables. "
f"For more information "
f"checkout https://github.com/Kaggle/kaggle-api#api-credentials")
return
api = KaggleApi()
api.authenticate()
archive_full_path = archive_full_path + ".zip"
if data_source == "dataset":
try:
api.dataset_download_files(dataset=dataset_name,
path=archive_dir,
quiet=False,
force=True)
file_names = self.extract_zip(archive_full_path, archive_dir)
except ApiException as e:
print(f"Could not download '{dataset_name}' from kaggle datasets.")
print("Response :", e.body.decode("unicode_escape"))
return []
else:
try:
api.competition_download_files(competition=dataset_name,
path=archive_dir,
quiet=False,
force=True)
file_names = self.extract_zip(archive_full_path, archive_dir)
except ApiException as e:
print(f"Could not download '{dataset_name}' from kaggle competitions.")
print("Response :", e.body.decode("unicode_escape"))
return []
return file_names
def download_files_from_archive(
self,
url,
file_names=None,
archive_type="zip",
keep_in_dir=False,
archive_name=None,
):
"""Download files from an archive into the raw data directory."""
if not archive_name:
archive_name = filename_from_url(url)
else:
archive_name = self.format_filename(archive_name)
archive_full_path = self.format_filename(archive_name)
archive_dir = self.format_data_dir()
if keep_in_dir:
archive_base = os.path.splitext(os.path.basename(archive_name))[0]
archive_dir = (self.data_path if self.data_path else os.path.join(
DATA_WRITE_PATH, archive_base))
archive_dir = archive_dir.format(dataset=self.script.name)
if not os.path.exists(archive_dir):
os.makedirs(archive_dir)
if hasattr(self.script, "kaggle"):
file_names = self.download_from_kaggle(data_source=self.script.data_source,
dataset_name=url,
archive_dir=archive_dir,
archive_full_path=archive_full_path)
else:
if not file_names:
self.download_file(url, archive_name)
if archive_type in ('tar', 'tar.gz'):
file_names = self.extract_tar(archive_full_path, archive_dir,
archive_type)
elif archive_type == 'zip':
file_names = self.extract_zip(archive_full_path, archive_dir)
elif archive_type == 'gz':
file_names = self.extract_gz(archive_full_path, archive_dir)
return file_names
archive_downloaded = bool(self.data_path)
for file_name in file_names:
archive_full_path = self.format_filename(archive_name)
if not self.find_file(os.path.join(archive_dir, file_name)):
# if no local copy, download the data
self.create_raw_data_dir()
if not archive_downloaded:
self.download_file(url, archive_name)
archive_downloaded = True
if archive_type == 'zip':
self.extract_zip(archive_full_path, archive_dir, file_name)
elif archive_type == 'gz':
self.extract_gz(archive_full_path, archive_dir, file_name)
elif archive_type in ('tar', 'tar.gz'):
self.extract_tar(archive_full_path, archive_dir, archive_type,
file_name)
return file_names
def drop_statement(self, object_type, object_name):
"""Return drop table or database SQL statement."""
if self:
drop_statement = "DROP %s IF EXISTS %s" % (object_type, object_name)
return drop_statement
def execute(self, statement, commit=True):
"""Execute given statement."""
self.cursor.execute(statement)
if commit:
self.connection.commit()
def executemany(self, statement, values, commit=True):
"""Execute given statement with multiple values."""
self.cursor.executemany(statement, values)
if commit:
self.connection.commit()
def excel_to_csv(self, src_path, path_to_csv, excel_info=None, encoding=ENCODING):
"""Convert excel files to csv files."""
if self.find_file(src_path) and excel_info:
excel_csv(src_path, path_to_csv, excel_info, encoding)
def process_geojson2csv(self, src_path, path_to_csv, encoding=ENCODING):
if self.find_file(src_path):
geojson2csv(src_path, path_to_csv, encoding)
def process_sqlite2csv(self,
src_path,
path_to_csv,
table_name=None,
encoding=ENCODING):
"""Process sqlite database to csv files."""
if self.find_file(src_path):
sqlite2csv(src_path, path_to_csv, table_name, encoding)
def process_json2csv(self, src_path, path_to_csv, headers, encoding=ENCODING):
if self.find_file(src_path):
json2csv(input_file=src_path,
output_file=path_to_csv,
header_values=headers,
encoding=encoding,
row_key=None)
def process_xml2csv(self,
src_path,
path_to_csv,
header_values=None,
empty_rows=1,
encoding=ENCODING):
if self.find_file(src_path):
xml2csv(src_path, path_to_csv, header_values, empty_rows, encoding)
def process_hdf52csv(self,
src_path,
path_to_csv,
data_name,
data_type,
encoding=ENCODING):
if self.find_file(src_path):
hdf2csv(src_path, path_to_csv, data_name, data_type, encoding=ENCODING)
def extract_gz(
self,
archive_path,
archivedir_write_path,
file_name=None,
open_archive_file=None,
archive=None,
):
"""Extract gz files.
Extracts a given file name or all the files in the gz.
"""
if file_name:
open_archive_file = gzip.open(archive_path, 'r')
self.write_fileobject(
archivedir_write_path,
file_name,
file_obj=open_archive_file,
open_object=False,
)
if 'archive' in locals() and archive:
archive.close()
return [file_name]
files_before = set(walk_relative_path(archivedir_write_path))
archive_util.unpack_archive(archive_path, archivedir_write_path)
files_after = set(walk_relative_path(archivedir_write_path))
unpacked_files = files_after - files_before
return list(unpacked_files)
def extract_tar(self,
archive_path,
archivedir_write_path,
archive_type,
file_name=None):
"""Extract tar or tar.gz files.
Extracts a given file name or the file in the tar or tar.gz.
# gzip archives can only contain a single file
"""
if archive_type in ('tar', 'tar.gz'):
if file_name:
archive = tarfile.open(archive_path, 'r')
open_archive_file = archive.extractfile(file_name)
self.write_fileobject(
archivedir_write_path,
file_name,
file_obj=open_archive_file,
open_object=False,
)
if 'archive' in locals():
archive.close()
return [file_name]
if archive_type == 'tar':
tar = tarfile.open(archive_path, 'r')
else:
tar = tarfile.open(archive_path, "r:gz")
file_names = tar.getnames()
tar.extractall(path=archivedir_write_path)
tar.close()
return file_names
return None
def extract_zip(self, archive_path, archivedir_write_path, file_name=None):
"""Extract zip files.
Extracts a given file name or the entire files in the archive.
"""
try:
archive = zipfile.ZipFile(archive_path)
if file_name:
if archive.testzip():
archive.getinfo(file_name).file_size += (2**64) - 1
open_archive_file = archive.open(file_name, 'r')
file_names = [file_name]
archive = None
file_obj = open_archive_file
open_object = False
else:
file_names = [
paths.filename
for paths in archive.infolist()
if not paths.filename.endswith('/')
]
file_obj = None
open_object = True
for f_name in file_names:
self.write_fileobject(archivedir_write_path, f_name, file_obj, archive,
open_object)
return file_names
except zipfile.BadZipFile as e:
print("\n{0} can't be extracted, "
"may be corrupt \n{1}".format(file_name, e))
def fetch_tables(self, dataset, table_names): # pylint: disable=W0613,R0201
"""This can be overridden to return the tables of sqlite db
as pandas data frame. Return False by default.
"""
return False
def final_cleanup(self):
"""Close the database connection."""
if self.warnings:
print('\n'.join(str(w) for w in self.warnings))
self.disconnect()
def find_file(self, filename):
"""Check for an existing datafile."""
if self.data_path:
file_path = os.path.normpath(
os.path.join(self.data_path, self.script.name, filename))
if file_exists(file_path):
return file_path
for search_path in DATA_SEARCH_PATHS:
search_path = (search_path.format(
dataset=self.script.name) if self.script else search_path)
file_path = os.path.normpath(os.path.join(search_path, filename))
if file_exists(file_path):
return file_path
return False
def format_data_dir(self):
"""Return correctly formatted raw data directory location."""
if self.data_path:
return os.path.join(self.data_path, self.script.name)
return DATA_WRITE_PATH.format(dataset=self.script.name)
def format_filename(self, filename):
"""Return full path of a file in the archive directory."""
return os.path.join(self.format_data_dir(), filename)
def format_insert_value(self, value, datatype): # pylint: disable=R0201
"""Format a value for an insert statement based on data type.
Different data types need to be formatted differently to be properly
stored in database management systems. The correct formats are
obtained by:
1. Removing extra enclosing quotes
2. Harmonizing null indicators
3. Cleaning up badly formatted integers
4. Obtaining consistent float representations of decimals
"""
datatype = datatype.split('-')[-1]
str_value = str(value).strip()
# Remove any quotes already surrounding the string
quotes = ["'", '"']
if (len(str_value) > 1 and str_value[0] == str_value[-1] and
str_value[0] in quotes):
str_value = str_value[1:-1]
missing_values = ("null", "none")
if str_value.lower() in missing_values:
return None
if datatype in ("int", "bigint", "bool"):
if str_value:
intvalue = str_value.split('.')[0]
if intvalue:
return int(intvalue)
return None
return None
if datatype in ("double", "decimal"):
if str_value.strip():
try:
decimals = float(str(str_value))
return decimals
except Exception as _:
return None
return None
if datatype == "char":
if str_value.lower() in missing_values:
return None
return str_value
return None
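# Illustrative sketch (editor's note): with the rules above,
#     format_insert_value('"12.0"', "int")    ->  12    (quotes and fraction stripped)
#     format_insert_value("null", "char")     ->  None  (harmonised missing value)
#     format_insert_value("3.5", "ct-double") ->  3.5   (the "ct-" prefix is ignored)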
def get_cursor(self):
"""Get db cursor."""
if self._cursor is None:
self._cursor = self.connection.cursor()
return self._cursor
cursor = property(get_cursor)
def get_input(self):
"""Manually get user input for connection information when script is
run from terminal."""
for opt in self.required_opts:
if opt[0] not in list(self.opts.keys()):
if opt[0] == "password":
print(opt[1])
self.opts[opt[0]] = getpass.getpass(" ")
else:
prompt = opt[1]
if opt[2]:
prompt += " or press Enter for the default, %s" % opt[2]
prompt += ': '
self.opts[opt[0]] = input(prompt)
if self.opts[opt[0]] in ["", "default"]:
self.opts[opt[0]] = opt[2]
if 'data_dir' in self.opts and self.opts['data_dir']:
if self.opts['data_dir'] != DATA_DIR:
if not os.path.exists(self.opts['data_dir']):
os.makedirs(self.opts['data_dir'])
def insert_data_from_archive(self, url, filenames):
"""Insert data from files located in an online archive. This function
extracts the file, inserts the data, and deletes the file if raw data
archiving is not set."""
self.download_files_from_archive(url, filenames)
for filename in filenames:
file_path = self.find_file(filename)
if file_path:
self.insert_data_from_file(file_path)
else:
raise Exception("File not found: %s" % filename)
def insert_data_from_file(self, filename):
"""The default function to insert data from a file. This function
simply inserts the data row by row. Database platforms with support
for inserting bulk data from files can override this function."""
data_source = (
skip_rows,
(self.table.header_rows, (self.load_data, (filename,))),
)
self.add_to_table(data_source)
def insert_data_from_url(self, url):
"""Insert data from a web resource, such as a text file."""
filename = filename_from_url(url)
find = self.find_file(filename)
if find and self.use_cache:
# Use local copy
self.insert_data_from_file(find)
else:
# Save a copy of the file locally, then load from that file
self.create_raw_data_dir()
print("\nSaving a copy of " + filename + "...")
self.download_file(url, filename)
self.insert_data_from_file(self.find_file(filename))
def insert_raster(self, path=None, srid=None):
"""Base function for installing raster data from path"""
def insert_statement(self, values):
"""Return SQL statement to insert a set of values."""
columns = self.table.get_insert_columns()
types = self.table.get_column_datatypes()
column_count = len(self.table.get_insert_columns(join=False, create=False))
for row in values:
row_length = len(row)
# Add None with appropriate value type for empty cells
for i in range(column_count - row_length):
row.append(self.format_insert_value(None, types[row_length + i]))
insert_stmt = "INSERT INTO {table}".format(table=self.table_name())
insert_stmt += " ( {columns} )".format(columns=columns)
insert_stmt += " VALUES ("
for i in range(0, column_count):
insert_stmt += "{}, ".format(self.placeholder)
insert_stmt = insert_stmt.rstrip(", ") + ")"
if self.debug:
print(insert_stmt)
return insert_stmt
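# Illustrative sketch (hypothetical table and placeholder, editor's note): for a table
# named test.plots with insert columns "plot_id, name" and placeholder "%s", the
# statement assembled above is
#     INSERT INTO test.plots ( plot_id, name ) VALUES (%s, %s)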
def insert_vector(self, path=None, srid=None):
"""Base function for installing vector data from path"""
def set_engine_encoding(self):
"""Set up the encoding to be used."""
self.encoding = ENCODING.lower()
if self.script and self.script.encoding:
self.encoding = self.script.encoding.lower()
def set_table_delimiter(self, file_path):
"""Get the delimiter from the data file and set it."""
dataset_file = open_fr(file_path, encoding=self.encoding)
self.auto_get_delimiter(dataset_file.readline())
dataset_file.close()
def supported_raster(self, path, ext=None):
""""Spatial data is not currently supported for this database type
or file format. PostgreSQL is currently the only supported output
for spatial data."""
if self:
raise Exception("Not supported")
def table_name(self, name=None, dbname=None):
"""Return full table name."""
if not name:
name = self.table.name
if not dbname:
dbname = self.database_name()
if not dbname:
dbname = ''
return self.opts["table_name"].format(db=dbname, table=name)
def to_csv(self, sort=True, path=None, select_columns=None, select_table=None):
"""Create a CSV file from the a data store.
sort flag to create a sorted file,
path to write the flag else write to the PWD,
select_columns flag is used by large files to select
columns data and has SELECT LIMIT 3.
"""
# Due to Cyclic imports we can not move this import to the top
from retriever.lib.engine_tools import sort_csv
for table_name in self.script_table_registry[self.script.name]:
csv_file_output = os.path.normpath(
os.path.join(path if path else '', table_name[0] + '.csv'))
self.get_cursor()
self.set_engine_encoding()
csv_file = open_fw(csv_file_output, encoding=self.encoding)
csv_writer = open_csvw(csv_file)
limit = ""
cols = "*"
if select_columns:
limit = "LIMIT 3"
cols = ",".join(select_columns)
sql_query = "SELECT {cols} FROM {tab} {limit};"
self.cursor.execute(
sql_query.format(cols=cols, tab=table_name[0], limit=limit))
row = self.cursor.fetchone()
column_names = [
u'{}'.format(tuple_i[0]) for tuple_i in self.cursor.description
]
csv_writer.writerow(column_names)
while row is not None:
csv_writer.writerow(row)
row = self.cursor.fetchone()
csv_file.close()
if sort:
sort_csv(csv_file_output)
self.disconnect()
def warning(self, warning):
"""Create a warning message using the current script and table."""
new_warning = Warning('%s:%s' % (self.script.name, self.table.name), warning)
self.warnings.append(new_warning)
def write_fileobject(
self,
archivedir_write_path,
file_name,
file_obj=None,
archive=None,
open_object=False,
):
"""Write a file object from a archive object to a given path
open_object flag helps up with zip files, open the zip and the file
"""
write_path = self.format_filename(os.path.join(archivedir_write_path, file_name))
write_path = os.path.normpath(write_path)
if not os.path.exists(write_path):
# If the directory does not exist, create it
if not os.path.exists(os.path.dirname(write_path)):
os.makedirs(os.path.dirname(write_path))
try:
try:
unzipped_file = open(write_path, 'wb')
if open_object:
file_obj = archive.open(file_name, 'r')
if file_obj:
# use shutil to copy in chunks
shutil.copyfileobj(file_obj, unzipped_file, 64 * 1024)
finally: # Ensure closed files
if file_obj:
file_obj.close()
if unzipped_file:
unzipped_file.close()
except (shutil.Error, OSError, IOError) as e:
print('Error: ', e)
def load_data(self, filename):
"""Generator returning lists of values from lines in a data file.
1. Works on both delimited (csv module)
and fixed width data (extract_fixed_width)
2. Identifies the delimiter if not known
3. Removes extra line ending
"""
if hasattr(self.table, "csv_extend_size") and self.table.csv_extend_size:
set_csv_field_size()
if not self.table.delimiter:
self.set_table_delimiter(filename)
dataset_file = open_fr(filename, encoding=self.encoding)
if self.table.fixed_width:
for row in dataset_file:
yield self.extract_fixed_width(row)
else:
reg = re.compile("\\r\\n|\n|\r")
for row in csv.reader(dataset_file, delimiter=self.table.delimiter):
yield [reg.sub(" ", values) for values in row]
def extract_fixed_width(self, line):
"""Split line based on the fixed width, returns list of the values."""
pos = 0
values = []
for width in self.table.fixed_width:
values.append(line[pos:pos + width].strip())
pos += width
return values
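# Illustrative example (not part of the original source): with
# self.table.fixed_width = [3, 5], extract_fixed_width("abcdefgh\n")
# returns ['abc', 'defgh'].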
def set_csv_field_size():
"""Set the CSV size limit based on the available resources"""
maxInt = sys.maxsize
decrement = True
while decrement:
try:
csv.field_size_limit(maxInt)
decrement = False
except OverflowError:
maxInt = int(maxInt / 10)
return maxInt
def skip_rows(rows, source):
"""Skip over the header lines by reading them before processing."""
lines = gen_from_source(source)
for _ in range(rows):
next(lines)
return lines
def file_exists(path):
"""Return true if a file exists and its size is greater than 0."""
return os.path.isfile(path) and os.path.getsize(path) > 0
def filename_from_url(url):
"""Extract and returns the filename from the url."""
return url.split('/')[-1].split('?')[0]
def gen_from_source(source):
"""Return generator from a source tuple.
Source tuples are of the form (callable, args) where callable(*args)
returns either a generator or another source tuple.
This allows indefinite regeneration of data sources.
"""
while isinstance(source, tuple):
gen, args = source
source = gen(*args)
return source
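# Illustrative example (not part of the original source): the nested
# source tuple used by insert_data_from_file,
#
#     source = (skip_rows, (1, (self.load_data, (filename,))))
#     rows = gen_from_source(source)
#
# unpacks to skip_rows(1, (self.load_data, (filename,))), i.e. a generator
# over the data rows with one header line skipped.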
def reporthook(tqdm_inst, filename=None):
"""tqdm wrapper to generate progress bar for urlretriever"""
last_block = [0]
def update_to(count=1, block_size=1, total_size=None):
if total_size is not None:
tqdm_inst.total = total_size
tqdm_inst.update((count - last_block[0]) * block_size)
last_block[0] = count
def update_rto(r, *args, **kwargs): # pylint: disable=W0613
if r.headers.get('Transfer-Encoding', None) != 'chunked':
total_size = int(r.headers['content-length'])
tqdm_inst.total = ceil(total_size // (2 * 1024))
with open(filename, 'wb') as f:
for chunk in r.iter_content(2 * 1024):
f.write(chunk)
tqdm_inst.update(1)
f.close()
return update_rto if filename else update_to
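# Illustrative usage sketch (not part of the original source); wiring the
# hook into urllib's urlretrieve is an assumption about typical use:
#
#     from tqdm import tqdm
#     from urllib.request import urlretrieve
#
#     with tqdm(unit='B', unit_scale=True) as progress:
#         urlretrieve(url, local_path, reporthook=reporthook(progress))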
|
65b5bd6eaeaf66eda2de04cb44abc05ad27c84bc
|
4ffc4c5e97e685fb976bbf8fb66ce0abea1562a1
|
/fido2/utils.py
|
1225bb4eb1cfd3ad95e4ee7b4c793fcb385856fa
|
[
"MPL-2.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
Yubico/python-fido2
|
82508390afc8d8b8eae286c4f3abc1636dec729a
|
963eae041a23d53175bcc4eec1d45ad7f585caa2
|
refs/heads/main
| 2023-08-09T17:30:41.379131
| 2023-07-06T14:03:59
| 2023-07-06T14:03:59
| 125,505,498
| 381
| 110
|
BSD-2-Clause
| 2023-08-22T11:34:05
| 2018-03-16T11:09:18
|
Python
|
UTF-8
|
Python
| false
| false
| 8,783
|
py
|
utils.py
|
# Copyright (c) 2013 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Various utility functions.
This module contains various functions used throughout the rest of the project.
"""
from __future__ import annotations
from base64 import urlsafe_b64decode, urlsafe_b64encode
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hmac, hashes
from io import BytesIO
from dataclasses import fields, Field
from abc import abstractmethod
from typing import (
Union,
Optional,
Sequence,
Mapping,
Any,
TypeVar,
Hashable,
get_type_hints,
)
import struct
__all__ = [
"websafe_encode",
"websafe_decode",
"sha256",
"hmac_sha256",
"bytes2int",
"int2bytes",
]
LOG_LEVEL_TRAFFIC = 5
def sha256(data: bytes) -> bytes:
"""Produces a SHA256 hash of the input.
:param data: The input data to hash.
:return: The resulting hash.
"""
h = hashes.Hash(hashes.SHA256(), default_backend())
h.update(data)
return h.finalize()
def hmac_sha256(key: bytes, data: bytes) -> bytes:
"""Performs an HMAC-SHA256 operation on the given data, using the given key.
:param key: The key to use.
:param data: The input data to hash.
:return: The resulting hash.
"""
h = hmac.HMAC(key, hashes.SHA256(), default_backend())
h.update(data)
return h.finalize()
def bytes2int(value: bytes) -> int:
"""Parses an arbitrarily sized integer from a byte string.
:param value: A byte string encoding a big endian unsigned integer.
:return: The parsed int.
"""
return int.from_bytes(value, "big")
def int2bytes(value: int, minlen: int = -1) -> bytes:
"""Encodes an int as a byte string.
:param value: The integer value to encode.
:param minlen: An optional minimum length for the resulting byte string.
:return: The value encoded as a big endian byte string.
"""
ba = []
while value > 0xFF:
ba.append(0xFF & value)
value >>= 8
ba.append(value)
ba.extend([0] * (minlen - len(ba)))
return bytes(reversed(ba))
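# Illustrative round trip (not part of the original module):
#
#     int2bytes(0x1234)          # -> b'\x12\x34'
#     bytes2int(b'\x12\x34')     # -> 4660
#     int2bytes(1, minlen=4)     # -> b'\x00\x00\x00\x01'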
def websafe_decode(data: Union[str, bytes]) -> bytes:
"""Decodes a websafe-base64 encoded string.
See: "Base 64 Encoding with URL and Filename Safe Alphabet" from Section 5
in RFC4648 without padding.
:param data: The input to decode.
:return: The decoded bytes.
"""
if isinstance(data, str):
data = data.encode("ascii")
data += b"=" * (-len(data) % 4)
return urlsafe_b64decode(data)
def websafe_encode(data: bytes) -> str:
"""Encodes a byte string into websafe-base64 encoding.
:param data: The input to encode.
:return: The encoded string.
"""
return urlsafe_b64encode(data).replace(b"=", b"").decode("ascii")
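# Illustrative round trip (not part of the original module):
#
#     websafe_encode(b'foobar')      # -> 'Zm9vYmFy'
#     websafe_decode('Zm9vYmFy')     # -> b'foobar'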
class ByteBuffer(BytesIO):
"""BytesIO-like object with the ability to unpack values."""
def unpack(self, fmt: str):
"""Reads and unpacks a value from the buffer.
:param fmt: A struct format string yielding a single value.
:return: The unpacked value.
"""
s = struct.Struct(fmt)
return s.unpack(self.read(s.size))[0]
def read(self, size: Optional[int] = -1) -> bytes:
"""Like BytesIO.read(), but checks the number of bytes read and raises an error
if fewer bytes were read than expected.
"""
data = super().read(size)
if size is not None and size > 0 and len(data) != size:
raise ValueError(
"Not enough data to read (need: %d, had: %d)." % (size, len(data))
)
return data
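# Illustrative usage (not part of the original module):
#
#     buf = ByteBuffer(b'\x00\x2a' + b'payload')
#     buf.unpack('>H')   # -> 42
#     buf.read(7)        # -> b'payload'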
def _snake2camel(name: str) -> str:
parts = name.split("_")
return parts[0] + "".join(p.title() for p in parts[1:])
def _parse_value(t, value):
if value is None:
return None
if Optional[t] == t: # Optional, get the type
t = t.__args__[0]
# Handle list of values
if issubclass(getattr(t, "__origin__", object), Sequence):
t = t.__args__[0]
return [_parse_value(t, v) for v in value]
# Handle Mappings
if issubclass(getattr(t, "__origin__", object), Mapping) and isinstance(
value, Mapping
):
return value
# Check if type is already correct
try:
if isinstance(value, t):
return value
except TypeError:
pass
# Check for subclass of _DataClassMapping
try:
is_dataclass = issubclass(t, _DataClassMapping)
except TypeError:
is_dataclass = False
if is_dataclass:
# Recursively call from_dict for nested _DataClassMappings
return t.from_dict(value)
# Convert to enum values or other wrapper types
return t(value)
_T = TypeVar("_T", bound=Hashable)
class _DataClassMapping(Mapping[_T, Any]):
# TODO: This requires Python 3.9, and fixes the type errors we now ignore
# __dataclass_fields__: ClassVar[Dict[str, Field[Any]]]
def __post_init__(self):
hints = get_type_hints(type(self))
for f in fields(self): # type: ignore
value = getattr(self, f.name)
if value is None:
continue
try:
value = _parse_value(hints[f.name], value)
except (TypeError, KeyError, ValueError):
raise ValueError(
f"Error parsing field {f.name} for {self.__class__.__name__}"
)
object.__setattr__(self, f.name, value)
@classmethod
@abstractmethod
def _get_field_key(cls, field: Field) -> _T:
raise NotImplementedError()
def __getitem__(self, key):
for f in fields(self): # type: ignore
if key == self._get_field_key(f):
value = getattr(self, f.name)
serialize = f.metadata.get("serialize")
if serialize:
return serialize(value)
if isinstance(value, _DataClassMapping):
return dict(value)
if isinstance(value, Sequence) and all(
isinstance(x, _DataClassMapping) for x in value
):
return [dict(x) for x in value]
return value
raise KeyError(key)
def __iter__(self):
return (
self._get_field_key(f)
for f in fields(self) # type: ignore
if getattr(self, f.name) is not None
)
def __len__(self):
return len(list(iter(self)))
@classmethod
def from_dict(cls, data: Optional[Mapping[_T, Any]]):
if data is None:
return None
if isinstance(data, cls):
return data
if not isinstance(data, Mapping):
raise TypeError(
f"{cls.__name__}.from_dict called with non-Mapping data of type"
f"{type(data)}"
)
kwargs = {}
for f in fields(cls): # type: ignore
key = cls._get_field_key(f)
if key in data:
value = data[key]
if value is not None:
deserialize = f.metadata.get("deserialize")
if deserialize:
value = deserialize(value)
kwargs[f.name] = value
return cls(**kwargs)
class _CamelCaseDataObject(_DataClassMapping[str]):
@classmethod
def _get_field_key(cls, field: Field) -> str:
return field.metadata.get("name", _snake2camel(field.name))
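# Illustrative sketch (not part of the original module): a hypothetical
# dataclass built on _CamelCaseDataObject exposes snake_case fields under
# camelCase mapping keys.
#
#     from dataclasses import dataclass
#
#     @dataclass
#     class Example(_CamelCaseDataObject):
#         user_handle: bytes
#         display_name: Optional[str] = None
#
#     Example(user_handle=b"abc", display_name="Alice")["displayName"]  # -> 'Alice'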
|
d04569123f774d3375dbc572b69d43a5bd1e817f
|
22947f7f3690ff99c2caf823c2d91db030bb0a0c
|
/pysnow/resource.py
|
1c884343516ef5d0d8403ed746ac445da9ff1ba2
|
[
"MIT"
] |
permissive
|
rbw/pysnow
|
5ade19a80a4ce5f8e8bf651837332761c7675ccd
|
6ac140aab631ef7029b8f211b15ebd3afcbb151e
|
refs/heads/master
| 2023-08-10T02:45:43.589989
| 2022-01-19T00:53:00
| 2022-01-19T00:53:00
| 64,137,253
| 142
| 71
|
MIT
| 2023-07-20T22:33:50
| 2016-07-25T13:36:08
|
Python
|
UTF-8
|
Python
| false
| false
| 4,824
|
py
|
resource.py
|
# -*- coding: utf-8 -*-
import logging
from copy import copy, deepcopy
from .request import SnowRequest
from .attachment import Attachment
from .url_builder import URLBuilder
from .exceptions import InvalidUsage
logger = logging.getLogger("pysnow")
class Resource(object):
r"""Creates a new :class:`Resource` object
Resources provide a natural way of interfacing with ServiceNow APIs.
:param base_path: Base path
:param api_path: API path
:param chunk_size: Response stream parser chunk size (in bytes)
:param \*\*kwargs: Arguments to pass along to :class:`Request`
"""
def __init__(
self, base_url=None, base_path=None, api_path=None, parameters=None, **kwargs
):
self._base_url = base_url
self._base_path = base_path
self._api_path = api_path
self._url_builder = URLBuilder(base_url, base_path, api_path)
self.kwargs = kwargs
self.parameters = deepcopy(parameters)
logger.debug(
"(RESOURCE_ADD) Object: %s, chunk_size: %d"
% (self, kwargs.get("chunk_size"))
)
def __repr__(self):
return "<%s [%s] at %s>" % (self.__class__.__name__, self.path, hex(id(self)))
@property
def path(self):
"""Get current path relative to base URL
:return: resource path
"""
return "%s" % self._base_path + self._api_path
@property
def attachments(self):
"""Provides an `Attachment` API for this resource.
Enables easy listing, deleting and creating new attachments.
:return: Attachment object
"""
resource = copy(self)
resource._url_builder = URLBuilder(
self._base_url, self._base_path, "/attachment"
)
path = self._api_path.strip("/").split("/")
if path[0] != "table":
raise InvalidUsage("The attachment API can only be used with the table API")
return Attachment(resource, path[1])
@property
def _request(self):
"""Request wrapper
:return: SnowRequest object
"""
parameters = copy(self.parameters)
return SnowRequest(
url_builder=self._url_builder,
parameters=parameters,
resource=self,
**self.kwargs
)
def get_record_link(self, sys_id):
"""Provides full URL to the provided sys_id
:param sys_id: sys_id to generate URL for
:return: full sys_id URL
"""
return "%s/%s" % (self._url_builder.get_url(), sys_id)
def get(self, *args, **kwargs):
"""Queries the API resource
:param args:
- :param query: Dictionary, string or :class:`QueryBuilder` object
defaults to empty dict (all)
:param kwargs:
- :param limit: Limits the number of records returned
- :param fields: List of fields to include in the response
created_on in descending order.
- :param offset: Number of records to skip before returning records
- :param stream: Whether or not to use streaming / generator response interface
:return:
- :class:`Response` object
"""
return self._request.get(*args, **kwargs)
def create(self, payload):
"""Creates a new record in the API resource
:param payload: Dictionary containing key-value fields of the new record
:return:
- Dictionary of the inserted record
"""
return self._request.create(payload)
def update(self, query, payload):
"""Updates a record in the API resource
:param query: Dictionary, string or :class:`QueryBuilder` object
:param payload: Dictionary containing key-value fields of the record to be updated
:return:
- Dictionary of the updated record
"""
return self._request.update(query, payload)
def delete(self, query):
"""Deletes matching record
:param query: Dictionary, string or :class:`QueryBuilder` object
:return:
- Dictionary containing information about deletion result
"""
return self._request.delete(query)
def request(self, method, path_append=None, headers=None, **kwargs):
"""Create a custom request
:param method: HTTP method to use
:param path_append: (optional) relative to :attr:`api_path`
:param headers: (optional) Dictionary of headers to add or override
:param kwargs: kwargs to pass along to :class:`requests.Request`
:return:
- :class:`Response` object
"""
return self._request.custom(
method, path_append=path_append, headers=headers, **kwargs
)
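# Illustrative usage sketch (not part of the original module). Resources are
# normally obtained from a pysnow.Client; the instance name and query below
# are assumptions:
#
#     import pysnow
#     client = pysnow.Client(instance="dev12345", user="admin", password="...")
#     incident = client.resource(api_path="/table/incident")
#     record = incident.get(query={"number": "INC0000001"}).one()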
|
3c4d02d8fa551abec4015945e5201d3dd25c82c3
|
71acb7214efd91c0d327f6d8958e1798eadb4401
|
/locations/spiders/audi_be.py
|
5d7ab70ad741a363b08055d3fd8b78b7485e48bc
|
[
"CC0-1.0",
"MIT"
] |
permissive
|
alltheplaces/alltheplaces
|
21b9f8b4ace1352e52ae7b8f8825a930d2cb033e
|
1bcbb55cfcf06f2c714465570711f6e83f205c22
|
refs/heads/master
| 2023-08-30T19:45:35.098658
| 2023-08-30T17:51:54
| 2023-08-30T17:51:54
| 61,166,935
| 453
| 176
|
NOASSERTION
| 2023-09-14T17:16:40
| 2016-06-15T01:09:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,536
|
py
|
audi_be.py
|
import scrapy
from locations.items import Feature
from locations.user_agents import BROWSER_DEFAULT
class AudiBeSpider(scrapy.Spider):
name = "audi_be"
item_attributes = {
"brand": "Audi",
"brand_wikidata": "Q23317",
}
allowed_domains = ["audi.be"]
custom_settings = {
"ROBOTSTXT_OBEY": False,
"DEFAULT_REQUEST_HEADERS": {
"Content-Type": "application/json;charset=UTF-8",
"User-Agent": BROWSER_DEFAULT,
},
}
def start_requests(self):
url = "https://dealerlocator.dieteren.be/api/locator.asmx/SearchEntities"
payload = '{"request":{"TemplateID":11,"Sale":"N","AfterSale":"N","ETron":false,"AudiSport":false,"Aap":false,"Gte":false,"Language":"fr"}}'
yield scrapy.Request(url=url, body=payload, method="POST", callback=self.parse)
def parse(self, response):
for row in response.json()["d"]["Dealers"]:
item = Feature()
item["ref"] = row.get("UTE")
item["name"] = row.get("NAME")
item["street_address"] = row.get("ADDRESS")
item["city"] = row.get("CITY")
item["postcode"] = row.get("ZIP")
item["lat"] = float(row.get("GPSLAT").replace(",", "."))
item["lon"] = float(row.get("GPSLONG").replace(",", "."))
item["country"] = row.get("country")
item["phone"] = row.get("TEL")
item["website"] = row.get("URL")
item["email"] = row.get("MAIL")
yield item
|
bca752b1d78b93840f25ef45cfe77b8a5a42c6ed
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/JetMETCorrections/Type1MET/python/multPhiCorr_phys14_cfi.py
|
9a74207bff0adcb114701e73c94f7b9476d78714
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 4,276
|
py
|
multPhiCorr_phys14_cfi.py
|
import FWCore.ParameterSet.Config as cms
multPhiCorr_phys14 = cms.VPSet(
cms.PSet(
name=cms.string("h"),
type=cms.int32(1),
varType=cms.int32(0),
etaMin=cms.double(-2.7),
etaMax=cms.double(2.7),
fx=cms.string("(x*[0])+(sq(x)*[1])"),
px=cms.vdouble(-0.00100488080903,-2.52390571355e-07),
fy=cms.string("(x*[0])+(sq(x)*[1])"),
py=cms.vdouble(-0.00861386123702,4.62378996583e-07),
),
cms.PSet(
name=cms.string("h0Barrel"),
type=cms.int32(5),
varType=cms.int32(0),
etaMin=cms.double(-1.392),
etaMax=cms.double(1.392),
fx=cms.string("(x*[0])+(sq(x)*[1])"),
px=cms.vdouble(-0.00934165318347,-0.000231264440872),
fy=cms.string("(x*[0])+(sq(x)*[1])"),
py=cms.vdouble(-0.00571103266272,-6.23866320566e-05),
),
cms.PSet(
name=cms.string("h0EndcapPlus"),
type=cms.int32(5),
varType=cms.int32(0),
etaMin=cms.double(1.392),
etaMax=cms.double(3),
fx=cms.string("(x*[0])+(sq(x)*[1])"),
px=cms.vdouble(0.0120553467578,0.000223494565384),
fy=cms.string("(x*[0])+(sq(x)*[1])"),
py=cms.vdouble(0.0230337515784,0.000500746987632),
),
cms.PSet(
name=cms.string("h0EndcapMinus"),
type=cms.int32(5),
varType=cms.int32(0),
etaMin=cms.double(-3.0),
etaMax=cms.double(-1.392),
fx=cms.string("(x*[0])+(sq(x)*[1])"),
px=cms.vdouble(0.0150317804201,9.95330180319e-05),
fy=cms.string("(x*[0])+(sq(x)*[1])"),
py=cms.vdouble(0.0308198327343,-1.47375713164e-05),
),
cms.PSet(
name=cms.string("gammaBarrel"),
type=cms.int32(4),
varType=cms.int32(2),
etaMin=cms.double(-1.479),
etaMax=cms.double(1.479),
fx=cms.string("(x*[0])+(sq(x)*[1])"),
px=cms.vdouble(-0.00183455254786,-4.36950442113e-06),
fy=cms.string("(x*[0])+(sq(x)*[1])"),
py=cms.vdouble(-0.00111148670267,4.48001318056e-06),
),
cms.PSet(
name=cms.string("gammaEndcapPlus"),
type=cms.int32(4),
varType=cms.int32(2),
etaMin=cms.double(1.479),
etaMax=cms.double(3.0),
fx=cms.string("(x*[0])+(sq(x)*[1])"),
px=cms.vdouble(-0.000567748005415,2.81999488957e-05),
fy=cms.string("(x*[0])+(sq(x)*[1])"),
py=cms.vdouble(0.00415562062208,3.98784866726e-05),
),
cms.PSet(
name=cms.string("gammaEndcapMinus"),
type=cms.int32(4),
varType=cms.int32(2),
etaMin=cms.double(-3.0),
etaMax=cms.double(-1.479),
fx=cms.string("(x*[0])+(sq(x)*[1])"),
px=cms.vdouble(0.000776596089228,1.63661712992e-05),
fy=cms.string("(x*[0])+(sq(x)*[1])"),
py=cms.vdouble(-0.00446841488218,1.28857828225e-05),
),
cms.PSet(
name=cms.string("hHFPlus"),
type=cms.int32(6),
varType=cms.int32(0),
etaMin=cms.double(2.901376),
etaMax=cms.double(5.2),
fx=cms.string("(x*[0])+(sq(x)*[1])"),
px=cms.vdouble(-0.000902843902424,-1.05368116476e-05),
fy=cms.string("(x*[0])+(sq(x)*[1])"),
py=cms.vdouble(-0.000928116206712,-2.02159610368e-05),
),
cms.PSet(
name=cms.string("hHFMinus"),
type=cms.int32(6),
varType=cms.int32(0),
etaMin=cms.double(-5.2),
etaMax=cms.double(-2.901376),
fx=cms.string("(x*[0])+(sq(x)*[1])"),
px=cms.vdouble(-0.000797275948578,-1.05466329044e-05),
fy=cms.string("(x*[0])+(sq(x)*[1])"),
py=cms.vdouble(-0.000679522388353,-2.16586402923e-05),
),
cms.PSet(
name=cms.string("egammaHFPlus"),
type=cms.int32(7),
varType=cms.int32(0),
etaMin=cms.double(2.901376),
etaMax=cms.double(5.2),
fx=cms.string("(x*[0])+(sq(x)*[1])"),
px=cms.vdouble(-0.00107966151869,1.12289163272e-05),
fy=cms.string("(x*[0])+(sq(x)*[1])"),
py=cms.vdouble(-0.0022998808914,1.79406614595e-05),
),
cms.PSet(
name=cms.string("egammaHFMinus"),
type=cms.int32(7),
varType=cms.int32(0),
etaMin=cms.double(-5.2),
etaMax=cms.double(-2.901376),
fx=cms.string("(x*[0])+(sq(x)*[1])"),
px=cms.vdouble(-0.00135787955563,1.2190906775e-05),
fy=cms.string("(x*[0])+(sq(x)*[1])"),
py=cms.vdouble(-0.00224502303282,1.86304722922e-05),
),
)
|
e3d198d71924b4eb4225b8db9765778d4b3f60e8
|
a2b20597759990445081057d35d113434cfcf970
|
/stubs/typeshed/typeshed/stubs/pywin32/win32/lib/win32netcon.pyi
|
0538f443f18daf6ad92f2ee6d9a82d3a67631027
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
facebook/pyre-check
|
34059599c02b65605c574f13555229f3b931fd4e
|
fe8ccedc572cc1faa1fd01e9138f65e982875002
|
refs/heads/main
| 2023-09-03T19:10:11.587028
| 2023-09-02T07:40:35
| 2023-09-02T07:40:35
| 110,274,488
| 6,703
| 575
|
MIT
| 2023-09-13T17:02:32
| 2017-11-10T17:31:36
|
OCaml
|
UTF-8
|
Python
| false
| false
| 14,831
|
pyi
|
win32netcon.pyi
|
CNLEN: int
LM20_CNLEN: int
DNLEN: int
LM20_DNLEN: int
UNCLEN: int
LM20_UNCLEN: int
NNLEN: int
LM20_NNLEN: int
RMLEN: int
LM20_RMLEN: int
SNLEN: int
LM20_SNLEN: int
STXTLEN: int
LM20_STXTLEN: int
PATHLEN: int
LM20_PATHLEN: int
DEVLEN: int
LM20_DEVLEN: int
EVLEN: int
UNLEN: int
LM20_UNLEN: int
GNLEN: int
LM20_GNLEN: int
PWLEN: int
LM20_PWLEN: int
SHPWLEN: int
CLTYPE_LEN: int
MAXCOMMENTSZ: int
LM20_MAXCOMMENTSZ: int
QNLEN: int
LM20_QNLEN: int
ALERTSZ: int
NETBIOS_NAME_LEN: int
CRYPT_KEY_LEN: int
CRYPT_TXT_LEN: int
ENCRYPTED_PWLEN: int
SESSION_PWLEN: int
SESSION_CRYPT_KLEN: int
PARMNUM_ALL: int
PARM_ERROR_NONE: int
PARMNUM_BASE_INFOLEVEL: int
NULL: int
PLATFORM_ID_DOS: int
PLATFORM_ID_OS2: int
PLATFORM_ID_NT: int
PLATFORM_ID_OSF: int
PLATFORM_ID_VMS: int
MAX_LANMAN_MESSAGE_ID: int
UF_SCRIPT: int
UF_ACCOUNTDISABLE: int
UF_HOMEDIR_REQUIRED: int
UF_LOCKOUT: int
UF_PASSWD_NOTREQD: int
UF_PASSWD_CANT_CHANGE: int
UF_TEMP_DUPLICATE_ACCOUNT: int
UF_NORMAL_ACCOUNT: int
UF_INTERDOMAIN_TRUST_ACCOUNT: int
UF_WORKSTATION_TRUST_ACCOUNT: int
UF_SERVER_TRUST_ACCOUNT: int
UF_MACHINE_ACCOUNT_MASK: int
UF_ACCOUNT_TYPE_MASK: int
UF_DONT_EXPIRE_PASSWD: int
UF_MNS_LOGON_ACCOUNT: int
UF_SETTABLE_BITS: int
FILTER_TEMP_DUPLICATE_ACCOUNT: int
FILTER_NORMAL_ACCOUNT: int
FILTER_INTERDOMAIN_TRUST_ACCOUNT: int
FILTER_WORKSTATION_TRUST_ACCOUNT: int
FILTER_SERVER_TRUST_ACCOUNT: int
LG_INCLUDE_INDIRECT: int
AF_OP_PRINT: int
AF_OP_COMM: int
AF_OP_SERVER: int
AF_OP_ACCOUNTS: int
AF_SETTABLE_BITS: int
UAS_ROLE_STANDALONE: int
UAS_ROLE_MEMBER: int
UAS_ROLE_BACKUP: int
UAS_ROLE_PRIMARY: int
USER_NAME_PARMNUM: int
USER_PASSWORD_PARMNUM: int
USER_PASSWORD_AGE_PARMNUM: int
USER_PRIV_PARMNUM: int
USER_HOME_DIR_PARMNUM: int
USER_COMMENT_PARMNUM: int
USER_FLAGS_PARMNUM: int
USER_SCRIPT_PATH_PARMNUM: int
USER_AUTH_FLAGS_PARMNUM: int
USER_FULL_NAME_PARMNUM: int
USER_USR_COMMENT_PARMNUM: int
USER_PARMS_PARMNUM: int
USER_WORKSTATIONS_PARMNUM: int
USER_LAST_LOGON_PARMNUM: int
USER_LAST_LOGOFF_PARMNUM: int
USER_ACCT_EXPIRES_PARMNUM: int
USER_MAX_STORAGE_PARMNUM: int
USER_UNITS_PER_WEEK_PARMNUM: int
USER_LOGON_HOURS_PARMNUM: int
USER_PAD_PW_COUNT_PARMNUM: int
USER_NUM_LOGONS_PARMNUM: int
USER_LOGON_SERVER_PARMNUM: int
USER_COUNTRY_CODE_PARMNUM: int
USER_CODE_PAGE_PARMNUM: int
USER_PRIMARY_GROUP_PARMNUM: int
USER_PROFILE: int
USER_PROFILE_PARMNUM: int
USER_HOME_DIR_DRIVE_PARMNUM: int
USER_NAME_INFOLEVEL: int
USER_PASSWORD_INFOLEVEL: int
USER_PASSWORD_AGE_INFOLEVEL: int
USER_PRIV_INFOLEVEL: int
USER_HOME_DIR_INFOLEVEL: int
USER_COMMENT_INFOLEVEL: int
USER_FLAGS_INFOLEVEL: int
USER_SCRIPT_PATH_INFOLEVEL: int
USER_AUTH_FLAGS_INFOLEVEL: int
USER_FULL_NAME_INFOLEVEL: int
USER_USR_COMMENT_INFOLEVEL: int
USER_PARMS_INFOLEVEL: int
USER_WORKSTATIONS_INFOLEVEL: int
USER_LAST_LOGON_INFOLEVEL: int
USER_LAST_LOGOFF_INFOLEVEL: int
USER_ACCT_EXPIRES_INFOLEVEL: int
USER_MAX_STORAGE_INFOLEVEL: int
USER_UNITS_PER_WEEK_INFOLEVEL: int
USER_LOGON_HOURS_INFOLEVEL: int
USER_PAD_PW_COUNT_INFOLEVEL: int
USER_NUM_LOGONS_INFOLEVEL: int
USER_LOGON_SERVER_INFOLEVEL: int
USER_COUNTRY_CODE_INFOLEVEL: int
USER_CODE_PAGE_INFOLEVEL: int
USER_PRIMARY_GROUP_INFOLEVEL: int
USER_HOME_DIR_DRIVE_INFOLEVEL: int
NULL_USERSETINFO_PASSWD: str
UNITS_PER_DAY: int
UNITS_PER_WEEK: int
USER_PRIV_MASK: int
USER_PRIV_GUEST: int
USER_PRIV_USER: int
USER_PRIV_ADMIN: int
MAX_PASSWD_LEN: int
DEF_MIN_PWLEN: int
DEF_PWUNIQUENESS: int
DEF_MAX_PWHIST: int
DEF_MAX_BADPW: int
VALIDATED_LOGON: int
PASSWORD_EXPIRED: int
NON_VALIDATED_LOGON: int
VALID_LOGOFF: int
MODALS_MIN_PASSWD_LEN_PARMNUM: int
MODALS_MAX_PASSWD_AGE_PARMNUM: int
MODALS_MIN_PASSWD_AGE_PARMNUM: int
MODALS_FORCE_LOGOFF_PARMNUM: int
MODALS_PASSWD_HIST_LEN_PARMNUM: int
MODALS_ROLE_PARMNUM: int
MODALS_PRIMARY_PARMNUM: int
MODALS_DOMAIN_NAME_PARMNUM: int
MODALS_DOMAIN_ID_PARMNUM: int
MODALS_LOCKOUT_DURATION_PARMNUM: int
MODALS_LOCKOUT_OBSERVATION_WINDOW_PARMNUM: int
MODALS_LOCKOUT_THRESHOLD_PARMNUM: int
MODALS_MIN_PASSWD_LEN_INFOLEVEL: int
MODALS_MAX_PASSWD_AGE_INFOLEVEL: int
MODALS_MIN_PASSWD_AGE_INFOLEVEL: int
MODALS_FORCE_LOGOFF_INFOLEVEL: int
MODALS_PASSWD_HIST_LEN_INFOLEVEL: int
MODALS_ROLE_INFOLEVEL: int
MODALS_PRIMARY_INFOLEVEL: int
MODALS_DOMAIN_NAME_INFOLEVEL: int
MODALS_DOMAIN_ID_INFOLEVEL: int
GROUPIDMASK: int
GROUP_ALL_PARMNUM: int
GROUP_NAME_PARMNUM: int
GROUP_COMMENT_PARMNUM: int
GROUP_ATTRIBUTES_PARMNUM: int
GROUP_ALL_INFOLEVEL: int
GROUP_NAME_INFOLEVEL: int
GROUP_COMMENT_INFOLEVEL: int
GROUP_ATTRIBUTES_INFOLEVEL: int
LOCALGROUP_NAME_PARMNUM: int
LOCALGROUP_COMMENT_PARMNUM: int
MAXPERMENTRIES: int
ACCESS_NONE: int
ACCESS_READ: int
ACCESS_WRITE: int
ACCESS_CREATE: int
ACCESS_EXEC: int
ACCESS_DELETE: int
ACCESS_ATRIB: int
ACCESS_PERM: int
ACCESS_GROUP: int
ACCESS_AUDIT: int
ACCESS_SUCCESS_OPEN: int
ACCESS_SUCCESS_WRITE: int
ACCESS_SUCCESS_DELETE: int
ACCESS_SUCCESS_ACL: int
ACCESS_SUCCESS_MASK: int
ACCESS_FAIL_OPEN: int
ACCESS_FAIL_WRITE: int
ACCESS_FAIL_DELETE: int
ACCESS_FAIL_ACL: int
ACCESS_FAIL_MASK: int
ACCESS_FAIL_SHIFT: int
ACCESS_RESOURCE_NAME_PARMNUM: int
ACCESS_ATTR_PARMNUM: int
ACCESS_COUNT_PARMNUM: int
ACCESS_RESOURCE_NAME_INFOLEVEL: int
ACCESS_ATTR_INFOLEVEL: int
ACCESS_COUNT_INFOLEVEL: int
ACCESS_LETTERS: str
NETLOGON_CONTROL_QUERY: int
NETLOGON_CONTROL_REPLICATE: int
NETLOGON_CONTROL_SYNCHRONIZE: int
NETLOGON_CONTROL_PDC_REPLICATE: int
NETLOGON_CONTROL_REDISCOVER: int
NETLOGON_CONTROL_TC_QUERY: int
NETLOGON_CONTROL_TRANSPORT_NOTIFY: int
NETLOGON_CONTROL_FIND_USER: int
NETLOGON_CONTROL_UNLOAD_NETLOGON_DLL: int
NETLOGON_CONTROL_BACKUP_CHANGE_LOG: int
NETLOGON_CONTROL_TRUNCATE_LOG: int
NETLOGON_CONTROL_SET_DBFLAG: int
NETLOGON_CONTROL_BREAKPOINT: int
NETLOGON_REPLICATION_NEEDED: int
NETLOGON_REPLICATION_IN_PROGRESS: int
NETLOGON_FULL_SYNC_REPLICATION: int
NETLOGON_REDO_NEEDED: int
def TEXT(x: str) -> str: ...
MAX_PREFERRED_LENGTH: int
PARM_ERROR_UNKNOWN: int
MESSAGE_FILENAME: str
OS2MSG_FILENAME: str
HELP_MSG_FILENAME: str
BACKUP_MSG_FILENAME: str
TIMEQ_FOREVER: int
USER_MAXSTORAGE_UNLIMITED: int
USER_NO_LOGOFF: int
DEF_MAX_PWAGE: int
DEF_MIN_PWAGE: int
DEF_FORCE_LOGOFF: int
ONE_DAY: int
GROUP_SPECIALGRP_USERS: str
GROUP_SPECIALGRP_ADMINS: str
GROUP_SPECIALGRP_GUESTS: str
GROUP_SPECIALGRP_LOCAL: str
ACCESS_ALL: int
SV_PLATFORM_ID_OS2: int
SV_PLATFORM_ID_NT: int
MAJOR_VERSION_MASK: int
SV_TYPE_WORKSTATION: int
SV_TYPE_SERVER: int
SV_TYPE_SQLSERVER: int
SV_TYPE_DOMAIN_CTRL: int
SV_TYPE_DOMAIN_BAKCTRL: int
SV_TYPE_TIME_SOURCE: int
SV_TYPE_AFP: int
SV_TYPE_NOVELL: int
SV_TYPE_DOMAIN_MEMBER: int
SV_TYPE_PRINTQ_SERVER: int
SV_TYPE_DIALIN_SERVER: int
SV_TYPE_XENIX_SERVER: int
SV_TYPE_SERVER_UNIX: int
SV_TYPE_NT: int
SV_TYPE_WFW: int
SV_TYPE_SERVER_MFPN: int
SV_TYPE_SERVER_NT: int
SV_TYPE_POTENTIAL_BROWSER: int
SV_TYPE_BACKUP_BROWSER: int
SV_TYPE_MASTER_BROWSER: int
SV_TYPE_DOMAIN_MASTER: int
SV_TYPE_SERVER_OSF: int
SV_TYPE_SERVER_VMS: int
SV_TYPE_WINDOWS: int
SV_TYPE_DFS: int
SV_TYPE_CLUSTER_NT: int
SV_TYPE_DCE: int
SV_TYPE_ALTERNATE_XPORT: int
SV_TYPE_DOMAIN_ENUM: int
SV_TYPE_ALL: int
SV_NODISC: int
SV_USERSECURITY: int
SV_SHARESECURITY: int
SV_HIDDEN: int
SV_VISIBLE: int
SV_PLATFORM_ID_PARMNUM: int
SV_NAME_PARMNUM: int
SV_VERSION_MAJOR_PARMNUM: int
SV_VERSION_MINOR_PARMNUM: int
SV_TYPE_PARMNUM: int
SV_COMMENT_PARMNUM: int
SV_USERS_PARMNUM: int
SV_DISC_PARMNUM: int
SV_HIDDEN_PARMNUM: int
SV_ANNOUNCE_PARMNUM: int
SV_ANNDELTA_PARMNUM: int
SV_USERPATH_PARMNUM: int
SV_ALERTS_PARMNUM: int
SV_SECURITY_PARMNUM: int
SV_NUMADMIN_PARMNUM: int
SV_LANMASK_PARMNUM: int
SV_GUESTACC_PARMNUM: int
SV_CHDEVQ_PARMNUM: int
SV_CHDEVJOBS_PARMNUM: int
SV_CONNECTIONS_PARMNUM: int
SV_SHARES_PARMNUM: int
SV_OPENFILES_PARMNUM: int
SV_SESSREQS_PARMNUM: int
SV_ACTIVELOCKS_PARMNUM: int
SV_NUMREQBUF_PARMNUM: int
SV_NUMBIGBUF_PARMNUM: int
SV_NUMFILETASKS_PARMNUM: int
SV_ALERTSCHED_PARMNUM: int
SV_ERRORALERT_PARMNUM: int
SV_LOGONALERT_PARMNUM: int
SV_ACCESSALERT_PARMNUM: int
SV_DISKALERT_PARMNUM: int
SV_NETIOALERT_PARMNUM: int
SV_MAXAUDITSZ_PARMNUM: int
SV_SRVHEURISTICS_PARMNUM: int
SV_SESSOPENS_PARMNUM: int
SV_SESSVCS_PARMNUM: int
SV_OPENSEARCH_PARMNUM: int
SV_SIZREQBUF_PARMNUM: int
SV_INITWORKITEMS_PARMNUM: int
SV_MAXWORKITEMS_PARMNUM: int
SV_RAWWORKITEMS_PARMNUM: int
SV_IRPSTACKSIZE_PARMNUM: int
SV_MAXRAWBUFLEN_PARMNUM: int
SV_SESSUSERS_PARMNUM: int
SV_SESSCONNS_PARMNUM: int
SV_MAXNONPAGEDMEMORYUSAGE_PARMNUM: int
SV_MAXPAGEDMEMORYUSAGE_PARMNUM: int
SV_ENABLESOFTCOMPAT_PARMNUM: int
SV_ENABLEFORCEDLOGOFF_PARMNUM: int
SV_TIMESOURCE_PARMNUM: int
SV_ACCEPTDOWNLEVELAPIS_PARMNUM: int
SV_LMANNOUNCE_PARMNUM: int
SV_DOMAIN_PARMNUM: int
SV_MAXCOPYREADLEN_PARMNUM: int
SV_MAXCOPYWRITELEN_PARMNUM: int
SV_MINKEEPSEARCH_PARMNUM: int
SV_MAXKEEPSEARCH_PARMNUM: int
SV_MINKEEPCOMPLSEARCH_PARMNUM: int
SV_MAXKEEPCOMPLSEARCH_PARMNUM: int
SV_THREADCOUNTADD_PARMNUM: int
SV_NUMBLOCKTHREADS_PARMNUM: int
SV_SCAVTIMEOUT_PARMNUM: int
SV_MINRCVQUEUE_PARMNUM: int
SV_MINFREEWORKITEMS_PARMNUM: int
SV_XACTMEMSIZE_PARMNUM: int
SV_THREADPRIORITY_PARMNUM: int
SV_MAXMPXCT_PARMNUM: int
SV_OPLOCKBREAKWAIT_PARMNUM: int
SV_OPLOCKBREAKRESPONSEWAIT_PARMNUM: int
SV_ENABLEOPLOCKS_PARMNUM: int
SV_ENABLEOPLOCKFORCECLOSE_PARMNUM: int
SV_ENABLEFCBOPENS_PARMNUM: int
SV_ENABLERAW_PARMNUM: int
SV_ENABLESHAREDNETDRIVES_PARMNUM: int
SV_MINFREECONNECTIONS_PARMNUM: int
SV_MAXFREECONNECTIONS_PARMNUM: int
SV_INITSESSTABLE_PARMNUM: int
SV_INITCONNTABLE_PARMNUM: int
SV_INITFILETABLE_PARMNUM: int
SV_INITSEARCHTABLE_PARMNUM: int
SV_ALERTSCHEDULE_PARMNUM: int
SV_ERRORTHRESHOLD_PARMNUM: int
SV_NETWORKERRORTHRESHOLD_PARMNUM: int
SV_DISKSPACETHRESHOLD_PARMNUM: int
SV_MAXLINKDELAY_PARMNUM: int
SV_MINLINKTHROUGHPUT_PARMNUM: int
SV_LINKINFOVALIDTIME_PARMNUM: int
SV_SCAVQOSINFOUPDATETIME_PARMNUM: int
SV_MAXWORKITEMIDLETIME_PARMNUM: int
SV_MAXRAWWORKITEMS_PARMNUM: int
SV_PRODUCTTYPE_PARMNUM: int
SV_SERVERSIZE_PARMNUM: int
SV_CONNECTIONLESSAUTODISC_PARMNUM: int
SV_SHARINGVIOLATIONRETRIES_PARMNUM: int
SV_SHARINGVIOLATIONDELAY_PARMNUM: int
SV_MAXGLOBALOPENSEARCH_PARMNUM: int
SV_REMOVEDUPLICATESEARCHES_PARMNUM: int
SV_LOCKVIOLATIONRETRIES_PARMNUM: int
SV_LOCKVIOLATIONOFFSET_PARMNUM: int
SV_LOCKVIOLATIONDELAY_PARMNUM: int
SV_MDLREADSWITCHOVER_PARMNUM: int
SV_CACHEDOPENLIMIT_PARMNUM: int
SV_CRITICALTHREADS_PARMNUM: int
SV_RESTRICTNULLSESSACCESS_PARMNUM: int
SV_ENABLEWFW311DIRECTIPX_PARMNUM: int
SV_OTHERQUEUEAFFINITY_PARMNUM: int
SV_QUEUESAMPLESECS_PARMNUM: int
SV_BALANCECOUNT_PARMNUM: int
SV_PREFERREDAFFINITY_PARMNUM: int
SV_MAXFREERFCBS_PARMNUM: int
SV_MAXFREEMFCBS_PARMNUM: int
SV_MAXFREELFCBS_PARMNUM: int
SV_MAXFREEPAGEDPOOLCHUNKS_PARMNUM: int
SV_MINPAGEDPOOLCHUNKSIZE_PARMNUM: int
SV_MAXPAGEDPOOLCHUNKSIZE_PARMNUM: int
SV_SENDSFROMPREFERREDPROCESSOR_PARMNUM: int
SV_MAXTHREADSPERQUEUE_PARMNUM: int
SV_CACHEDDIRECTORYLIMIT_PARMNUM: int
SV_MAXCOPYLENGTH_PARMNUM: int
SV_ENABLEBULKTRANSFER_PARMNUM: int
SV_ENABLECOMPRESSION_PARMNUM: int
SV_AUTOSHAREWKS_PARMNUM: int
SV_AUTOSHARESERVER_PARMNUM: int
SV_ENABLESECURITYSIGNATURE_PARMNUM: int
SV_REQUIRESECURITYSIGNATURE_PARMNUM: int
SV_MINCLIENTBUFFERSIZE_PARMNUM: int
SV_CONNECTIONNOSESSIONSTIMEOUT_PARMNUM: int
SVI1_NUM_ELEMENTS: int
SVI2_NUM_ELEMENTS: int
SVI3_NUM_ELEMENTS: int
SW_AUTOPROF_LOAD_MASK: int
SW_AUTOPROF_SAVE_MASK: int
SV_MAX_SRV_HEUR_LEN: int
SV_USERS_PER_LICENSE: int
SVTI2_REMAP_PIPE_NAMES: int
SHARE_NETNAME_PARMNUM: int
SHARE_TYPE_PARMNUM: int
SHARE_REMARK_PARMNUM: int
SHARE_PERMISSIONS_PARMNUM: int
SHARE_MAX_USES_PARMNUM: int
SHARE_CURRENT_USES_PARMNUM: int
SHARE_PATH_PARMNUM: int
SHARE_PASSWD_PARMNUM: int
SHARE_FILE_SD_PARMNUM: int
SHI1_NUM_ELEMENTS: int
SHI2_NUM_ELEMENTS: int
STYPE_DISKTREE: int
STYPE_PRINTQ: int
STYPE_DEVICE: int
STYPE_IPC: int
STYPE_SPECIAL: int
SHI1005_FLAGS_DFS: int
SHI1005_FLAGS_DFS_ROOT: int
COW_PERMACHINE: int
COW_PERUSER: int
CSC_CACHEABLE: int
CSC_NOFLOWOPS: int
CSC_AUTO_INWARD: int
CSC_AUTO_OUTWARD: int
SHI1005_VALID_FLAGS_SET: int
SHI1007_VALID_FLAGS_SET: int
SESS_GUEST: int
SESS_NOENCRYPTION: int
SESI1_NUM_ELEMENTS: int
SESI2_NUM_ELEMENTS: int
PERM_FILE_READ: int
PERM_FILE_WRITE: int
PERM_FILE_CREATE: int
WNNC_NET_MSNET: int
WNNC_NET_LANMAN: int
WNNC_NET_NETWARE: int
WNNC_NET_VINES: int
WNNC_NET_10NET: int
WNNC_NET_LOCUS: int
WNNC_NET_SUN_PC_NFS: int
WNNC_NET_LANSTEP: int
WNNC_NET_9TILES: int
WNNC_NET_LANTASTIC: int
WNNC_NET_AS400: int
WNNC_NET_FTP_NFS: int
WNNC_NET_PATHWORKS: int
WNNC_NET_LIFENET: int
WNNC_NET_POWERLAN: int
WNNC_NET_BWNFS: int
WNNC_NET_COGENT: int
WNNC_NET_FARALLON: int
WNNC_NET_APPLETALK: int
WNNC_NET_INTERGRAPH: int
WNNC_NET_SYMFONET: int
WNNC_NET_CLEARCASE: int
WNNC_NET_FRONTIER: int
WNNC_NET_BMC: int
WNNC_NET_DCE: int
WNNC_NET_DECORB: int
WNNC_NET_PROTSTOR: int
WNNC_NET_FJ_REDIR: int
WNNC_NET_DISTINCT: int
WNNC_NET_TWINS: int
WNNC_NET_RDR2SAMPLE: int
RESOURCE_CONNECTED: int
RESOURCE_GLOBALNET: int
RESOURCE_REMEMBERED: int
RESOURCE_RECENT: int
RESOURCE_CONTEXT: int
RESOURCETYPE_ANY: int
RESOURCETYPE_DISK: int
RESOURCETYPE_PRINT: int
RESOURCETYPE_RESERVED: int
RESOURCETYPE_UNKNOWN: int
RESOURCEUSAGE_CONNECTABLE: int
RESOURCEUSAGE_CONTAINER: int
RESOURCEUSAGE_NOLOCALDEVICE: int
RESOURCEUSAGE_SIBLING: int
RESOURCEUSAGE_ATTACHED: int
RESOURCEUSAGE_ALL: int
RESOURCEUSAGE_RESERVED: int
RESOURCEDISPLAYTYPE_GENERIC: int
RESOURCEDISPLAYTYPE_DOMAIN: int
RESOURCEDISPLAYTYPE_SERVER: int
RESOURCEDISPLAYTYPE_SHARE: int
RESOURCEDISPLAYTYPE_FILE: int
RESOURCEDISPLAYTYPE_GROUP: int
RESOURCEDISPLAYTYPE_NETWORK: int
RESOURCEDISPLAYTYPE_ROOT: int
RESOURCEDISPLAYTYPE_SHAREADMIN: int
RESOURCEDISPLAYTYPE_DIRECTORY: int
RESOURCEDISPLAYTYPE_TREE: int
RESOURCEDISPLAYTYPE_NDSCONTAINER: int
NETPROPERTY_PERSISTENT: int
CONNECT_UPDATE_PROFILE: int
CONNECT_UPDATE_RECENT: int
CONNECT_TEMPORARY: int
CONNECT_INTERACTIVE: int
CONNECT_PROMPT: int
CONNECT_NEED_DRIVE: int
CONNECT_REFCOUNT: int
CONNECT_REDIRECT: int
CONNECT_LOCALDRIVE: int
CONNECT_CURRENT_MEDIA: int
CONNECT_DEFERRED: int
CONNECT_RESERVED: int
CONNDLG_RO_PATH: int
CONNDLG_CONN_POINT: int
CONNDLG_USE_MRU: int
CONNDLG_HIDE_BOX: int
CONNDLG_PERSIST: int
CONNDLG_NOT_PERSIST: int
DISC_UPDATE_PROFILE: int
DISC_NO_FORCE: int
UNIVERSAL_NAME_INFO_LEVEL: int
REMOTE_NAME_INFO_LEVEL: int
WNFMT_MULTILINE: int
WNFMT_ABBREVIATED: int
WNFMT_INENUM: int
WNFMT_CONNECTION: int
NETINFO_DLL16: int
NETINFO_DISKRED: int
NETINFO_PRINTERRED: int
RP_LOGON: int
RP_INIFILE: int
PP_DISPLAYERRORS: int
WNCON_FORNETCARD: int
WNCON_NOTROUTED: int
WNCON_SLOWLINK: int
WNCON_DYNAMIC: int
NetSetupUnknown: int
NetSetupMachine: int
NetSetupWorkgroup: int
NetSetupDomain: int
NetSetupNonExistentDomain: int
NetSetupDnsMachine: int
NetSetupUnknownStatus: int
NetSetupUnjoined: int
NetSetupWorkgroupName: int
NetSetupDomainName: int
NetValidateAuthentication: int
NetValidatePasswordChange: int
NetValidatePasswordReset: int
ACCESS_ACCESS_LIST_INFOLEVEL: int
ACCESS_ACCESS_LIST_PARMNUM: int
SV_ALIST_MTIME_PARMNUM: int
SV_GLIST_MTIME_PARMNUM: int
SV_TYPE_LOCAL_LIST_ONLY: int
SV_ULIST_MTIME_PARMNUM: int
|
f954062eb022dbea225b9cae9b302f2c1b9b5d94
|
50c1abfbfde3554c61bb1d9f7057ba7aaa6488c0
|
/downloadaudio/downloaders/islex.py
|
b8ba50cc827d480f683819c033771ac4185a26ae
|
[] |
no_license
|
ospalh/anki-addons
|
f818a67f1c28f55c320b78c2606cae459d0b572a
|
4ece13423bd541e29d9b40ebe26ca0999a6962b1
|
refs/heads/develop
| 2023-08-08T16:56:15.390038
| 2021-02-03T09:26:45
| 2021-02-03T09:26:45
| 4,192,177
| 129
| 60
| null | 2023-07-21T13:54:46
| 2012-05-01T12:44:44
|
Python
|
UTF-8
|
Python
| false
| false
| 2,952
|
py
|
islex.py
|
# -*- mode: python; coding: utf-8 -*-
#
# Copyright © 2015 Daniel Eriksson <daniel@deriksson.se>
# Copyright © 2015 Roland Sieker <ospalh@gmail.com>
#
# License: GNU AGPL, version 3 or later;
# http://www.gnu.org/copyleft/agpl.html
"""
Download pronunciations for Icelandic from islex.is
"""
import urllib.error
import urllib.parse
import urllib.request
from .downloader import AudioDownloader
from ..download_entry import DownloadEntry
class IslexDownloader(AudioDownloader):
"""Download audio from Islex"""
def __init__(self):
AudioDownloader.__init__(self)
self.url = 'http://islex.is/'
self.icon_url = 'http://islex.is/'
self.file_extension = '.mp3'
self.field_data = None
def download_files(self, field_data):
self.downloads_list = []
if not self.language.lower().startswith('is'):
return
if field_data.split:
return
if not field_data.word:
return
self.field_data = field_data
# These flags were used by an advanced search on Islex. They
# work, but not all of them may be needed.
qdict = {'finna': 1, 'dict': 'SE', 'erflokin': 1, 'nlo': 1, 'fuzz': 1,
'samleit': field_data.word.encode('utf-8')}
soup = self.get_soup_from_url(
self.url + 'se?' + urllib.parse.urlencode(qdict))
if soup.findAll(attrs=dict(id='ord')):
# When we have a table tag with id="ord" we (probably)
# have just one word. Use that.
self.download_audio_for_soup(soup)
else:
# More than one word. Or 0 words.
links = soup.find(
attrs={'class': 'leitres'}).find('ul').findAll('a')
# When we have 0 words this raises some exception. Which is fine.
for a in links:
try:
word_soup = self.get_soup_from_url(self.url + a['href'])
self.download_audio_for_soup(word_soup)
except (AttributeError, KeyError): # What else could go wrong?
continue
def download_audio_for_soup(self, soup):
self.maybe_get_icon()
extras = {'Source': 'Islex'}
# Try to get Part of Speech/gender
try:
extras['Type'] = soup.find('table', id='flettuhaus').find(
'span', {'class': 'ofl'}).getText()
except AttributeError:
pass
entry = DownloadEntry(
self.field_data,
self.get_tempfile_from_url(
self.url + soup.find('audio').find(
'source', type="audio/mp3")['src']),
extras, self.site_icon)
# Try to get Source text
try:
entry.word = soup.find('table', id='flettuhaus').find(
'span', {'class': 'fletta'}).getText()
except AttributeError:
pass
self.downloads_list.append(entry)
|
5fdc9d1c9016452f4d77839533271754451fe8c7
|
a1bf73ad2ed4739886d3a9c5e3b337e67c36f5cb
|
/run_docs.py
|
4ba9a32df9841183353bc73ee6ec984813cb79ca
|
[
"BSD-2-Clause"
] |
permissive
|
mottosso/cmdx
|
679eccdb892218f1f4f2ba10f5ba50a139fb55f4
|
814c7d934b13ba8735736424b69d0b89639157fa
|
refs/heads/master
| 2022-11-05T12:23:44.265773
| 2022-02-28T06:52:16
| 2022-02-28T06:52:16
| 200,228,452
| 156
| 41
|
BSD-2-Clause
| 2022-05-29T22:23:29
| 2019-08-02T12:10:14
|
Python
|
UTF-8
|
Python
| false
| false
| 50
|
py
|
run_docs.py
|
"""Run each example in the README as a doctest"""
|
1ac291cfbe88a972e1449cea1313ab71d80340e9
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/refactoring/extractmethod/File.before.py
|
65466c5b192259320f2907ed337b6341794784cc
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
File.before.py
|
def __init__(self):
for base in self__class__.__bases__:
<selection>try: base.__init__(self)
except AttributeError: pass</selection>
|
74573fcbfb96be80a51d7a5dc69ce0eaeb6b751b
|
90f9bdb537573ae3081e2a93e05cfc93b5f99612
|
/examples/2_fold/plot_refolded_folds.py
|
8b6f59ab8f67477b7b7cd9caafaa13798e0ba328
|
[
"MIT"
] |
permissive
|
Loop3D/LoopStructural
|
d0fa201d9ff8d99fb47006e3def0fbfb30ece5c4
|
c6175623450dbc79ed06ed8d8bbff21b63fc8b4c
|
refs/heads/master
| 2023-06-25T21:17:47.595673
| 2023-06-19T00:40:20
| 2023-06-19T00:40:20
| 181,411,760
| 123
| 21
|
MIT
| 2023-06-19T00:49:32
| 2019-04-15T04:25:27
|
Python
|
UTF-8
|
Python
| false
| false
| 3,437
|
py
|
plot_refolded_folds.py
|
"""
2b. Refolded folds
===================
"""
from LoopStructural import GeologicalModel
from LoopStructural.visualisation import LavaVuModelViewer, RotationAnglePlotter
from LoopStructural.datasets import load_laurent2016
import numpy as np
import pandas as pd
import glob
import os
import matplotlib.pyplot as plt
import logging
# logging.getLogger().setLevel(logging.INFO)
# load in the data from the provided examples
data, bb = load_laurent2016()
# bb[1,2] = 10000
data.head()
newdata = pd.DataFrame(
[[5923.504395, 4748.135254, 3588.621094, "s2", 1.0]],
columns=["X", "Y", "Z", "feature_name", "val"],
)
data = pd.concat([data, newdata], sort=False)
rotation = [-69.11979675292969, 15.704944610595703, 6.00014591217041]
######################################################################
# Modelling S2
# ~~~~~~~~~~~~
#
model = GeologicalModel(bb[0, :], bb[1, :])
model.set_model_data(data)
s2 = model.create_and_add_fold_frame(
"s2", nelements=10000, buffer=0.5, solver="lu", damp=True
)
viewer = LavaVuModelViewer(model)
viewer.add_scalar_field(s2[0], cmap="prism")
viewer.add_isosurface(s2[0], slices=[0, 1])
viewer.add_data(s2[0])
viewer.rotate(rotation)
viewer.display()
######################################################################
# Modelling S1
# ~~~~~~~~~~~~
#
s1 = model.create_and_add_folded_fold_frame(
"s1", limb_wl=4, av_fold_axis=True, nelements=50000, buffer=0.3, solver="lu"
)
viewer = LavaVuModelViewer(model)
viewer.add_scalar_field(s1[0], cmap="prism")
viewer.rotate([-69.11979675292969, 15.704944610595703, 6.00014591217041])
viewer.display()
######################################################################
# S2/S1 S-Plots
# ~~~~~~~~~~~~~
#
s2_s1_splot = RotationAnglePlotter(s1)
s2_s1_splot.add_fold_limb_data()
s2_s1_splot.add_fold_limb_curve()
# fig, ax = plt.subplots(1,2,figsize=(10,5))
# x = np.linspace(s2[0].min(),s2[0].max(),1000)
# ax[0].plot(x,s1['fold'].fold_limb_rotation(x))
# ax[0].plot(s1['fold'].fold_limb_rotation.fold_frame_coordinate,s1['fold'].fold_limb_rotation.rotation_angle,'bo')
# ax[1].plot(s1['limb_svariogram'].lags,s1['limb_svariogram'].variogram,'bo')
######################################################################
# Modelling S0
# ~~~~~~~~~~~~
#
s0 = model.create_and_add_folded_foliation(
"s0",
limb_wl=1.0,
av_fold_axis=True,
nelements=50000,
buffer=0.2,
damp=True,
solver="lu",
)
viewer = LavaVuModelViewer(model)
viewer.add_scalar_field(s0, cmap="tab20")
viewer.rotate([-69.11979675292969, 15.704944610595703, 6.00014591217041])
viewer.display()
######################################################################
# S1/S0 S-Plots
# ~~~~~~~~~~~~~
#
s1_s0_splot = RotationAnglePlotter(s0)
s1_s0_splot.add_fold_limb_data()
s1_s0_splot.add_fold_limb_curve()
# fig, ax = plt.subplots(1,2,figsize=(10,5))
# x = np.linspace(s1[0].min(),s1[0].max(),1000)
# ax[0].plot(x,s0['fold'].fold_limb_rotation(x))
# ax[0].plot(s0['fold'].fold_limb_rotation.fold_frame_coordinate,s0['fold'].fold_limb_rotation.rotation_angle,'bo')
# ax[1].plot(s0['limb_svariogram'].lags,s1['limb_svariogram'].variogram,'bo')
viewer = LavaVuModelViewer(model)
viewer.add_isosurface(s0, nslices=10, paint_with=s0, cmap="tab20")
# viewer.add_data(s0)
# viewer.add_fold(s0['fold'],locations=s0['support'].barycentre[::80])
viewer.rotate([-69.11979675292969, 15.704944610595703, 6.00014591217041])
viewer.display()
|
c962857a1692c589c42a89dcf3dd4a54e3ade3ae
|
77fd60c4b7e7885b2ec4ca5203edf9489f6f37dc
|
/nipy/externals/transforms3d/__init__.py
|
8c93067bc074997636f018435bf64a3144728c02
|
[
"BSD-3-Clause"
] |
permissive
|
nipy/nipy
|
156f379adbc07b259e25012662510b1f64aac4c5
|
7eede02471567487e454016c1e7cf637d3afac9e
|
refs/heads/master
| 2023-04-06T14:56:36.303421
| 2023-04-05T19:40:24
| 2023-04-05T19:40:24
| 642,344
| 275
| 115
|
BSD-3-Clause
| 2023-04-05T19:40:25
| 2010-05-02T10:00:33
|
Python
|
UTF-8
|
Python
| false
| false
| 157
|
py
|
__init__.py
|
''' Copies from transforms3d package
Please see github.com/matthew-brett/transforms3d
'''
from __future__ import absolute_import
from . import quaternions
|
e34d37b2838dcda5ab07a700efd0d8b29c8eee43
|
3c42143cfaab36b3f451586ced0112bcdb9e970c
|
/tests/setup.py
|
1bd19ced58f0922161e98fdc2788c6eab697e57e
|
[
"MIT"
] |
permissive
|
wouterboomsma/eigency
|
c6217c434ba33a36ff54424bb8a4b5ea232b9659
|
baa8fe5abf4faa6ce080aa18c7c3e9cd730917b5
|
refs/heads/master
| 2023-04-14T06:37:52.250144
| 2023-03-26T06:17:22
| 2023-03-26T06:17:22
| 55,784,012
| 110
| 28
|
MIT
| 2023-03-26T06:17:23
| 2016-04-08T14:13:38
|
Cython
|
UTF-8
|
Python
| false
| false
| 537
|
py
|
setup.py
|
from Cython.Build import cythonize
from setuptools import setup
from setuptools.extension import Extension
import eigency
extensions = [
Extension(
"eigency_tests.eigency_tests",
["eigency_tests/eigency_tests.pyx"],
include_dirs=[".", "eigency_tests"] + eigency.get_includes(),
),
]
setup(
name="eigency-tests",
version="0.0.0",
ext_modules=cythonize(
extensions,
compiler_directives=dict(
language_level="3",
),
),
packages=["eigency_tests"],
)
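# Typical invocation for building the test extension in place (illustrative,
# not part of the original file):
#
#     python setup.py build_ext --inplace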
|
d7920c0dab2ba82c9ebaf0b12f68059cb488c2a8
|
080a9fea665ee362ba295b8687c22ad6f458bac5
|
/compiler/python_archive.py
|
2c69c586c1583462d4be3a3adec218ab24208c94
|
[
"Apache-2.0"
] |
permissive
|
google/subpar
|
6c8627365b6f11d3e5302f72db6e843583fb9fb2
|
5c486705da7fece4739015ce566423a8fd89916f
|
refs/heads/master
| 2023-07-31T21:41:21.363468
| 2022-12-19T19:23:19
| 2022-12-19T19:23:19
| 64,876,050
| 601
| 85
|
Apache-2.0
| 2022-12-27T14:53:54
| 2016-08-03T20:11:35
|
Python
|
UTF-8
|
Python
| false
| false
| 12,040
|
py
|
python_archive.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build a single-file executable from multiple python source files.
The final product is a hybrid file: the start of the file is a Unix
shell script (the shebang line), and the end of the file is a ZIP
archive, which the Python interpreter can import from and execute directly.
It is inspired by the TinyPar tool by Springer, which is inspired by
the Autopar tool by Greiman. It is less capable than either of these
tools, but is not tied to internal Google tools or infrastructure.
The command line flags and environment variables are intended to match
those two tools.
See also https://www.python.org/dev/peps/pep-0441/
"""
from datetime import datetime
import contextlib
import errno
import io
import logging
import os
import pkgutil
import re
import sys
import tempfile
import zipfile
from subpar.compiler import error
from subpar.compiler import manifest_parser
from subpar.compiler import stored_resource
# Boilerplate code added to __main__.py
_boilerplate_template = """\
# Boilerplate added by subpar/compiler/python_archive.py
from %(runtime_package)s import support as _
_.setup(import_roots=%(import_roots)s, zip_safe=%(zip_safe)s)
del _
# End boilerplate
"""
# Boilerplate must be after the last __future__ import. See
# https://docs.python.org/2/reference/simple_stmts.html#future
_boilerplate_insertion_regex = re.compile('''(?sx)
(?P<before>
(
(
([#][^\\r\\n]*) | # comment
(\\s*) | # whitespace
(from\\s+__future__\\s+import\\s+[^\\r\\n]+) | # future import
('[^'].*?') | # module doc comment form 1
("[^"].*?") | # module doc comment form 2
(\'\'\'.*?(\'\'\')) | # module doc comment form 3
(""".*?""") # module doc comment form 4
)
[\\r\\n]+ # end of line(s) for Mac, Unix and/or Windows
)*
)
# Boilerplate is inserted here
(?P<after>.*)
''')
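# For illustration (not part of the original source): given a main module
# that starts with
#
#     # my program
#     from __future__ import print_function
#     print("hello")
#
# the regex captures the comment and the __future__ import (plus their line
# endings) as 'before', so the boilerplate is inserted between the import
# and the first real statement.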
# Fully qualified names of subpar packages
_subpar_package = 'subpar'
_compiler_package = _subpar_package + '.compiler'
_runtime_package = _subpar_package + '.runtime'
# List of files from the runtime package to include in every .par file
_runtime_support_files = ['support.py']
# List of zero-length files to include in every .par file
_runtime_init_files = [
'subpar/__init__.py',
'subpar/runtime/__init__.py',
]
class PythonArchive(object):
"""Contains all the necessary information to generate a .par file"""
# pylint: disable=too-many-arguments
def __init__(self,
main_filename,
import_roots,
interpreter,
manifest_filename,
manifest_root,
output_filename,
timestamp,
zip_safe,
):
self.main_filename = main_filename
self.import_roots = import_roots
self.interpreter = interpreter
self.manifest_filename = manifest_filename
self.manifest_root = manifest_root
self.output_filename = output_filename
# Convert to the format ZipInfo expects
t = datetime.utcfromtimestamp(timestamp)
self.timestamp_tuple = t.timetuple()[0:6]
self.zip_safe = zip_safe
self.compression = zipfile.ZIP_DEFLATED
def create(self):
"""Create a .par file on disk
Raises:
Error, IOError, SystemError
"""
logging.info('Compiling under python %s...', sys.version)
logging.info('Making parfile [%s]...', self.output_filename)
remove_if_present(self.output_filename)
# Assemble list of files to include
logging.debug('Compiling file list from [%s]', self.manifest_filename)
manifest = manifest_parser.parse(self.manifest_filename)
# Validate manifest and add various extra files to the list
stored_resources = self.scan_manifest(manifest)
# Create parfile in temporary file
temp_parfile = self.create_temp_parfile()
try:
logging.debug('Writing parfile to temp file [%s]...',
temp_parfile.name)
self.write_bootstrap(temp_parfile)
self.write_zip_data(temp_parfile, stored_resources)
temp_parfile.close()
# Flushed and closed tempfile, may now rename it safely
self.create_final_from_temp(temp_parfile.name)
finally:
remove_if_present(temp_parfile.name)
logging.info('Success!')
def create_temp_parfile(self):
"""Create the first part of a parfile.
Returns:
A file-like object with a 'name' attribute
"""
# Create in same directory as final filename so we can atomically rename
output_dir = os.path.dirname(self.output_filename)
return tempfile.NamedTemporaryFile(dir=output_dir, delete=False)
def generate_boilerplate(self, import_roots):
"""Generate boilerplate to be insert into __main__.py
We don't know the encoding of the main source file, so
require that the template be pure ascii, which we can safely
insert.
Returns:
A string containing only ascii characters
"""
boilerplate_contents = _boilerplate_template % {
'runtime_package': _runtime_package,
'import_roots': str(import_roots),
'zip_safe': self.zip_safe,
}
return boilerplate_contents.encode('ascii').decode('ascii')
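# For example (illustrative, not part of the original source), with
# import_roots=['mylib'] and zip_safe=True the boilerplate rendered from
# _boilerplate_template is:
#
#     from subpar.runtime import support as _
#     _.setup(import_roots=['mylib'], zip_safe=True)
#     del _
#
# plus the surrounding "Boilerplate added by ..." / "End boilerplate" comments.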
def generate_main(self, main_filename, boilerplate_contents):
"""Generate the contents of the __main__.py file
We take the module that is specified as the main entry point,
and insert some boilerplate to invoke import helper code.
Returns:
A StoredResource
"""
# Read main source file, in unknown encoding. We use latin-1
# here, but any single-byte encoding that doesn't raise errors
# would work.
with io.open(main_filename, 'rt', encoding='latin-1') as main_file:
original_content = main_file.read()
# Find a good place to insert the boilerplate, which is the
# first line that is not a comment, blank line, doc comment,
# or future import.
match = re.match(_boilerplate_insertion_regex, original_content)
assert match, original_content
assert (len(match.group('before')) + len(match.group('after'))) == \
len(original_content), (match, original_content)
new_content = (match.group('before') +
boilerplate_contents +
match.group('after'))
# Insert boilerplate (might be beginning, middle or end)
encoded_content = new_content.encode('latin-1')
return stored_resource.StoredContent(
'__main__.py', self.timestamp_tuple, encoded_content)
def scan_manifest(self, manifest):
"""Return a dict of StoredResources based on an input manifest.
Returns:
A dict of store_filename to StoredResource
"""
# Extend the list of import roots to include workspace roots
top_roots = set()
for stored_path in manifest.keys():
if '/' in stored_path: # Zip file paths use / on all platforms
top_dir = stored_path.split('/', 1)[0]
if top_dir not in top_roots:
top_roots.add(top_dir)
import_roots = list(self.import_roots) + sorted(top_roots)
# Include some files that every .par file needs at runtime
stored_resources = {}
for support_file in _runtime_support_files:
resource = fetch_support_file(support_file, self.timestamp_tuple)
stored_filename = resource.zipinfo.filename
stored_resources[stored_filename] = resource
# Scan manifest
for stored_path, local_path in manifest.items():
if local_path is None:
stored_resources[stored_path] = stored_resource.EmptyFile(
stored_path, self.timestamp_tuple)
else:
stored_resources[stored_path] = stored_resource.StoredFile(
stored_path, self.timestamp_tuple, local_path)
# Copy main entry point to well-known name
if '__main__.py' in stored_resources:
raise error.Error(
('Configuration error for [%s]: Manifest file included a '
'file named __main__.py, which is not allowed') %
self.manifest_filename)
stored_resources['__main__.py'] = self.generate_main(
self.main_filename, self.generate_boilerplate(import_roots))
# Add an __init__.py for each parent package of the support files
for stored_filename in _runtime_init_files:
if stored_filename in stored_resources:
logging.debug('Skipping __init__.py already present [%s]',
stored_filename)
continue
stored_resources[stored_filename] = stored_resource.EmptyFile(
stored_filename, self.timestamp_tuple)
return stored_resources
def write_bootstrap(self, temp_parfile):
"""Write the first part of the parfile
This tells the operating system (well, UNIX) how to execute the file.
"""
logging.debug('Writing boilerplate...')
boilerplate = '#!%s\n' % self.interpreter
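        # For example, if self.interpreter were '/usr/bin/env python' (an
        # illustrative value, not one taken from this module's defaults),
        # the parfile would begin with the line '#!/usr/bin/env python'.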
temp_parfile.write(boilerplate.encode('ascii'))
def write_zip_data(self, temp_parfile, stored_resources):
"""Write the second part of a parfile, consisting of ZIP data
Args:
stored_resources: A dictionary mapping relative path to the
content to store at that path.
"""
logging.debug('Storing Files...')
        with contextlib.closing(
                zipfile.ZipFile(temp_parfile, 'w', self.compression)) as z:
items = sorted(stored_resources.items())
for relative_path, resource in items:
assert resource.zipinfo.filename == relative_path
resource.store(z)
def create_final_from_temp(self, temp_parfile_name):
"""Move newly created parfile to its final filename."""
# Python 2 doesn't have os.replace, so use os.rename which is
# not atomic in all cases.
os.chmod(temp_parfile_name, 0o0755)
os.rename(temp_parfile_name, self.output_filename)
def remove_if_present(filename):
"""Delete a file if it exists"""
try:
# Remove atomically
os.remove(filename)
except OSError as exc:
# Ignore failure if file does not exist
if exc.errno != errno.ENOENT:
raise
def fetch_support_file(name, timestamp_tuple):
"""Read a file from the runtime package
Args:
name: filename in runtime package's directory
timestamp_tuple: Stored timestamp, as ZipInfo tuple
Returns:
A StoredResource representing the content of that file
"""
stored_filename = os.path.join(_subpar_package, 'runtime', name)
content = pkgutil.get_data(_subpar_package, 'runtime/' + name)
    # content being None means the file wasn't found; content == '' is
    # valid and means the file was found but was empty.
if content is None:
raise error.Error(
'Internal error: Can\'t find runtime support file [%s]' % name)
return stored_resource.StoredContent(
stored_filename, timestamp_tuple, content)
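# Illustrative usage sketch (the file name and timestamp tuple below are
# hypothetical examples, not constants defined in this module):
#   resource = fetch_support_file('support.py', (1980, 1, 1, 0, 0, 0))
#   # resource.zipinfo.filename then points at the copy of that file stored
#   # under the subpar runtime package inside the .par archive.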
|
018a44cd331b5aaa1fd39fb1b7e739f3760fe511
|
8da41ffa2ccb09e04f95db0f211e0ed69a42a352
|
/courses/machine_learning/deepdive2/production_ml/labs/samples/contrib/kubeflow-katib/mpi-job-horovod.py
|
5476677651eb4aaa8958b19821e8f35a24380ff2
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/training-data-analyst
|
808af9b09a0e5f5657c4ca76cdd205f808d76d89
|
975a95032ce5b7012d1772c7f1f5cfe606eae839
|
refs/heads/master
| 2023-09-05T19:50:59.722334
| 2023-09-04T14:25:33
| 2023-09-04T14:25:33
| 56,459,948
| 7,311
| 5,917
|
Apache-2.0
| 2023-09-13T21:45:54
| 2016-04-17T21:39:27
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 8,953
|
py
|
mpi-job-horovod.py
|
# Kubeflow Pipeline with Katib component.
# In this example you will create a Katib Experiment using the Bayesian optimization algorithm.
# As a Trial template you will use a Kubeflow MPIJob with the Horovod MNIST training container.
# After that, you will compile a Kubeflow Pipeline with your Katib Experiment.
# Use the Kubeflow Pipelines UI to upload the Pipeline and create the Run.
# This Experiment is similar to this one: https://github.com/kubeflow/katib/blob/master/examples/v1beta1/mpijob-horovod.yaml
# Check the training container source code here: https://github.com/kubeflow/mpi-operator/tree/master/examples/horovod.
# Note: To run this example, your Kubernetes cluster should run MPIJob operator.
# Follow this guide to install MPIJob on your cluster: https://www.kubeflow.org/docs/components/training/mpi/
# Note: You have to install kfp>=1.1.1 SDK and kubeflow-katib>=0.10.1 SDK to run this example.
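# A minimal install sketch (assuming pip is available; the version pins below
# simply restate the minimums mentioned above):
#   pip install "kfp>=1.1.1" "kubeflow-katib>=0.10.1"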
import kfp
import kfp.dsl as dsl
from kfp import components
from kubeflow.katib import ApiClient
from kubeflow.katib import V1beta1ExperimentSpec
from kubeflow.katib import V1beta1AlgorithmSpec
from kubeflow.katib import V1beta1AlgorithmSetting
from kubeflow.katib import V1beta1ObjectiveSpec
from kubeflow.katib import V1beta1ParameterSpec
from kubeflow.katib import V1beta1FeasibleSpace
from kubeflow.katib import V1beta1TrialTemplate
from kubeflow.katib import V1beta1TrialParameterSpec
@dsl.pipeline(
name="Launch Katib MPIJob Experiment",
description="An example to launch Katib Experiment with MPIJob"
)
def horovod_mnist_hpo():
# Experiment name and namespace.
experiment_name = "mpi-horovod-mnist"
experiment_namespace = "anonymous"
# Trial count specification.
max_trial_count = 6
max_failed_trial_count = 3
parallel_trial_count = 2
# Objective specification.
objective = V1beta1ObjectiveSpec(
type="minimize",
goal=0.01,
objective_metric_name="loss",
)
# Algorithm specification.
algorithm = V1beta1AlgorithmSpec(
algorithm_name="bayesianoptimization",
algorithm_settings=[
V1beta1AlgorithmSetting(
name="random_state",
value="10"
)
]
)
# Experiment search space.
    # In this example we tune the learning rate and the number of training steps.
parameters = [
V1beta1ParameterSpec(
name="lr",
parameter_type="double",
feasible_space=V1beta1FeasibleSpace(
min="0.001",
max="0.003"
),
),
V1beta1ParameterSpec(
name="num-steps",
parameter_type="int",
feasible_space=V1beta1FeasibleSpace(
min="50",
max="150",
step="10"
),
),
]
# JSON template specification for the Trial's Worker Kubeflow MPIJob.
trial_spec = {
"apiVersion": "kubeflow.org/v1",
"kind": "MPIJob",
"spec": {
"slotsPerWorker": 1,
"cleanPodPolicy": "Running",
"mpiReplicaSpecs": {
"Launcher": {
"replicas": 1,
"template": {
"metadata": {
"annotations": {
"sidecar.istio.io/inject": "false"
}
},
"spec": {
"containers": [
{
"image": "docker.io/kubeflow/mpi-horovod-mnist",
"name": "mpi-launcher",
"command": [
"mpirun"
],
"args": [
"-np",
"2",
"--allow-run-as-root",
"-bind-to",
"none",
"-map-by",
"slot",
"-x",
"LD_LIBRARY_PATH",
"-x",
"PATH",
"-mca",
"pml",
"ob1",
"-mca",
"btl",
"^openib",
"python",
"/examples/tensorflow_mnist.py",
"--lr",
"${trialParameters.learningRate}",
"--num-steps",
"${trialParameters.numberSteps}"
],
"resources": {
"limits": {
"cpu": "500m",
"memory": "2Gi"
}
}
}
]
}
}
},
"Worker": {
"replicas": 2,
"template": {
"metadata": {
"annotations": {
"sidecar.istio.io/inject": "false"
}
},
"spec": {
"containers": [
{
"image": "docker.io/kubeflow/mpi-horovod-mnist",
"name": "mpi-worker",
"resources": {
"limits": {
"cpu": "500m",
"memory": "4Gi"
}
}
}
]
}
}
}
}
}
}
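    # The ${trialParameters.learningRate} and ${trialParameters.numberSteps}
    # placeholders in the launcher args above are substituted by Katib from
    # the trial parameters declared below, which in turn reference the "lr"
    # and "num-steps" search-space parameters.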
# Configure parameters for the Trial template.
trial_template = V1beta1TrialTemplate(
primary_pod_labels={
"mpi-job-role": "launcher"
},
primary_container_name="mpi-launcher",
success_condition='status.conditions.#(type=="Succeeded")#|#(status=="True")#',
failure_condition='status.conditions.#(type=="Failed")#|#(status=="True")#',
trial_parameters=[
V1beta1TrialParameterSpec(
name="learningRate",
description="Learning rate for the training model",
reference="lr"
),
V1beta1TrialParameterSpec(
name="numberSteps",
description="Number of training steps",
reference="num-steps"
),
],
trial_spec=trial_spec
)
# Create Experiment specification.
experiment_spec = V1beta1ExperimentSpec(
max_trial_count=max_trial_count,
max_failed_trial_count=max_failed_trial_count,
parallel_trial_count=parallel_trial_count,
objective=objective,
algorithm=algorithm,
parameters=parameters,
trial_template=trial_template
)
# Get the Katib launcher.
# Load component from the URL or from the file.
katib_experiment_launcher_op = components.load_component_from_url(
"https://raw.githubusercontent.com/kubeflow/pipelines/master/components/kubeflow/katib-launcher/component.yaml")
# katib_experiment_launcher_op = components.load_component_from_file(
# "../../../components/kubeflow/katib-launcher/component.yaml"
# )
# Katib launcher component.
# Experiment Spec should be serialized to a valid Kubernetes object.
# The Experiment is deleted after the Pipeline is finished.
op = katib_experiment_launcher_op(
experiment_name=experiment_name,
experiment_namespace=experiment_namespace,
experiment_spec=ApiClient().sanitize_for_serialization(experiment_spec),
experiment_timeout_minutes=60)
# Output container to print the results.
dsl.ContainerOp(
name="best-hp",
image="library/bash:4.4.23",
command=["sh", "-c"],
arguments=["echo Best HyperParameters: %s" % op.output],
)
if __name__ == "__main__":
kfp.compiler.Compiler().compile(horovod_mnist_hpo, __file__ + ".tar.gz")
|
0685257c7035cec967347e854e5d5d9770d6b9cf
|
75e79c6369a2399f90582fdda7633d34dfcf35bd
|
/tests/unit/config/old_test_configfile.py
|
1ee97053191a075f5b74088723d6e742048c8f24
|
[
"MIT"
] |
permissive
|
LLNL/merlin
|
9080b042389edd4a5747b9af8c4521422d1f88bf
|
d8bfbdbd5a07a314064ce24a6201662a34fc8028
|
refs/heads/develop
| 2023-09-04T07:57:43.780163
| 2023-08-22T19:54:18
| 2023-08-22T19:54:18
| 222,809,980
| 105
| 20
|
MIT
| 2023-08-22T19:54:20
| 2019-11-19T23:31:13
|
Python
|
UTF-8
|
Python
| false
| false
| 2,708
|
py
|
old_test_configfile.py
|
"""Tests for the configfile module."""
import os
import shutil
import tempfile
import unittest
from getpass import getuser
from merlin.config import configfile
from .utils import mkfile
CONFIG_FILE_CONTENTS = """
celery:
certs: path/to/celery/config/files
broker:
name: rabbitmq
username: testuser
password: rabbit.password # The filename that contains the password.
server: jackalope.llnl.gov
results_backend:
name: mysql
dbname: testuser
username: mlsi
password: mysql.password # The filename that contains the password.
server: rabbit.llnl.gov
"""
class TestFindConfigFile(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.appfile = mkfile(self.tmpdir, "app.yaml")
def tearDown(self):
shutil.rmtree(self.tmpdir, ignore_errors=True)
def test_tempdir(self):
self.assertTrue(os.path.isdir(self.tmpdir))
def test_find_config_file(self):
"""
        Given the path to a valid config file, find and return the full
filepath.
"""
path = configfile.find_config_file(path=self.tmpdir)
expected = os.path.join(self.tmpdir, self.appfile)
self.assertEqual(path, expected)
def test_find_config_file_error(self):
"""Given an invalid path, return None."""
invalid = "invalid/path"
expected = None
path = configfile.find_config_file(path=invalid)
self.assertEqual(path, expected)
class TestConfigFile(unittest.TestCase):
"""Unit tests for loading the config file."""
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.configfile = mkfile(self.tmpdir, "app.yaml", content=CONFIG_FILE_CONTENTS)
def tearDown(self):
shutil.rmtree(self.tmpdir, ignore_errors=True)
def test_get_config(self):
"""
        Given the directory path to a valid merlin config file, `get_config`
        should find the merlin config file and load the YAML contents into a
        dictionary.
"""
expected = {
"broker": {
"name": "rabbitmq",
"password": "rabbit.password",
"server": "jackalope.llnl.gov",
"username": "testuser",
"vhost": getuser(),
},
"celery": {"certs": "path/to/celery/config/files"},
"results_backend": {
"dbname": "testuser",
"name": "mysql",
"password": "mysql.password",
"server": "rabbit.llnl.gov",
"username": "mlsi",
},
}
self.assertDictEqual(configfile.get_config(self.tmpdir), expected)
|
40fd21503f39f1d2456e02fb89da73db02739780
|
e62c8ee151671b999c6720ab8c2aa2f96c0d7f55
|
/tests/unit/samples/wiringstringids/package/subpackage/submodule.py
|
6e9fa936334ddee3df198295a65c4bccdfe46e65
|
[] |
permissive
|
ets-labs/python-dependency-injector
|
45645973456bb6494386ad12103d06e1f1be2cd8
|
cc2304e46e054ae08dc12995428759fbfb51af10
|
refs/heads/master
| 2023-08-23T03:59:53.509743
| 2022-12-19T03:14:24
| 2022-12-19T03:14:24
| 28,774,758
| 3,217
| 273
|
BSD-3-Clause
| 2023-09-08T21:46:18
| 2015-01-04T13:23:05
|
Python
|
UTF-8
|
Python
| false
| false
| 174
|
py
|
submodule.py
|
from dependency_injector.wiring import inject, Provide
from ...service import Service
@inject
def test_function(service: Service = Provide["service"]):
return service
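# Illustrative wiring sketch (hypothetical; the container setup lives in the
# surrounding test suite, not in this module):
#   container.wire(modules=[__name__])
#   service = test_function()  # the "service" provider is injected by name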
|