# Source: redis/client.py as vendored in wistbean/learn_python3_spider
# (path: /stackoverflow/venv/lib/python3.6/site-packages/redis/client.py)
# License: MIT | Language: Python | Encoding: UTF-8 | Size: 146,267 bytes
from __future__ import unicode_literals
from itertools import chain
import datetime
import sys
import warnings
import time
import threading
import time as mod_time
import re
import hashlib
from redis._compat import (basestring, imap, iteritems, iterkeys,
itervalues, izip, long, nativestr, safe_unicode)
from redis.connection import (ConnectionPool, UnixDomainSocketConnection,
SSLConnection)
from redis.lock import Lock
from redis.exceptions import (
ConnectionError,
DataError,
ExecAbortError,
NoScriptError,
PubSubError,
RedisError,
ResponseError,
TimeoutError,
WatchError,
)
SYM_EMPTY = b''
EMPTY_RESPONSE = 'EMPTY_RESPONSE'
def list_or_args(keys, args):
# returns a single new list combining keys and args
try:
iter(keys)
# a string or bytes instance can be iterated, but indicates
# keys wasn't passed as a list
if isinstance(keys, (basestring, bytes)):
keys = [keys]
else:
keys = list(keys)
except TypeError:
keys = [keys]
if args:
keys.extend(args)
return keys
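# Hedged illustration (not part of the original module): list_or_args
# normalizes the "single key or iterable of keys, plus varargs" calling
# convention used by commands such as MGET and SDIFF.
#
#   list_or_args('k1', ('k2', 'k3'))   # -> ['k1', 'k2', 'k3']
#   list_or_args(['k1', 'k2'], None)   # -> ['k1', 'k2']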
def timestamp_to_datetime(response):
"Converts a unix timestamp to a Python datetime object"
if not response:
return None
try:
response = int(response)
except ValueError:
return None
return datetime.datetime.fromtimestamp(response)
def string_keys_to_dict(key_string, callback):
return dict.fromkeys(key_string.split(), callback)
def dict_merge(*dicts):
merged = {}
for d in dicts:
merged.update(d)
return merged
class CaseInsensitiveDict(dict):
"Case insensitive dict implementation. Assumes string keys only."
def __init__(self, data):
for k, v in iteritems(data):
self[k.upper()] = v
def __contains__(self, k):
return super(CaseInsensitiveDict, self).__contains__(k.upper())
def __delitem__(self, k):
super(CaseInsensitiveDict, self).__delitem__(k.upper())
def __getitem__(self, k):
return super(CaseInsensitiveDict, self).__getitem__(k.upper())
def get(self, k, default=None):
return super(CaseInsensitiveDict, self).get(k.upper(), default)
def __setitem__(self, k, v):
super(CaseInsensitiveDict, self).__setitem__(k.upper(), v)
def update(self, data):
data = CaseInsensitiveDict(data)
super(CaseInsensitiveDict, self).update(data)
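# A minimal sketch of CaseInsensitiveDict behavior (assumes string keys,
# as documented): keys are normalized to upper case on insert and on
# lookup, so command names match regardless of case.
#
#   d = CaseInsensitiveDict({'get': 1})
#   d['GET']      # -> 1
#   'get' in d    # -> True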
def parse_debug_object(response):
"Parse the results of Redis's DEBUG OBJECT command into a Python dict"
# The 'type' of the object is the first item in the response, but isn't
# prefixed with a name
response = nativestr(response)
response = 'type:' + response
response = dict(kv.split(':') for kv in response.split())
# parse some expected int values from the string response
# note: this cmd isn't spec'd so these may not appear in all redis versions
int_fields = ('refcount', 'serializedlength', 'lru', 'lru_seconds_idle')
for field in int_fields:
if field in response:
response[field] = int(response[field])
return response
def parse_object(response, infotype):
"Parse the results of an OBJECT command"
if infotype in ('idletime', 'refcount'):
return int_or_none(response)
return response
def parse_info(response):
"Parse the result of Redis's INFO command into a Python dict"
info = {}
response = nativestr(response)
def get_value(value):
if ',' not in value or '=' not in value:
try:
if '.' in value:
return float(value)
else:
return int(value)
except ValueError:
return value
else:
sub_dict = {}
for item in value.split(','):
k, v = item.rsplit('=', 1)
sub_dict[k] = get_value(v)
return sub_dict
for line in response.splitlines():
if line and not line.startswith('#'):
if line.find(':') != -1:
# Split the info fields into keys and values.
# Note that the value may contain ':', but the 'host:'
# pseudo-command is the only case where the key contains ':'
key, value = line.split(':', 1)
if key == 'cmdstat_host':
key, value = line.rsplit(':', 1)
info[key] = get_value(value)
else:
# if the line isn't splittable, append it to the "__raw__" key
info.setdefault('__raw__', []).append(line)
return info
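# Hedged example of parse_info on a fragment of INFO output: plain values
# are coerced with get_value(), and comma/equals values become sub-dicts.
#
#   parse_info(b'redis_version:5.0.5\r\ndb0:keys=2,expires=0,avg_ttl=0\r\n')
#   # -> {'redis_version': '5.0.5',
#   #     'db0': {'keys': 2, 'expires': 0, 'avg_ttl': 0}}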
SENTINEL_STATE_TYPES = {
'can-failover-its-master': int,
'config-epoch': int,
'down-after-milliseconds': int,
'failover-timeout': int,
'info-refresh': int,
'last-hello-message': int,
'last-ok-ping-reply': int,
'last-ping-reply': int,
'last-ping-sent': int,
'master-link-down-time': int,
'master-port': int,
'num-other-sentinels': int,
'num-slaves': int,
'o-down-time': int,
'pending-commands': int,
'parallel-syncs': int,
'port': int,
'quorum': int,
'role-reported-time': int,
's-down-time': int,
'slave-priority': int,
'slave-repl-offset': int,
'voted-leader-epoch': int
}
def parse_sentinel_state(item):
result = pairs_to_dict_typed(item, SENTINEL_STATE_TYPES)
flags = set(result['flags'].split(','))
for name, flag in (('is_master', 'master'), ('is_slave', 'slave'),
('is_sdown', 's_down'), ('is_odown', 'o_down'),
('is_sentinel', 'sentinel'),
('is_disconnected', 'disconnected'),
('is_master_down', 'master_down')):
result[name] = flag in flags
return result
def parse_sentinel_master(response):
return parse_sentinel_state(imap(nativestr, response))
def parse_sentinel_masters(response):
result = {}
for item in response:
state = parse_sentinel_state(imap(nativestr, item))
result[state['name']] = state
return result
def parse_sentinel_slaves_and_sentinels(response):
return [parse_sentinel_state(imap(nativestr, item)) for item in response]
def parse_sentinel_get_master(response):
return response and (response[0], int(response[1])) or None
def pairs_to_dict(response, decode_keys=False):
"Create a dict given a list of key/value pairs"
if response is None:
return {}
if decode_keys:
# the iter form is faster, but I don't know how to make that work
# with a nativestr() map
return dict(izip(imap(nativestr, response[::2]), response[1::2]))
else:
it = iter(response)
return dict(izip(it, it))
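# Hedged example: pairs_to_dict turns Redis's flat [k1, v1, k2, v2, ...]
# reply shape (e.g. from HGETALL) into a dict.
#
#   pairs_to_dict([b'a', b'1', b'b', b'2'])   # -> {b'a': b'1', b'b': b'2'}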
def pairs_to_dict_typed(response, type_info):
it = iter(response)
result = {}
for key, value in izip(it, it):
if key in type_info:
try:
value = type_info[key](value)
except Exception:
# if for some reason the value can't be coerced, just use
# the string value
pass
result[key] = value
return result
def zset_score_pairs(response, **options):
"""
If ``withscores`` is specified in the options, return the response as
a list of (value, score) pairs
"""
if not response or not options.get('withscores'):
return response
score_cast_func = options.get('score_cast_func', float)
it = iter(response)
return list(izip(it, imap(score_cast_func, it)))
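# Hedged example of zset_score_pairs when ``withscores`` was requested;
# scores are cast with score_cast_func (float by default).
#
#   zset_score_pairs([b'x', b'1.5', b'y', b'2'], withscores=True)
#   # -> [(b'x', 1.5), (b'y', 2.0)]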
def sort_return_tuples(response, **options):
"""
If ``groups`` is specified, return the response as a list of
n-element tuples with n being the value found in options['groups']
"""
if not response or not options.get('groups'):
return response
n = options['groups']
return list(izip(*[response[i::n] for i in range(n)]))
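# Hedged example of sort_return_tuples: with ``groups`` set to n, the
# flat reply is regrouped into n-element tuples.
#
#   sort_return_tuples([b'a', b'1', b'b', b'2'], groups=2)
#   # -> [(b'a', b'1'), (b'b', b'2')]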
def int_or_none(response):
if response is None:
return None
return int(response)
def nativestr_or_none(response):
if response is None:
return None
return nativestr(response)
def parse_stream_list(response):
if response is None:
return None
data = []
for r in response:
if r is not None:
data.append((r[0], pairs_to_dict(r[1])))
else:
data.append((None, None))
return data
def pairs_to_dict_with_nativestr_keys(response):
return pairs_to_dict(response, decode_keys=True)
def parse_list_of_dicts(response):
return list(imap(pairs_to_dict_with_nativestr_keys, response))
def parse_xclaim(response, **options):
if options.get('parse_justid', False):
return response
return parse_stream_list(response)
def parse_xinfo_stream(response):
data = pairs_to_dict(response, decode_keys=True)
first = data['first-entry']
if first is not None:
data['first-entry'] = (first[0], pairs_to_dict(first[1]))
last = data['last-entry']
if last is not None:
data['last-entry'] = (last[0], pairs_to_dict(last[1]))
return data
def parse_xread(response):
if response is None:
return []
return [[r[0], parse_stream_list(r[1])] for r in response]
def parse_xpending(response, **options):
if options.get('parse_detail', False):
return parse_xpending_range(response)
consumers = [{'name': n, 'pending': long(p)} for n, p in response[3] or []]
return {
'pending': response[0],
'min': response[1],
'max': response[2],
'consumers': consumers
}
def parse_xpending_range(response):
k = ('message_id', 'consumer', 'time_since_delivered', 'times_delivered')
return [dict(izip(k, r)) for r in response]
def float_or_none(response):
if response is None:
return None
return float(response)
def bool_ok(response):
return nativestr(response) == 'OK'
def parse_zadd(response, **options):
if response is None:
return None
if options.get('as_score'):
return float(response)
return int(response)
def parse_client_list(response, **options):
clients = []
for c in nativestr(response).splitlines():
# Values might contain '='
clients.append(dict(pair.split('=', 1) for pair in c.split(' ')))
return clients
def parse_config_get(response, **options):
response = [nativestr(i) if i is not None else None for i in response]
return response and pairs_to_dict(response) or {}
def parse_scan(response, **options):
cursor, r = response
return long(cursor), r
def parse_hscan(response, **options):
cursor, r = response
return long(cursor), r and pairs_to_dict(r) or {}
def parse_zscan(response, **options):
score_cast_func = options.get('score_cast_func', float)
cursor, r = response
it = iter(r)
return long(cursor), list(izip(it, imap(score_cast_func, it)))
def parse_slowlog_get(response, **options):
return [{
'id': item[0],
'start_time': int(item[1]),
'duration': int(item[2]),
'command': b' '.join(item[3])
} for item in response]
def parse_cluster_info(response, **options):
response = nativestr(response)
return dict(line.split(':') for line in response.splitlines() if line)
def _parse_node_line(line):
line_items = line.split(' ')
(node_id, addr, flags, master_id,
 ping, pong, epoch, connected) = line_items[:8]
slots = [sl.split('-') for sl in line_items[8:]]
node_dict = {
'node_id': node_id,
'flags': flags,
'master_id': master_id,
'last_ping_sent': ping,
'last_pong_rcvd': pong,
'epoch': epoch,
'slots': slots,
'connected': connected == 'connected'
}
return addr, node_dict
def parse_cluster_nodes(response, **options):
response = nativestr(response)
raw_lines = response
if isinstance(response, basestring):
raw_lines = response.splitlines()
return dict(_parse_node_line(line) for line in raw_lines)
def parse_georadius_generic(response, **options):
if options['store'] or options['store_dist']:
# `store` and `store_dist` can't be combined
# with other command arguments.
return response
if not isinstance(response, list):
response_list = [response]
else:
response_list = response
if not options['withdist'] and not options['withcoord']\
and not options['withhash']:
# just a bunch of places
return response_list
cast = {
'withdist': float,
'withcoord': lambda ll: (float(ll[0]), float(ll[1])),
'withhash': int
}
# zip all output results with each casting function to get
# the properly native Python value.
f = [lambda x: x]
f += [cast[o] for o in ['withdist', 'withhash', 'withcoord'] if options[o]]
return [
list(map(lambda fv: fv[0](fv[1]), zip(f, r))) for r in response_list
]
def parse_pubsub_numsub(response, **options):
return list(zip(response[0::2], response[1::2]))
def parse_client_kill(response, **options):
if isinstance(response, (long, int)):
return int(response)
return nativestr(response) == 'OK'
class Redis(object):
"""
Implementation of the Redis protocol.
This abstract class provides a Python interface to all Redis commands
and an implementation of the Redis protocol.
Connection and Pipeline derive from this, implementing how
the commands are sent and received to the Redis server
"""
RESPONSE_CALLBACKS = dict_merge(
string_keys_to_dict(
'AUTH EXPIRE EXPIREAT HEXISTS HMSET MOVE MSETNX PERSIST '
'PSETEX RENAMENX SISMEMBER SMOVE SETEX SETNX',
bool
),
string_keys_to_dict(
'BITCOUNT BITPOS DECRBY DEL EXISTS GEOADD GETBIT HDEL HLEN '
'HSTRLEN INCRBY LINSERT LLEN LPUSHX PFADD PFCOUNT RPUSHX SADD '
'SCARD SDIFFSTORE SETBIT SETRANGE SINTERSTORE SREM STRLEN '
'SUNIONSTORE UNLINK XACK XDEL XLEN XTRIM ZCARD ZLEXCOUNT ZREM '
'ZREMRANGEBYLEX ZREMRANGEBYRANK ZREMRANGEBYSCORE',
int
),
string_keys_to_dict(
'INCRBYFLOAT HINCRBYFLOAT',
float
),
string_keys_to_dict(
# these return OK, or int if redis-server is >=1.3.4
'LPUSH RPUSH',
lambda r: isinstance(r, (long, int)) and r or nativestr(r) == 'OK'
),
string_keys_to_dict('SORT', sort_return_tuples),
string_keys_to_dict('ZSCORE ZINCRBY GEODIST', float_or_none),
string_keys_to_dict(
'FLUSHALL FLUSHDB LSET LTRIM MSET PFMERGE READONLY READWRITE '
'RENAME SAVE SELECT SHUTDOWN SLAVEOF SWAPDB WATCH UNWATCH ',
bool_ok
),
string_keys_to_dict('BLPOP BRPOP', lambda r: r and tuple(r) or None),
string_keys_to_dict(
'SDIFF SINTER SMEMBERS SUNION',
lambda r: r and set(r) or set()
),
string_keys_to_dict(
'ZPOPMAX ZPOPMIN ZRANGE ZRANGEBYSCORE ZREVRANGE ZREVRANGEBYSCORE',
zset_score_pairs
),
string_keys_to_dict('BZPOPMIN BZPOPMAX',
lambda r: r and (r[0], r[1], float(r[2])) or None),
string_keys_to_dict('ZRANK ZREVRANK', int_or_none),
string_keys_to_dict('XREVRANGE XRANGE', parse_stream_list),
string_keys_to_dict('XREAD XREADGROUP', parse_xread),
string_keys_to_dict('BGREWRITEAOF BGSAVE', lambda r: True),
{
'CLIENT GETNAME': lambda r: r and nativestr(r),
'CLIENT ID': int,
'CLIENT KILL': parse_client_kill,
'CLIENT LIST': parse_client_list,
'CLIENT SETNAME': bool_ok,
'CLIENT UNBLOCK': lambda r: r and int(r) == 1 or False,
'CLIENT PAUSE': bool_ok,
'CLUSTER ADDSLOTS': bool_ok,
'CLUSTER COUNT-FAILURE-REPORTS': lambda x: int(x),
'CLUSTER COUNTKEYSINSLOT': lambda x: int(x),
'CLUSTER DELSLOTS': bool_ok,
'CLUSTER FAILOVER': bool_ok,
'CLUSTER FORGET': bool_ok,
'CLUSTER INFO': parse_cluster_info,
'CLUSTER KEYSLOT': lambda x: int(x),
'CLUSTER MEET': bool_ok,
'CLUSTER NODES': parse_cluster_nodes,
'CLUSTER REPLICATE': bool_ok,
'CLUSTER RESET': bool_ok,
'CLUSTER SAVECONFIG': bool_ok,
'CLUSTER SET-CONFIG-EPOCH': bool_ok,
'CLUSTER SETSLOT': bool_ok,
'CLUSTER SLAVES': parse_cluster_nodes,
'CONFIG GET': parse_config_get,
'CONFIG RESETSTAT': bool_ok,
'CONFIG SET': bool_ok,
'DEBUG OBJECT': parse_debug_object,
'GEOHASH': lambda r: list(map(nativestr_or_none, r)),
'GEOPOS': lambda r: list(map(lambda ll: (float(ll[0]),
float(ll[1]))
if ll is not None else None, r)),
'GEORADIUS': parse_georadius_generic,
'GEORADIUSBYMEMBER': parse_georadius_generic,
'HGETALL': lambda r: r and pairs_to_dict(r) or {},
'HSCAN': parse_hscan,
'INFO': parse_info,
'LASTSAVE': timestamp_to_datetime,
'MEMORY PURGE': bool_ok,
'MEMORY USAGE': int_or_none,
'OBJECT': parse_object,
'PING': lambda r: nativestr(r) == 'PONG',
'PUBSUB NUMSUB': parse_pubsub_numsub,
'RANDOMKEY': lambda r: r and r or None,
'SCAN': parse_scan,
'SCRIPT EXISTS': lambda r: list(imap(bool, r)),
'SCRIPT FLUSH': bool_ok,
'SCRIPT KILL': bool_ok,
'SCRIPT LOAD': nativestr,
'SENTINEL GET-MASTER-ADDR-BY-NAME': parse_sentinel_get_master,
'SENTINEL MASTER': parse_sentinel_master,
'SENTINEL MASTERS': parse_sentinel_masters,
'SENTINEL MONITOR': bool_ok,
'SENTINEL REMOVE': bool_ok,
'SENTINEL SENTINELS': parse_sentinel_slaves_and_sentinels,
'SENTINEL SET': bool_ok,
'SENTINEL SLAVES': parse_sentinel_slaves_and_sentinels,
'SET': lambda r: r and nativestr(r) == 'OK',
'SLOWLOG GET': parse_slowlog_get,
'SLOWLOG LEN': int,
'SLOWLOG RESET': bool_ok,
'SSCAN': parse_scan,
'TIME': lambda x: (int(x[0]), int(x[1])),
'XCLAIM': parse_xclaim,
'XGROUP CREATE': bool_ok,
'XGROUP DELCONSUMER': int,
'XGROUP DESTROY': bool,
'XGROUP SETID': bool_ok,
'XINFO CONSUMERS': parse_list_of_dicts,
'XINFO GROUPS': parse_list_of_dicts,
'XINFO STREAM': parse_xinfo_stream,
'XPENDING': parse_xpending,
'ZADD': parse_zadd,
'ZSCAN': parse_zscan,
}
)
@classmethod
def from_url(cls, url, db=None, **kwargs):
"""
Return a Redis client object configured from the given URL
For example::
redis://[:password]@localhost:6379/0
rediss://[:password]@localhost:6379/0
unix://[:password]@/path/to/socket.sock?db=0
Three URL schemes are supported:
- ```redis://``
<http://www.iana.org/assignments/uri-schemes/prov/redis>`_ creates a
normal TCP socket connection
- ```rediss://``
<http://www.iana.org/assignments/uri-schemes/prov/rediss>`_ creates a
SSL wrapped TCP socket connection
- ``unix://`` creates a Unix Domain Socket connection
There are several ways to specify a database number. The parse function
will return the first specified option:
1. A ``db`` querystring option, e.g. redis://localhost?db=0
2. If using the redis:// scheme, the path argument of the url, e.g.
redis://localhost/0
3. The ``db`` argument to this function.
If none of these options are specified, db=0 is used.
Any additional querystring arguments and keyword arguments will be
passed along to the ConnectionPool class's initializer. In the case
of conflicting arguments, querystring arguments always win.
"""
connection_pool = ConnectionPool.from_url(url, db=db, **kwargs)
return cls(connection_pool=connection_pool)
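# Hedged usage sketch for from_url (hosts, passwords and db numbers
# below are illustrative assumptions):
#
#   r = Redis.from_url('redis://localhost:6379/0')
#   r = Redis.from_url('rediss://:secret@example.com:6380/1')
#   r = Redis.from_url('unix:///path/to/socket.sock?db=0')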
def __init__(self, host='localhost', port=6379,
db=0, password=None, socket_timeout=None,
socket_connect_timeout=None,
socket_keepalive=None, socket_keepalive_options=None,
connection_pool=None, unix_socket_path=None,
encoding='utf-8', encoding_errors='strict',
charset=None, errors=None,
decode_responses=False, retry_on_timeout=False,
ssl=False, ssl_keyfile=None, ssl_certfile=None,
ssl_cert_reqs='required', ssl_ca_certs=None,
max_connections=None, single_connection_client=False,
health_check_interval=0):
if not connection_pool:
if charset is not None:
warnings.warn(DeprecationWarning(
'"charset" is deprecated. Use "encoding" instead'))
encoding = charset
if errors is not None:
warnings.warn(DeprecationWarning(
'"errors" is deprecated. Use "encoding_errors" instead'))
encoding_errors = errors
kwargs = {
'db': db,
'password': password,
'socket_timeout': socket_timeout,
'encoding': encoding,
'encoding_errors': encoding_errors,
'decode_responses': decode_responses,
'retry_on_timeout': retry_on_timeout,
'max_connections': max_connections,
'health_check_interval': health_check_interval,
}
# based on input, setup appropriate connection args
if unix_socket_path is not None:
kwargs.update({
'path': unix_socket_path,
'connection_class': UnixDomainSocketConnection
})
else:
# TCP specific options
kwargs.update({
'host': host,
'port': port,
'socket_connect_timeout': socket_connect_timeout,
'socket_keepalive': socket_keepalive,
'socket_keepalive_options': socket_keepalive_options,
})
if ssl:
kwargs.update({
'connection_class': SSLConnection,
'ssl_keyfile': ssl_keyfile,
'ssl_certfile': ssl_certfile,
'ssl_cert_reqs': ssl_cert_reqs,
'ssl_ca_certs': ssl_ca_certs,
})
connection_pool = ConnectionPool(**kwargs)
self.connection_pool = connection_pool
self.connection = None
if single_connection_client:
self.connection = self.connection_pool.get_connection('_')
self.response_callbacks = CaseInsensitiveDict(
self.__class__.RESPONSE_CALLBACKS)
def __repr__(self):
return "%s<%s>" % (type(self).__name__, repr(self.connection_pool))
def set_response_callback(self, command, callback):
"Set a custom Response Callback"
self.response_callbacks[command] = callback
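# Hedged example: install a per-instance response callback; the callable
# receives the raw reply plus any command options.
#
#   r.set_response_callback('GET', lambda response, **options: response)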
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
"""
return Pipeline(
self.connection_pool,
self.response_callbacks,
transaction,
shard_hint)
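# Hedged pipeline sketch (assumes ``r`` is a connected Redis instance):
# commands are buffered client-side and sent in one round trip.
#
#   pipe = r.pipeline()
#   pipe.set('counter', 1)
#   pipe.incr('counter')
#   pipe.execute()   # -> [True, 2]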
def transaction(self, func, *watches, **kwargs):
"""
Convenience method for executing the callable `func` as a transaction
while watching all keys specified in `watches`. The 'func' callable
should expect a single argument which is a Pipeline object.
"""
shard_hint = kwargs.pop('shard_hint', None)
value_from_callable = kwargs.pop('value_from_callable', False)
watch_delay = kwargs.pop('watch_delay', None)
with self.pipeline(True, shard_hint) as pipe:
while True:
try:
if watches:
pipe.watch(*watches)
func_value = func(pipe)
exec_value = pipe.execute()
return func_value if value_from_callable else exec_value
except WatchError:
if watch_delay is not None and watch_delay > 0:
time.sleep(watch_delay)
continue
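# Hedged example of transaction() (assumes ``r`` is a client instance):
# ``func`` receives a Pipeline that has already WATCHed the keys; call
# pipe.multi() before queueing writes so reads above it run immediately.
#
#   def double_count(pipe):
#       current = int(pipe.get('count') or 0)
#       pipe.multi()
#       pipe.set('count', current * 2)
#   r.transaction(double_count, 'count')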
def lock(self, name, timeout=None, sleep=0.1, blocking_timeout=None,
lock_class=None, thread_local=True):
"""
Return a new Lock object using key ``name`` that mimics
the behavior of threading.Lock.
If specified, ``timeout`` indicates a maximum life for the lock.
By default, it will remain locked until release() is called.
``sleep`` indicates the amount of time to sleep per loop iteration
when the lock is in blocking mode and another client is currently
holding the lock.
``blocking_timeout`` indicates the maximum amount of time in seconds to
spend trying to acquire the lock. A value of ``None`` indicates
continue trying forever. ``blocking_timeout`` can be specified as a
float or integer, both representing the number of seconds to wait.
``lock_class`` forces the specified lock implementation.
``thread_local`` indicates whether the lock token is placed in
thread-local storage. By default, the token is placed in thread local
storage so that a thread only sees its token, not a token set by
another thread. Consider the following timeline:
time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.
thread-1 sets the token to "abc"
time: 1, thread-2 blocks trying to acquire `my-lock` using the
Lock instance.
time: 5, thread-1 has not yet completed. redis expires the lock
key.
time: 5, thread-2 acquired `my-lock` now that it's available.
thread-2 sets the token to "xyz"
time: 6, thread-1 finishes its work and calls release(). if the
token is *not* stored in thread local storage, then
thread-1 would see the token value as "xyz" and would be
able to successfully release the thread-2's lock.
In some use cases it's necessary to disable thread local storage. For
example, if you have code where one thread acquires a lock and passes
that lock instance to a worker thread to release later. If thread
local storage isn't disabled in this case, the worker thread won't see
the token set by the thread that acquired the lock. Our assumption
is that these cases aren't common and as such default to using
thread local storage. """
if lock_class is None:
lock_class = Lock
return lock_class(self, name, timeout=timeout, sleep=sleep,
blocking_timeout=blocking_timeout,
thread_local=thread_local)
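# Hedged lock usage sketch; Lock objects support the context manager
# protocol, so the lock is released even if the body raises.
#
#   with r.lock('resource-lock', timeout=5, blocking_timeout=3):
#       pass  # critical section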
def pubsub(self, **kwargs):
"""
Return a Publish/Subscribe object. With this object, you can
subscribe to channels and listen for messages that get published to
them.
"""
return PubSub(self.connection_pool, **kwargs)
def monitor(self):
return Monitor(self.connection_pool)
def client(self):
return self.__class__(connection_pool=self.connection_pool,
single_connection_client=True)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __del__(self):
self.close()
def close(self):
conn = self.connection
if conn:
self.connection = None
self.connection_pool.release(conn)
# COMMAND EXECUTION AND PROTOCOL PARSING
def execute_command(self, *args, **options):
"Execute a command and return a parsed response"
pool = self.connection_pool
command_name = args[0]
conn = self.connection or pool.get_connection(command_name, **options)
try:
conn.send_command(*args)
return self.parse_response(conn, command_name, **options)
except (ConnectionError, TimeoutError) as e:
conn.disconnect()
if not (conn.retry_on_timeout and isinstance(e, TimeoutError)):
raise
conn.send_command(*args)
return self.parse_response(conn, command_name, **options)
finally:
if not self.connection:
pool.release(conn)
def parse_response(self, connection, command_name, **options):
"Parses a response from the Redis server"
try:
response = connection.read_response()
except ResponseError:
if EMPTY_RESPONSE in options:
return options[EMPTY_RESPONSE]
raise
if command_name in self.response_callbacks:
return self.response_callbacks[command_name](response, **options)
return response
# SERVER INFORMATION
def bgrewriteaof(self):
"Tell the Redis server to rewrite the AOF file from data in memory."
return self.execute_command('BGREWRITEAOF')
def bgsave(self):
"""
Tell the Redis server to save its data to disk. Unlike save(),
this method is asynchronous and returns immediately.
"""
return self.execute_command('BGSAVE')
def client_kill(self, address):
"Disconnects the client at ``address`` (ip:port)"
return self.execute_command('CLIENT KILL', address)
def client_kill_filter(self, _id=None, _type=None, addr=None, skipme=None):
"""
Disconnects client(s) using a variety of filter options
:param _id: Kills a client by its unique ID field
:param _type: Kills a client by type where type is one of 'normal',
'master', 'slave' or 'pubsub'
:param addr: Kills a client by its 'address:port'
:param skipme: If True, then the client calling the command
will not get killed even if it is identified by one of the filter
options. If skipme is not provided, the server defaults to skipme=True
"""
args = []
if _type is not None:
client_types = ('normal', 'master', 'slave', 'pubsub')
if str(_type).lower() not in client_types:
raise DataError("CLIENT KILL type must be one of %r" % (
client_types,))
args.extend((b'TYPE', _type))
if skipme is not None:
if not isinstance(skipme, bool):
raise DataError("CLIENT KILL skipme must be a bool")
if skipme:
args.extend((b'SKIPME', b'YES'))
else:
args.extend((b'SKIPME', b'NO'))
if _id is not None:
args.extend((b'ID', _id))
if addr is not None:
args.extend((b'ADDR', addr))
if not args:
raise DataError("CLIENT KILL <filter> <value> ... ... <filter> "
"<value> must specify at least one filter")
return self.execute_command('CLIENT KILL', *args)
def client_list(self, _type=None):
"""
Returns a list of currently connected clients.
If a client type is specified, only clients of that type are returned.
:param _type: optional. one of the client types (normal, master,
replica, pubsub)
"""
"Returns a list of currently connected clients"
if _type is not None:
client_types = ('normal', 'master', 'replica', 'pubsub')
if str(_type).lower() not in client_types:
raise DataError("CLIENT LIST _type must be one of %r" % (
client_types,))
return self.execute_command('CLIENT LIST', b'TYPE', _type)
return self.execute_command('CLIENT LIST')
def client_getname(self):
"Returns the current connection name"
return self.execute_command('CLIENT GETNAME')
def client_id(self):
"Returns the current connection id"
return self.execute_command('CLIENT ID')
def client_setname(self, name):
"Sets the current connection name"
return self.execute_command('CLIENT SETNAME', name)
def client_unblock(self, client_id, error=False):
"""
Unblocks a connection by its client id.
If ``error`` is True, unblocks the client with a special error message.
If ``error`` is False (default), the client is unblocked using the
regular timeout mechanism.
"""
args = ['CLIENT UNBLOCK', int(client_id)]
if error:
args.append(b'ERROR')
return self.execute_command(*args)
def client_pause(self, timeout):
"""
Suspend all the Redis clients for the specified amount of time
:param timeout: milliseconds to pause clients
"""
if not isinstance(timeout, (int, long)):
raise DataError("CLIENT PAUSE timeout must be an integer")
return self.execute_command('CLIENT PAUSE', str(timeout))
def readwrite(self):
"Disables read queries for a connection to a Redis Cluster slave node"
return self.execute_command('READWRITE')
def readonly(self):
"Enables read queries for a connection to a Redis Cluster replica node"
return self.execute_command('READONLY')
def config_get(self, pattern="*"):
"Return a dictionary of configuration based on the ``pattern``"
return self.execute_command('CONFIG GET', pattern)
def config_set(self, name, value):
"Set config item ``name`` with ``value``"
return self.execute_command('CONFIG SET', name, value)
def config_resetstat(self):
"Reset runtime statistics"
return self.execute_command('CONFIG RESETSTAT')
def config_rewrite(self):
"Rewrite config file with the minimal change to reflect running config"
return self.execute_command('CONFIG REWRITE')
def dbsize(self):
"Returns the number of keys in the current database"
return self.execute_command('DBSIZE')
def debug_object(self, key):
"Returns version specific meta information about a given key"
return self.execute_command('DEBUG OBJECT', key)
def echo(self, value):
"Echo the string back from the server"
return self.execute_command('ECHO', value)
def flushall(self, asynchronous=False):
"""
Delete all keys in all databases on the current host.
``asynchronous`` indicates whether the operation is
executed asynchronously by the server.
"""
args = []
if asynchronous:
args.append(b'ASYNC')
return self.execute_command('FLUSHALL', *args)
def flushdb(self, asynchronous=False):
"""
Delete all keys in the current database.
``asynchronous`` indicates whether the operation is
executed asynchronously by the server.
"""
args = []
if asynchronous:
args.append(b'ASYNC')
return self.execute_command('FLUSHDB', *args)
def swapdb(self, first, second):
"Swap two databases"
return self.execute_command('SWAPDB', first, second)
def info(self, section=None):
"""
Returns a dictionary containing information about the Redis server
The ``section`` option can be used to select a specific section
of information
The ``section`` option is not supported by older versions of Redis
Server and will generate a ResponseError
"""
if section is None:
return self.execute_command('INFO')
else:
return self.execute_command('INFO', section)
def lastsave(self):
"""
Return a Python datetime object representing the last time the
Redis database was saved to disk
"""
return self.execute_command('LASTSAVE')
def migrate(self, host, port, keys, destination_db, timeout,
copy=False, replace=False, auth=None):
"""
Migrate 1 or more keys from the current Redis server to a different
server specified by the ``host``, ``port`` and ``destination_db``.
The ``timeout``, specified in milliseconds, indicates the maximum
time the connection between the two servers can be idle before the
command is interrupted.
If ``copy`` is True, the specified ``keys`` are NOT deleted from
the source server.
If ``replace`` is True, this operation will overwrite the keys
on the destination server if they exist.
If ``auth`` is specified, authenticate to the destination server with
the password provided.
"""
keys = list_or_args(keys, [])
if not keys:
raise DataError('MIGRATE requires at least one key')
pieces = []
if copy:
pieces.append(b'COPY')
if replace:
pieces.append(b'REPLACE')
if auth:
pieces.append(b'AUTH')
pieces.append(auth)
pieces.append(b'KEYS')
pieces.extend(keys)
return self.execute_command('MIGRATE', host, port, '', destination_db,
timeout, *pieces)
def object(self, infotype, key):
"Return the encoding, idletime, or refcount about the key"
return self.execute_command('OBJECT', infotype, key, infotype=infotype)
def memory_usage(self, key, samples=None):
"""
Return the total memory usage for key, its value and associated
administrative overheads.
For nested data structures, ``samples`` is the number of elements to
sample. If left unspecified, the server's default is 5. Use 0 to sample
all elements.
"""
args = []
if isinstance(samples, int):
args.extend([b'SAMPLES', samples])
return self.execute_command('MEMORY USAGE', key, *args)
def memory_purge(self):
"Attempts to purge dirty pages for reclamation by allocator"
return self.execute_command('MEMORY PURGE')
def ping(self):
"Ping the Redis server"
return self.execute_command('PING')
def save(self):
"""
Tell the Redis server to save its data to disk,
blocking until the save is complete
"""
return self.execute_command('SAVE')
def sentinel(self, *args):
"Redis Sentinel's SENTINEL command."
warnings.warn(
DeprecationWarning('Use the individual sentinel_* methods'))
def sentinel_get_master_addr_by_name(self, service_name):
"Returns a (host, port) pair for the given ``service_name``"
return self.execute_command('SENTINEL GET-MASTER-ADDR-BY-NAME',
service_name)
def sentinel_master(self, service_name):
"Returns a dictionary containing the specified masters state."
return self.execute_command('SENTINEL MASTER', service_name)
def sentinel_masters(self):
"Returns a list of dictionaries containing each master's state."
return self.execute_command('SENTINEL MASTERS')
def sentinel_monitor(self, name, ip, port, quorum):
"Add a new master to Sentinel to be monitored"
return self.execute_command('SENTINEL MONITOR', name, ip, port, quorum)
def sentinel_remove(self, name):
"Remove a master from Sentinel's monitoring"
return self.execute_command('SENTINEL REMOVE', name)
def sentinel_sentinels(self, service_name):
"Returns a list of sentinels for ``service_name``"
return self.execute_command('SENTINEL SENTINELS', service_name)
def sentinel_set(self, name, option, value):
"Set Sentinel monitoring parameters for a given master"
return self.execute_command('SENTINEL SET', name, option, value)
def sentinel_slaves(self, service_name):
"Returns a list of slaves for ``service_name``"
return self.execute_command('SENTINEL SLAVES', service_name)
def shutdown(self, save=False, nosave=False):
"""Shutdown the Redis server. If Redis has persistence configured,
data will be flushed before shutdown. If the "save" option is set,
a data flush will be attempted even if there is no persistence
configured. If the "nosave" option is set, no data flush will be
attempted. The "save" and "nosave" options cannot both be set.
"""
if save and nosave:
raise DataError('SHUTDOWN save and nosave cannot both be set')
args = ['SHUTDOWN']
if save:
args.append('SAVE')
if nosave:
args.append('NOSAVE')
try:
self.execute_command(*args)
except ConnectionError:
# a ConnectionError here is expected
return
raise RedisError("SHUTDOWN seems to have failed.")
def slaveof(self, host=None, port=None):
"""
Set the server to be a replicated slave of the instance identified
by the ``host`` and ``port``. If called without arguments, the
instance is promoted to a master instead.
"""
if host is None and port is None:
return self.execute_command('SLAVEOF', b'NO', b'ONE')
return self.execute_command('SLAVEOF', host, port)
def slowlog_get(self, num=None):
"""
Get the entries from the slowlog. If ``num`` is specified, get the
most recent ``num`` items.
"""
args = ['SLOWLOG GET']
if num is not None:
args.append(num)
return self.execute_command(*args)
def slowlog_len(self):
"Get the number of items in the slowlog"
return self.execute_command('SLOWLOG LEN')
def slowlog_reset(self):
"Remove all items in the slowlog"
return self.execute_command('SLOWLOG RESET')
def time(self):
"""
Returns the server time as a 2-item tuple of ints:
(seconds since epoch, microseconds into this second).
"""
return self.execute_command('TIME')
def wait(self, num_replicas, timeout):
"""
Redis synchronous replication.
Returns the number of replicas that processed the query once at least
``num_replicas`` have done so, or once the ``timeout`` (in
milliseconds) is reached.
"""
return self.execute_command('WAIT', num_replicas, timeout)
# BASIC KEY COMMANDS
def append(self, key, value):
"""
Appends the string ``value`` to the value at ``key``. If ``key``
doesn't already exist, create it with a value of ``value``.
Returns the new length of the value at ``key``.
"""
return self.execute_command('APPEND', key, value)
def bitcount(self, key, start=None, end=None):
"""
Returns the count of set bits in the value of ``key``. Optional
``start`` and ``end`` parameters indicate which bytes to consider
"""
params = [key]
if start is not None and end is not None:
params.append(start)
params.append(end)
elif (start is not None and end is None) or \
(end is not None and start is None):
raise DataError("Both start and end must be specified")
return self.execute_command('BITCOUNT', *params)
def bitfield(self, key, default_overflow=None):
"""
Return a BitFieldOperation instance to conveniently construct one or
more bitfield operations on ``key``.
"""
return BitFieldOperation(self, key, default_overflow=default_overflow)
def bitop(self, operation, dest, *keys):
"""
Perform a bitwise operation using ``operation`` between ``keys`` and
store the result in ``dest``.
"""
return self.execute_command('BITOP', operation, dest, *keys)
def bitpos(self, key, bit, start=None, end=None):
"""
Return the position of the first bit set to 1 or 0 in a string.
``start`` and ``end`` define the search range. The range is interpreted
as a range of bytes and not a range of bits, so start=0 and end=2
means to look at the first three bytes.
"""
if bit not in (0, 1):
raise DataError('bit must be 0 or 1')
params = [key, bit]
if start is not None:
    params.append(start)
if start is not None and end is not None:
params.append(end)
elif start is None and end is not None:
raise DataError("start argument is not set, "
"when end is specified")
return self.execute_command('BITPOS', *params)
def decr(self, name, amount=1):
"""
Decrements the value of ``key`` by ``amount``. If no key exists,
the value will be initialized as 0 - ``amount``
"""
# ``decr()`` is an alias for ``decrby()``, since DECR is implemented
# via the DECRBY redis command.
return self.decrby(name, amount)
def decrby(self, name, amount=1):
"""
Decrements the value of ``key`` by ``amount``. If no key exists,
the value will be initialized as 0 - ``amount``
"""
return self.execute_command('DECRBY', name, amount)
def delete(self, *names):
"Delete one or more keys specified by ``names``"
return self.execute_command('DEL', *names)
def __delitem__(self, name):
self.delete(name)
def dump(self, name):
"""
Return a serialized version of the value stored at the specified key.
If key does not exist a nil bulk reply is returned.
"""
return self.execute_command('DUMP', name)
def exists(self, *names):
"Returns the number of ``names`` that exist"
return self.execute_command('EXISTS', *names)
__contains__ = exists
def expire(self, name, time):
"""
Set an expire flag on key ``name`` for ``time`` seconds. ``time``
can be represented by an integer or a Python timedelta object.
"""
if isinstance(time, datetime.timedelta):
time = int(time.total_seconds())
return self.execute_command('EXPIRE', name, time)
def expireat(self, name, when):
"""
Set an expire flag on key ``name``. ``when`` can be represented
as an integer indicating unix time or a Python datetime object.
"""
if isinstance(when, datetime.datetime):
when = int(mod_time.mktime(when.timetuple()))
return self.execute_command('EXPIREAT', name, when)
def get(self, name):
"""
Return the value at key ``name``, or None if the key doesn't exist
"""
return self.execute_command('GET', name)
def __getitem__(self, name):
"""
Return the value at key ``name``, raises a KeyError if the key
doesn't exist.
"""
value = self.get(name)
if value is not None:
return value
raise KeyError(name)
def getbit(self, name, offset):
"Returns a boolean indicating the value of ``offset`` in ``name``"
return self.execute_command('GETBIT', name, offset)
def getrange(self, key, start, end):
"""
Returns the substring of the string value stored at ``key``,
determined by the offsets ``start`` and ``end`` (both are inclusive)
"""
return self.execute_command('GETRANGE', key, start, end)
def getset(self, name, value):
"""
Sets the value at key ``name`` to ``value``
and returns the old value at key ``name`` atomically.
"""
return self.execute_command('GETSET', name, value)
def incr(self, name, amount=1):
"""
Increments the value of ``key`` by ``amount``. If no key exists,
the value will be initialized as ``amount``
"""
return self.incrby(name, amount)
def incrby(self, name, amount=1):
"""
Increments the value of ``key`` by ``amount``. If no key exists,
the value will be initialized as ``amount``
"""
# ``incr()`` is an alias for this method, since INCR is implemented
# via the INCRBY redis command.
return self.execute_command('INCRBY', name, amount)
def incrbyfloat(self, name, amount=1.0):
"""
Increments the value at key ``name`` by floating ``amount``.
If no key exists, the value will be initialized as ``amount``
"""
return self.execute_command('INCRBYFLOAT', name, amount)
def keys(self, pattern='*'):
"Returns a list of keys matching ``pattern``"
return self.execute_command('KEYS', pattern)
def mget(self, keys, *args):
"""
Returns a list of values ordered identically to ``keys``
"""
args = list_or_args(keys, args)
options = {}
if not args:
options[EMPTY_RESPONSE] = []
return self.execute_command('MGET', *args, **options)
def mset(self, mapping):
"""
Sets key/values based on a mapping. Mapping is a dictionary of
key/value pairs. Both keys and values should be strings or types that
can be cast to a string via str().
"""
items = []
for pair in iteritems(mapping):
items.extend(pair)
return self.execute_command('MSET', *items)
def msetnx(self, mapping):
"""
Sets key/values based on a mapping if none of the keys are already set.
Mapping is a dictionary of key/value pairs. Both keys and values
should be strings or types that can be cast to a string via str().
Returns a boolean indicating if the operation was successful.
"""
items = []
for pair in iteritems(mapping):
items.extend(pair)
return self.execute_command('MSETNX', *items)
def move(self, name, db):
"Moves the key ``name`` to a different Redis database ``db``"
return self.execute_command('MOVE', name, db)
def persist(self, name):
"Removes an expiration on ``name``"
return self.execute_command('PERSIST', name)
def pexpire(self, name, time):
"""
Set an expire flag on key ``name`` for ``time`` milliseconds.
``time`` can be represented by an integer or a Python timedelta
object.
"""
if isinstance(time, datetime.timedelta):
time = int(time.total_seconds() * 1000)
return self.execute_command('PEXPIRE', name, time)
def pexpireat(self, name, when):
"""
Set an expire flag on key ``name``. ``when`` can be represented
as an integer representing unix time in milliseconds (unix time * 1000)
or a Python datetime object.
"""
if isinstance(when, datetime.datetime):
ms = int(when.microsecond / 1000)
when = int(mod_time.mktime(when.timetuple())) * 1000 + ms
return self.execute_command('PEXPIREAT', name, when)
def psetex(self, name, time_ms, value):
"""
Set the value of key ``name`` to ``value`` that expires in ``time_ms``
milliseconds. ``time_ms`` can be represented by an integer or a Python
timedelta object
"""
if isinstance(time_ms, datetime.timedelta):
time_ms = int(time_ms.total_seconds() * 1000)
return self.execute_command('PSETEX', name, time_ms, value)
def pttl(self, name):
"Returns the number of milliseconds until the key ``name`` will expire"
return self.execute_command('PTTL', name)
def randomkey(self):
"Returns the name of a random key"
return self.execute_command('RANDOMKEY')
def rename(self, src, dst):
"""
Rename key ``src`` to ``dst``
"""
return self.execute_command('RENAME', src, dst)
def renamenx(self, src, dst):
"Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist"
return self.execute_command('RENAMENX', src, dst)
def restore(self, name, ttl, value, replace=False):
"""
Create a key using the provided serialized value, previously obtained
using DUMP.
"""
params = [name, ttl, value]
if replace:
params.append('REPLACE')
return self.execute_command('RESTORE', *params)
def set(self, name, value, ex=None, px=None, nx=False, xx=False):
"""
Set the value at key ``name`` to ``value``
``ex`` sets an expire flag on key ``name`` for ``ex`` seconds.
``px`` sets an expire flag on key ``name`` for ``px`` milliseconds.
``nx`` if set to True, set the value at key ``name`` to ``value`` only
if it does not exist.
``xx`` if set to True, set the value at key ``name`` to ``value`` only
if it already exists.
"""
pieces = [name, value]
if ex is not None:
pieces.append('EX')
if isinstance(ex, datetime.timedelta):
ex = int(ex.total_seconds())
pieces.append(ex)
if px is not None:
pieces.append('PX')
if isinstance(px, datetime.timedelta):
px = int(px.total_seconds() * 1000)
pieces.append(px)
if nx:
pieces.append('NX')
if xx:
pieces.append('XX')
return self.execute_command('SET', *pieces)
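# Hedged examples of set() option combinations (assumes ``r`` is a
# connected client; key and value names are illustrative):
#
#   r.set('k', 'v', ex=10)            # expire after 10 seconds
#   r.set('k', 'v', nx=True)          # only set if 'k' does not exist
#   r.set('k', 'v', px=500, xx=True)  # only if it exists; 500ms TTL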
def __setitem__(self, name, value):
self.set(name, value)
def setbit(self, name, offset, value):
"""
Flag the ``offset`` in ``name`` as ``value``. Returns a boolean
indicating the previous value of ``offset``.
"""
value = value and 1 or 0
return self.execute_command('SETBIT', name, offset, value)
def setex(self, name, time, value):
"""
Set the value of key ``name`` to ``value`` that expires in ``time``
seconds. ``time`` can be represented by an integer or a Python
timedelta object.
"""
if isinstance(time, datetime.timedelta):
time = int(time.total_seconds())
return self.execute_command('SETEX', name, time, value)
def setnx(self, name, value):
"Set the value of key ``name`` to ``value`` if key doesn't exist"
return self.execute_command('SETNX', name, value)
def setrange(self, name, offset, value):
"""
Overwrite bytes in the value of ``name`` starting at ``offset`` with
``value``. If ``offset`` plus the length of ``value`` exceeds the
length of the original value, the new value will be larger than before.
If ``offset`` exceeds the length of the original value, null bytes
will be used to pad between the end of the previous value and the start
of what's being injected.
Returns the length of the new string.
"""
return self.execute_command('SETRANGE', name, offset, value)
def strlen(self, name):
"Return the number of bytes stored in the value of ``name``"
return self.execute_command('STRLEN', name)
def substr(self, name, start, end=-1):
"""
Return a substring of the string at key ``name``. ``start`` and ``end``
are 0-based integers specifying the portion of the string to return.
"""
return self.execute_command('SUBSTR', name, start, end)
def touch(self, *args):
"""
Alters the last access time of a key(s) ``*args``. A key is ignored
if it does not exist.
"""
return self.execute_command('TOUCH', *args)
def ttl(self, name):
"Returns the number of seconds until the key ``name`` will expire"
return self.execute_command('TTL', name)
def type(self, name):
"Returns the type of key ``name``"
return self.execute_command('TYPE', name)
def watch(self, *names):
"""
Watches the values at keys ``names``
"""
warnings.warn(DeprecationWarning('Call WATCH from a Pipeline object'))
def unwatch(self):
"""
Unwatches all previously watched keys for a transaction
"""
warnings.warn(
DeprecationWarning('Call UNWATCH from a Pipeline object'))
def unlink(self, *names):
"Unlink one or more keys specified by ``names``"
return self.execute_command('UNLINK', *names)
# LIST COMMANDS
def blpop(self, keys, timeout=0):
"""
LPOP a value off of the first non-empty list
named in the ``keys`` list.
If none of the lists in ``keys`` has a value to LPOP, then block
for ``timeout`` seconds, or until a value gets pushed on to one
of the lists.
If timeout is 0, then block indefinitely.
"""
if timeout is None:
timeout = 0
keys = list_or_args(keys, None)
keys.append(timeout)
return self.execute_command('BLPOP', *keys)
def brpop(self, keys, timeout=0):
"""
RPOP a value off of the first non-empty list
named in the ``keys`` list.
If none of the lists in ``keys`` has a value to RPOP, then block
for ``timeout`` seconds, or until a value gets pushed on to one
of the lists.
If timeout is 0, then block indefinitely.
"""
if timeout is None:
timeout = 0
keys = list_or_args(keys, None)
keys.append(timeout)
return self.execute_command('BRPOP', *keys)
def brpoplpush(self, src, dst, timeout=0):
"""
Pop a value off the tail of ``src``, push it on the head of ``dst``
and then return it.
This command blocks until a value is in ``src`` or until ``timeout``
seconds elapse, whichever is first. A ``timeout`` value of 0 blocks
forever.
"""
if timeout is None:
timeout = 0
return self.execute_command('BRPOPLPUSH', src, dst, timeout)
def lindex(self, name, index):
"""
Return the item from list ``name`` at position ``index``
Negative indexes are supported and will return an item at the
end of the list
"""
return self.execute_command('LINDEX', name, index)
def linsert(self, name, where, refvalue, value):
"""
Insert ``value`` in list ``name`` either immediately before or after
[``where``] ``refvalue``
Returns the new length of the list on success or -1 if ``refvalue``
is not in the list.
"""
return self.execute_command('LINSERT', name, where, refvalue, value)
def llen(self, name):
"Return the length of the list ``name``"
return self.execute_command('LLEN', name)
def lpop(self, name):
"Remove and return the first item of the list ``name``"
return self.execute_command('LPOP', name)
def lpush(self, name, *values):
"Push ``values`` onto the head of the list ``name``"
return self.execute_command('LPUSH', name, *values)
def lpushx(self, name, value):
"Push ``value`` onto the head of the list ``name`` if ``name`` exists"
return self.execute_command('LPUSHX', name, value)
def lrange(self, name, start, end):
"""
Return a slice of the list ``name`` between
position ``start`` and ``end``
``start`` and ``end`` can be negative numbers just like
Python slicing notation
"""
return self.execute_command('LRANGE', name, start, end)
def lrem(self, name, count, value):
"""
Remove the first ``count`` occurrences of elements equal to ``value``
from the list stored at ``name``.
The count argument influences the operation in the following ways:
count > 0: Remove elements equal to value moving from head to tail.
count < 0: Remove elements equal to value moving from tail to head.
count = 0: Remove all elements equal to value.
"""
return self.execute_command('LREM', name, count, value)
def lset(self, name, index, value):
"Set ``position`` of list ``name`` to ``value``"
return self.execute_command('LSET', name, index, value)
def ltrim(self, name, start, end):
"""
Trim the list ``name``, removing all values not within the slice
between ``start`` and ``end``
``start`` and ``end`` can be negative numbers just like
Python slicing notation
"""
return self.execute_command('LTRIM', name, start, end)
def rpop(self, name):
"Remove and return the last item of the list ``name``"
return self.execute_command('RPOP', name)
def rpoplpush(self, src, dst):
"""
RPOP a value off of the ``src`` list and atomically LPUSH it
on to the ``dst`` list. Returns the value.
"""
return self.execute_command('RPOPLPUSH', src, dst)
def rpush(self, name, *values):
"Push ``values`` onto the tail of the list ``name``"
return self.execute_command('RPUSH', name, *values)
def rpushx(self, name, value):
"Push ``value`` onto the tail of the list ``name`` if ``name`` exists"
return self.execute_command('RPUSHX', name, value)
def sort(self, name, start=None, num=None, by=None, get=None,
desc=False, alpha=False, store=None, groups=False):
"""
Sort and return the list, set or sorted set at ``name``.
``start`` and ``num`` allow for paging through the sorted data
``by`` allows using an external key to weight and sort the items.
Use an "*" to indicate where in the key the item value is located
``get`` allows for returning items from external keys rather than the
sorted data itself. Use an "*" to indicate where in the key
the item value is located
``desc`` allows for reversing the sort
``alpha`` allows for sorting lexicographically rather than numerically
``store`` allows for storing the result of the sort into
the key ``store``
``groups`` if set to True and if ``get`` contains at least two
elements, sort will return a list of tuples, each containing the
values fetched from the arguments to ``get``.
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise DataError("``start`` and ``num`` must both be specified")
pieces = [name]
if by is not None:
pieces.append(b'BY')
pieces.append(by)
if start is not None and num is not None:
pieces.append(b'LIMIT')
pieces.append(start)
pieces.append(num)
if get is not None:
# If get is a string assume we want to get a single value.
# Otherwise assume it's an iterable and we want to get multiple
# values. We can't just iterate blindly because strings are
# iterable.
if isinstance(get, (bytes, basestring)):
pieces.append(b'GET')
pieces.append(get)
else:
for g in get:
pieces.append(b'GET')
pieces.append(g)
if desc:
pieces.append(b'DESC')
if alpha:
pieces.append(b'ALPHA')
if store is not None:
pieces.append(b'STORE')
pieces.append(store)
if groups:
if not get or isinstance(get, (bytes, basestring)) or len(get) < 2:
raise DataError('when using "groups" the "get" argument '
'must be specified and contain at least '
'two keys')
options = {'groups': len(get) if groups else None}
return self.execute_command('SORT', *pieces, **options)
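# Hedged sketch of sort() with external keys and grouping (the
# 'weight_*' and 'name_*' key patterns are assumptions):
#
#   r.sort('items', by='weight_*', get=('#', 'name_*'), groups=True)
#   # -> [(item, name), ...] two-element tuples per sorted item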
# SCAN COMMANDS
def scan(self, cursor=0, match=None, count=None):
"""
Incrementally return lists of key names. Also return a cursor
indicating the scan position.
``match`` allows for filtering the keys by pattern
``count`` allows for hinting how many items the server should return per batch
"""
pieces = [cursor]
if match is not None:
pieces.extend([b'MATCH', match])
if count is not None:
pieces.extend([b'COUNT', count])
return self.execute_command('SCAN', *pieces)
def scan_iter(self, match=None, count=None):
"""
Make an iterator using the SCAN command so that the client doesn't
need to remember the cursor position.
``match`` allows for filtering the keys by pattern
``count`` allows for hinting how many items the server should return per batch
"""
cursor = '0'
while cursor != 0:
cursor, data = self.scan(cursor=cursor, match=match, count=count)
for item in data:
yield item
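# Hedged example: scan_iter hides cursor management and yields keys
# lazily, so it is safe on large keyspaces where KEYS would block.
#
#   for key in r.scan_iter(match='user:*', count=100):
#       print(key)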
def sscan(self, name, cursor=0, match=None, count=None):
"""
Incrementally return lists of elements in a set. Also return a cursor
indicating the scan position.
``match`` allows for filtering the keys by pattern
``count`` allows for hinting how many items the server should return per batch
"""
pieces = [name, cursor]
if match is not None:
pieces.extend([b'MATCH', match])
if count is not None:
pieces.extend([b'COUNT', count])
return self.execute_command('SSCAN', *pieces)
def sscan_iter(self, name, match=None, count=None):
"""
Make an iterator using the SSCAN command so that the client doesn't
need to remember the cursor position.
``match`` allows for filtering the keys by pattern
``count`` allows for hinting how many items the server should return per batch
"""
cursor = '0'
while cursor != 0:
cursor, data = self.sscan(name, cursor=cursor,
match=match, count=count)
for item in data:
yield item
def hscan(self, name, cursor=0, match=None, count=None):
"""
Incrementally return key/value slices in a hash. Also return a cursor
indicating the scan position.
``match`` allows for filtering the keys by pattern
``count`` allows for hinting how many items the server should return per batch
"""
pieces = [name, cursor]
if match is not None:
pieces.extend([b'MATCH', match])
if count is not None:
pieces.extend([b'COUNT', count])
return self.execute_command('HSCAN', *pieces)
def hscan_iter(self, name, match=None, count=None):
"""
Make an iterator using the HSCAN command so that the client doesn't
need to remember the cursor position.
``match`` allows for filtering the keys by pattern
``count`` allows for hinting how many items the server should return per batch
"""
cursor = '0'
while cursor != 0:
cursor, data = self.hscan(name, cursor=cursor,
match=match, count=count)
for item in data.items():
yield item
def zscan(self, name, cursor=0, match=None, count=None,
score_cast_func=float):
"""
Incrementally return lists of elements in a sorted set. Also return a
cursor indicating the scan position.
``match`` allows for filtering the keys by pattern
``count`` provides a hint to Redis about the number of members to return per batch
``score_cast_func`` a callable used to cast the score return value
"""
pieces = [name, cursor]
if match is not None:
pieces.extend([b'MATCH', match])
if count is not None:
pieces.extend([b'COUNT', count])
options = {'score_cast_func': score_cast_func}
return self.execute_command('ZSCAN', *pieces, **options)
def zscan_iter(self, name, match=None, count=None,
score_cast_func=float):
"""
Make an iterator using the ZSCAN command so that the client doesn't
need to remember the cursor position.
``match`` allows for filtering the keys by pattern
``count`` provides a hint to Redis about the number of members to return per batch
``score_cast_func`` a callable used to cast the score return value
"""
cursor = '0'
while cursor != 0:
cursor, data = self.zscan(name, cursor=cursor, match=match,
count=count,
score_cast_func=score_cast_func)
for item in data:
yield item
# SET COMMANDS
def sadd(self, name, *values):
"Add ``value(s)`` to set ``name``"
return self.execute_command('SADD', name, *values)
def scard(self, name):
"Return the number of elements in set ``name``"
return self.execute_command('SCARD', name)
def sdiff(self, keys, *args):
"Return the difference of sets specified by ``keys``"
args = list_or_args(keys, args)
return self.execute_command('SDIFF', *args)
def sdiffstore(self, dest, keys, *args):
"""
Store the difference of sets specified by ``keys`` into a new
set named ``dest``. Returns the number of keys in the new set.
"""
args = list_or_args(keys, args)
return self.execute_command('SDIFFSTORE', dest, *args)
def sinter(self, keys, *args):
"Return the intersection of sets specified by ``keys``"
args = list_or_args(keys, args)
return self.execute_command('SINTER', *args)
def sinterstore(self, dest, keys, *args):
"""
Store the intersection of sets specified by ``keys`` into a new
set named ``dest``. Returns the number of keys in the new set.
"""
args = list_or_args(keys, args)
return self.execute_command('SINTERSTORE', dest, *args)
def sismember(self, name, value):
"Return a boolean indicating if ``value`` is a member of set ``name``"
return self.execute_command('SISMEMBER', name, value)
def smembers(self, name):
"Return all members of the set ``name``"
return self.execute_command('SMEMBERS', name)
def smove(self, src, dst, value):
"Move ``value`` from set ``src`` to set ``dst`` atomically"
return self.execute_command('SMOVE', src, dst, value)
def spop(self, name, count=None):
"Remove and return a random member of set ``name``"
args = [count] if count is not None else []
return self.execute_command('SPOP', name, *args)
def srandmember(self, name, number=None):
"""
If ``number`` is None, returns a random member of set ``name``.
If ``number`` is supplied, returns a list of ``number`` random
members of set ``name``. Note this is only available when running
Redis 2.6+.
"""
args = [number] if number is not None else []
return self.execute_command('SRANDMEMBER', name, *args)
def srem(self, name, *values):
"Remove ``values`` from set ``name``"
return self.execute_command('SREM', name, *values)
def sunion(self, keys, *args):
"Return the union of sets specified by ``keys``"
args = list_or_args(keys, args)
return self.execute_command('SUNION', *args)
def sunionstore(self, dest, keys, *args):
"""
Store the union of sets specified by ``keys`` into a new
set named ``dest``. Returns the number of keys in the new set.
"""
args = list_or_args(keys, args)
return self.execute_command('SUNIONSTORE', dest, *args)
# STREAMS COMMANDS
def xack(self, name, groupname, *ids):
"""
Acknowledges the successful processing of one or more messages.
name: name of the stream.
groupname: name of the consumer group.
*ids: message ids to acknowledge.
"""
return self.execute_command('XACK', name, groupname, *ids)
def xadd(self, name, fields, id='*', maxlen=None, approximate=True):
"""
Add to a stream.
name: name of the stream
fields: dict of field/value pairs to insert into the stream
id: Location to insert this record. By default it is appended.
maxlen: truncate old stream members beyond this size
approximate: actual stream length may be slightly more than maxlen
"""
pieces = []
if maxlen is not None:
if not isinstance(maxlen, (int, long)) or maxlen < 1:
raise DataError('XADD maxlen must be a positive integer')
pieces.append(b'MAXLEN')
if approximate:
pieces.append(b'~')
pieces.append(str(maxlen))
pieces.append(id)
if not isinstance(fields, dict) or len(fields) == 0:
raise DataError('XADD fields must be a non-empty dict')
for pair in iteritems(fields):
pieces.extend(pair)
return self.execute_command('XADD', name, *pieces)
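# Usage sketch (illustrative comment only; assumes a connected client
# ``r = redis.Redis()`` and a hypothetical stream name):
#
#     # append an entry, capping the stream at roughly 1000 entries
#     entry_id = r.xadd('mystream', {'sensor': 'temp', 'value': '21.5'},
#                       maxlen=1000, approximate=True)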
def xclaim(self, name, groupname, consumername, min_idle_time, message_ids,
idle=None, time=None, retrycount=None, force=False,
justid=False):
"""
Changes the ownership of a pending message.
name: name of the stream.
groupname: name of the consumer group.
consumername: name of a consumer that claims the message.
min_idle_time: filter messages that were idle less than this amount of
milliseconds
message_ids: non-empty list or tuple of message IDs to claim
idle: optional. Set the idle time (last time it was delivered) of the
message in ms
time: optional integer. This is the same as idle but instead of a
relative amount of milliseconds, it sets the idle time to a specific
Unix time (in milliseconds).
retrycount: optional integer. set the retry counter to the specified
value. This counter is incremented every time a message is delivered
again.
force: optional boolean, false by default. Creates the pending message
entry in the PEL even if certain specified IDs are not already in the
PEL assigned to a different client.
justid: optional boolean, false by default. Return just an array of IDs
of messages successfully claimed, without returning the actual message
"""
if not isinstance(min_idle_time, (int, long)) or min_idle_time < 0:
raise DataError("XCLAIM min_idle_time must be a non negative "
"integer")
if not isinstance(message_ids, (list, tuple)) or not message_ids:
raise DataError("XCLAIM message_ids must be a non empty list or "
"tuple of message IDs to claim")
kwargs = {}
pieces = [name, groupname, consumername, str(min_idle_time)]
pieces.extend(list(message_ids))
if idle is not None:
if not isinstance(idle, (int, long)):
raise DataError("XCLAIM idle must be an integer")
pieces.extend((b'IDLE', str(idle)))
if time is not None:
if not isinstance(time, (int, long)):
raise DataError("XCLAIM time must be an integer")
pieces.extend((b'TIME', str(time)))
if retrycount is not None:
if not isinstance(retrycount, (int, long)):
raise DataError("XCLAIM retrycount must be an integer")
pieces.extend((b'RETRYCOUNT', str(retrycount)))
if force:
if not isinstance(force, bool):
raise DataError("XCLAIM force must be a boolean")
pieces.append(b'FORCE')
if justid:
if not isinstance(justid, bool):
raise DataError("XCLAIM justid must be a boolean")
pieces.append(b'JUSTID')
kwargs['parse_justid'] = True
return self.execute_command('XCLAIM', *pieces, **kwargs)
def xdel(self, name, *ids):
"""
Deletes one or more messages from a stream.
name: name of the stream.
*ids: message ids to delete.
"""
return self.execute_command('XDEL', name, *ids)
def xgroup_create(self, name, groupname, id='$', mkstream=False):
"""
Create a new consumer group associated with a stream.
name: name of the stream.
groupname: name of the consumer group.
id: ID of the last item in the stream to consider already delivered.
mkstream: a boolean indicating whether to create the stream if it
does not already exist.
"""
pieces = ['XGROUP CREATE', name, groupname, id]
if mkstream:
pieces.append(b'MKSTREAM')
return self.execute_command(*pieces)
def xgroup_delconsumer(self, name, groupname, consumername):
"""
Remove a specific consumer from a consumer group.
Returns the number of pending messages that the consumer had before it
was deleted.
name: name of the stream.
groupname: name of the consumer group.
consumername: name of consumer to delete
"""
return self.execute_command('XGROUP DELCONSUMER', name, groupname,
consumername)
def xgroup_destroy(self, name, groupname):
"""
Destroy a consumer group.
name: name of the stream.
groupname: name of the consumer group.
"""
return self.execute_command('XGROUP DESTROY', name, groupname)
def xgroup_setid(self, name, groupname, id):
"""
Set the consumer group last delivered ID to something else.
name: name of the stream.
groupname: name of the consumer group.
id: ID of the last item in the stream to consider already delivered.
"""
return self.execute_command('XGROUP SETID', name, groupname, id)
def xinfo_consumers(self, name, groupname):
"""
Returns general information about the consumers in the group.
name: name of the stream.
groupname: name of the consumer group.
"""
return self.execute_command('XINFO CONSUMERS', name, groupname)
def xinfo_groups(self, name):
"""
Returns general information about the consumer groups of the stream.
name: name of the stream.
"""
return self.execute_command('XINFO GROUPS', name)
def xinfo_stream(self, name):
"""
Returns general information about the stream.
name: name of the stream.
"""
return self.execute_command('XINFO STREAM', name)
def xlen(self, name):
"""
Returns the number of elements in a given stream.
"""
return self.execute_command('XLEN', name)
def xpending(self, name, groupname):
"""
Returns information about pending messages of a group.
name: name of the stream.
groupname: name of the consumer group.
"""
return self.execute_command('XPENDING', name, groupname)
def xpending_range(self, name, groupname, min, max, count,
consumername=None):
"""
Returns information about pending messages, in a range.
name: name of the stream.
groupname: name of the consumer group.
min: minimum stream ID.
max: maximum stream ID.
count: number of messages to return
consumername: name of a consumer to filter by (optional).
"""
pieces = [name, groupname]
if min is not None or max is not None or count is not None:
if min is None or max is None or count is None:
raise DataError("XPENDING must be provided with min, max "
"and count parameters, or none of them. ")
if not isinstance(count, (int, long)) or count < -1:
raise DataError("XPENDING count must be an integer >= -1")
pieces.extend((min, max, str(count)))
if consumername is not None:
if min is None or max is None or count is None:
raise DataError("if XPENDING is provided with consumername,"
" it must be provided with min, max and"
" count parameters")
pieces.append(consumername)
return self.execute_command('XPENDING', *pieces, parse_detail=True)
def xrange(self, name, min='-', max='+', count=None):
"""
Read stream values within an interval.
name: name of the stream.
min: first stream ID. defaults to '-',
meaning the earliest available.
max: last stream ID. defaults to '+',
meaning the latest available.
count: if set, only return this many items, beginning with the
earliest available.
"""
pieces = [min, max]
if count is not None:
if not isinstance(count, (int, long)) or count < 1:
raise DataError('XRANGE count must be a positive integer')
pieces.append(b'COUNT')
pieces.append(str(count))
return self.execute_command('XRANGE', name, *pieces)
def xread(self, streams, count=None, block=None):
"""
Block and monitor multiple streams for new data.
streams: a dict of stream names to stream IDs, where
IDs indicate the last ID already seen.
count: if set, only return this many items, beginning with the
earliest available.
block: number of milliseconds to wait, if nothing already present.
"""
pieces = []
if block is not None:
if not isinstance(block, (int, long)) or block < 0:
raise DataError('XREAD block must be a non-negative integer')
pieces.append(b'BLOCK')
pieces.append(str(block))
if count is not None:
if not isinstance(count, (int, long)) or count < 1:
raise DataError('XREAD count must be a positive integer')
pieces.append(b'COUNT')
pieces.append(str(count))
if not isinstance(streams, dict) or len(streams) == 0:
raise DataError('XREAD streams must be a non empty dict')
pieces.append(b'STREAMS')
keys, values = izip(*iteritems(streams))
pieces.extend(keys)
pieces.extend(values)
return self.execute_command('XREAD', *pieces)
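# Usage sketch (illustrative comment only; assumes a connected client
# ``r = redis.Redis()``):
#
#     # wait up to 5000 ms for entries newer than ID 0 on 'mystream'
#     replies = r.xread({'mystream': '0'}, count=10, block=5000)
#     for stream_name, entries in replies:
#         for entry_id, fields in entries:
#             print(stream_name, entry_id, fields)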
def xreadgroup(self, groupname, consumername, streams, count=None,
block=None, noack=False):
"""
Read from a stream via a consumer group.
groupname: name of the consumer group.
consumername: name of the requesting consumer.
streams: a dict of stream names to stream IDs, where
IDs indicate the last ID already seen.
count: if set, only return this many items, beginning with the
earliest available.
block: number of milliseconds to wait, if nothing already present.
noack: do not add messages to the PEL
"""
pieces = [b'GROUP', groupname, consumername]
if count is not None:
if not isinstance(count, (int, long)) or count < 1:
raise DataError("XREADGROUP count must be a positive integer")
pieces.append(b'COUNT')
pieces.append(str(count))
if block is not None:
if not isinstance(block, (int, long)) or block < 0:
raise DataError("XREADGROUP block must be a non-negative "
"integer")
pieces.append(b'BLOCK')
pieces.append(str(block))
if noack:
pieces.append(b'NOACK')
if not isinstance(streams, dict) or len(streams) == 0:
raise DataError('XREADGROUP streams must be a non empty dict')
pieces.append(b'STREAMS')
pieces.extend(streams.keys())
pieces.extend(streams.values())
return self.execute_command('XREADGROUP', *pieces)
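# Usage sketch (illustrative comment only; assumes a connected client
# ``r = redis.Redis()`` and hypothetical stream/group/consumer names):
#
#     r.xgroup_create('mystream', 'grp1', id='$', mkstream=True)
#     # '>' requests entries never delivered to this group before
#     replies = r.xreadgroup('grp1', 'worker-1', {'mystream': '>'}, count=10)
#     for stream_name, entries in replies:
#         for entry_id, fields in entries:
#             r.xack('mystream', 'grp1', entry_id)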
def xrevrange(self, name, max='+', min='-', count=None):
"""
Read stream values within an interval, in reverse order.
name: name of the stream
max: first stream ID. defaults to '+',
meaning the latest available.
min: last stream ID. defaults to '-',
meaning the earliest available.
count: if set, only return this many items, beginning with the
latest available.
"""
pieces = [max, min]
if count is not None:
if not isinstance(count, (int, long)) or count < 1:
raise DataError('XREVRANGE count must be a positive integer')
pieces.append(b'COUNT')
pieces.append(str(count))
return self.execute_command('XREVRANGE', name, *pieces)
def xtrim(self, name, maxlen, approximate=True):
"""
Trims old messages from a stream.
name: name of the stream.
maxlen: truncate old stream messages beyond this size
approximate: actual stream length may be slightly more than maxlen
"""
pieces = [b'MAXLEN']
if approximate:
pieces.append(b'~')
pieces.append(maxlen)
return self.execute_command('XTRIM', name, *pieces)
# SORTED SET COMMANDS
def zadd(self, name, mapping, nx=False, xx=False, ch=False, incr=False):
"""
Set any number of element-name, score pairs to the key ``name``. Pairs
are specified as a dict of element-names keys to score values.
``nx`` forces ZADD to only create new elements and not to update
scores for elements that already exist.
``xx`` forces ZADD to only update scores of elements that already
exist. New elements will not be added.
``ch`` modifies the return value to be the numbers of elements changed.
Changed elements include new elements that were added and elements
whose scores changed.
``incr`` modifies ZADD to behave like ZINCRBY. In this mode only a
single element/score pair can be specified and the score is the amount
the existing score will be incremented by. When using this mode the
return value of ZADD will be the new score of the element.
The return value of ZADD varies based on the mode specified. With no
options, ZADD returns the number of new elements added to the sorted
set.
"""
if not mapping:
raise DataError("ZADD requires at least one element/score pair")
if nx and xx:
raise DataError("ZADD allows either 'nx' or 'xx', not both")
if incr and len(mapping) != 1:
raise DataError("ZADD option 'incr' only works when passing a "
"single element/score pair")
pieces = []
options = {}
if nx:
pieces.append(b'NX')
if xx:
pieces.append(b'XX')
if ch:
pieces.append(b'CH')
if incr:
pieces.append(b'INCR')
options['as_score'] = True
for pair in iteritems(mapping):
pieces.append(pair[1])
pieces.append(pair[0])
return self.execute_command('ZADD', name, *pieces, **options)
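# Usage sketch (illustrative comment only; assumes a connected client
# ``r = redis.Redis()``):
#
#     r.zadd('scores', {'alice': 10, 'bob': 15})          # add two members
#     r.zadd('scores', {'alice': 1}, xx=True, incr=True)  # returns alice's new score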
def zcard(self, name):
"Return the number of elements in the sorted set ``name``"
return self.execute_command('ZCARD', name)
def zcount(self, name, min, max):
"""
Returns the number of elements in the sorted set at key ``name`` with
a score between ``min`` and ``max``.
"""
return self.execute_command('ZCOUNT', name, min, max)
def zincrby(self, name, amount, value):
"Increment the score of ``value`` in sorted set ``name`` by ``amount``"
return self.execute_command('ZINCRBY', name, amount, value)
def zinterstore(self, dest, keys, aggregate=None):
"""
Intersect multiple sorted sets specified by ``keys`` into
a new sorted set, ``dest``. Scores in the destination will be
aggregated based on the ``aggregate``, or SUM if none is provided.
"""
return self._zaggregate('ZINTERSTORE', dest, keys, aggregate)
def zlexcount(self, name, min, max):
"""
Return the number of items in the sorted set ``name`` between the
lexicographical range ``min`` and ``max``.
"""
return self.execute_command('ZLEXCOUNT', name, min, max)
def zpopmax(self, name, count=None):
"""
Remove and return up to ``count`` members with the highest scores
from the sorted set ``name``.
"""
args = [count] if count is not None else []
options = {
'withscores': True
}
return self.execute_command('ZPOPMAX', name, *args, **options)
def zpopmin(self, name, count=None):
"""
Remove and return up to ``count`` members with the lowest scores
from the sorted set ``name``.
"""
args = [count] if count is not None else []
options = {
'withscores': True
}
return self.execute_command('ZPOPMIN', name, *args, **options)
def bzpopmax(self, keys, timeout=0):
"""
ZPOPMAX a value off of the first non-empty sorted set
named in the ``keys`` list.
If none of the sorted sets in ``keys`` has a value to ZPOPMAX,
then block for ``timeout`` seconds, or until a member gets added
to one of the sorted sets.
If timeout is 0, then block indefinitely.
"""
if timeout is None:
timeout = 0
keys = list_or_args(keys, None)
keys.append(timeout)
return self.execute_command('BZPOPMAX', *keys)
def bzpopmin(self, keys, timeout=0):
"""
ZPOPMIN a value off of the first non-empty sorted set
named in the ``keys`` list.
If none of the sorted sets in ``keys`` has a value to ZPOPMIN,
then block for ``timeout`` seconds, or until a member gets added
to one of the sorted sets.
If timeout is 0, then block indefinitely.
"""
if timeout is None:
timeout = 0
keys = list_or_args(keys, None)
keys.append(timeout)
return self.execute_command('BZPOPMIN', *keys)
def zrange(self, name, start, end, desc=False, withscores=False,
score_cast_func=float):
"""
Return a range of values from sorted set ``name`` between
``start`` and ``end`` sorted in ascending order.
``start`` and ``end`` can be negative, indicating the end of the range.
``desc`` a boolean indicating whether to sort the results descendingly
``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
if desc:
return self.zrevrange(name, start, end, withscores,
score_cast_func)
pieces = ['ZRANGE', name, start, end]
if withscores:
pieces.append(b'WITHSCORES')
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
def zrangebylex(self, name, min, max, start=None, num=None):
"""
Return the lexicographical range of values from sorted set ``name``
between ``min`` and ``max``.
If ``start`` and ``num`` are specified, then return a slice of the
range.
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise DataError("``start`` and ``num`` must both be specified")
pieces = ['ZRANGEBYLEX', name, min, max]
if start is not None and num is not None:
pieces.extend([b'LIMIT', start, num])
return self.execute_command(*pieces)
def zrevrangebylex(self, name, max, min, start=None, num=None):
"""
Return the reversed lexicographical range of values from sorted set
``name`` between ``max`` and ``min``.
If ``start`` and ``num`` are specified, then return a slice of the
range.
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise DataError("``start`` and ``num`` must both be specified")
pieces = ['ZREVRANGEBYLEX', name, max, min]
if start is not None and num is not None:
pieces.extend([b'LIMIT', start, num])
return self.execute_command(*pieces)
def zrangebyscore(self, name, min, max, start=None, num=None,
withscores=False, score_cast_func=float):
"""
Return a range of values from the sorted set ``name`` with scores
between ``min`` and ``max``.
If ``start`` and ``num`` are specified, then return a slice
of the range.
``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise DataError("``start`` and ``num`` must both be specified")
pieces = ['ZRANGEBYSCORE', name, min, max]
if start is not None and num is not None:
pieces.extend([b'LIMIT', start, num])
if withscores:
pieces.append(b'WITHSCORES')
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
def zrank(self, name, value):
"""
Returns a 0-based value indicating the rank of ``value`` in sorted set
``name``
"""
return self.execute_command('ZRANK', name, value)
def zrem(self, name, *values):
"Remove member ``values`` from sorted set ``name``"
return self.execute_command('ZREM', name, *values)
def zremrangebylex(self, name, min, max):
"""
Remove all elements in the sorted set ``name`` between the
lexicographical range specified by ``min`` and ``max``.
Returns the number of elements removed.
"""
return self.execute_command('ZREMRANGEBYLEX', name, min, max)
def zremrangebyrank(self, name, min, max):
"""
Remove all elements in the sorted set ``name`` with ranks between
``min`` and ``max``. Values are 0-based, ordered from smallest score
to largest. Values can be negative indicating the highest scores.
Returns the number of elements removed
"""
return self.execute_command('ZREMRANGEBYRANK', name, min, max)
def zremrangebyscore(self, name, min, max):
"""
Remove all elements in the sorted set ``name`` with scores
between ``min`` and ``max``. Returns the number of elements removed.
"""
return self.execute_command('ZREMRANGEBYSCORE', name, min, max)
def zrevrange(self, name, start, end, withscores=False,
score_cast_func=float):
"""
Return a range of values from sorted set ``name`` between
``start`` and ``end`` sorted in descending order.
``start`` and ``end`` can be negative, indicating the end of the range.
``withscores`` indicates to return the scores along with the values
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
pieces = ['ZREVRANGE', name, start, end]
if withscores:
pieces.append(b'WITHSCORES')
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
def zrevrangebyscore(self, name, max, min, start=None, num=None,
withscores=False, score_cast_func=float):
"""
Return a range of values from the sorted set ``name`` with scores
between ``min`` and ``max`` in descending order.
If ``start`` and ``num`` are specified, then return a slice
of the range.
``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise DataError("``start`` and ``num`` must both be specified")
pieces = ['ZREVRANGEBYSCORE', name, max, min]
if start is not None and num is not None:
pieces.extend([b'LIMIT', start, num])
if withscores:
pieces.append(b'WITHSCORES')
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
def zrevrank(self, name, value):
"""
Returns a 0-based value indicating the descending rank of
``value`` in sorted set ``name``
"""
return self.execute_command('ZREVRANK', name, value)
def zscore(self, name, value):
"Return the score of element ``value`` in sorted set ``name``"
return self.execute_command('ZSCORE', name, value)
def zunionstore(self, dest, keys, aggregate=None):
"""
Union multiple sorted sets specified by ``keys`` into
a new sorted set, ``dest``. Scores in the destination will be
aggregated based on the ``aggregate``, or SUM if none is provided.
"""
return self._zaggregate('ZUNIONSTORE', dest, keys, aggregate)
def _zaggregate(self, command, dest, keys, aggregate=None):
pieces = [command, dest, len(keys)]
if isinstance(keys, dict):
keys, weights = iterkeys(keys), itervalues(keys)
else:
weights = None
pieces.extend(keys)
if weights:
pieces.append(b'WEIGHTS')
pieces.extend(weights)
if aggregate:
pieces.append(b'AGGREGATE')
pieces.append(aggregate)
return self.execute_command(*pieces)
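# Usage sketch (illustrative comment only; assumes a connected client
# ``r = redis.Redis()``):
#
#     # weighted union: scores from 'recent' count double in 'combined'
#     r.zunionstore('combined', {'alltime': 1, 'recent': 2}, aggregate='SUM')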
# HYPERLOGLOG COMMANDS
def pfadd(self, name, *values):
"Adds the specified elements to the specified HyperLogLog."
return self.execute_command('PFADD', name, *values)
def pfcount(self, *sources):
"""
Return the approximated cardinality of
the set observed by the HyperLogLog at key(s).
"""
return self.execute_command('PFCOUNT', *sources)
def pfmerge(self, dest, *sources):
"Merge N different HyperLogLogs into a single one."
return self.execute_command('PFMERGE', dest, *sources)
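# Usage sketch (illustrative comment only; assumes a connected client
# ``r = redis.Redis()``):
#
#     r.pfadd('visitors:mon', 'a', 'b', 'c')
#     r.pfadd('visitors:tue', 'b', 'c', 'd')
#     r.pfmerge('visitors:week', 'visitors:mon', 'visitors:tue')
#     r.pfcount('visitors:week')  # approximately 4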
# HASH COMMANDS
def hdel(self, name, *keys):
"Delete ``keys`` from hash ``name``"
return self.execute_command('HDEL', name, *keys)
def hexists(self, name, key):
"Returns a boolean indicating if ``key`` exists within hash ``name``"
return self.execute_command('HEXISTS', name, key)
def hget(self, name, key):
"Return the value of ``key`` within the hash ``name``"
return self.execute_command('HGET', name, key)
def hgetall(self, name):
"Return a Python dict of the hash's name/value pairs"
return self.execute_command('HGETALL', name)
def hincrby(self, name, key, amount=1):
"Increment the value of ``key`` in hash ``name`` by ``amount``"
return self.execute_command('HINCRBY', name, key, amount)
def hincrbyfloat(self, name, key, amount=1.0):
"""
Increment the value of ``key`` in hash ``name`` by floating ``amount``
"""
return self.execute_command('HINCRBYFLOAT', name, key, amount)
def hkeys(self, name):
"Return the list of keys within hash ``name``"
return self.execute_command('HKEYS', name)
def hlen(self, name):
"Return the number of elements in hash ``name``"
return self.execute_command('HLEN', name)
def hset(self, name, key, value):
"""
Set ``key`` to ``value`` within hash ``name``
Returns 1 if HSET created a new field, otherwise 0
"""
return self.execute_command('HSET', name, key, value)
def hsetnx(self, name, key, value):
"""
Set ``key`` to ``value`` within hash ``name`` if ``key`` does not
exist. Returns 1 if HSETNX created a field, otherwise 0.
"""
return self.execute_command('HSETNX', name, key, value)
def hmset(self, name, mapping):
"""
Set key to value within hash ``name`` for each corresponding
key and value from the ``mapping`` dict.
"""
if not mapping:
raise DataError("'hmset' with 'mapping' of length 0")
items = []
for pair in iteritems(mapping):
items.extend(pair)
return self.execute_command('HMSET', name, *items)
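# Usage sketch (illustrative comment only; assumes a connected client
# ``r = redis.Redis()``):
#
#     r.hmset('user:1', {'name': 'alice', 'email': 'a@example.com'})
#     name, email = r.hmget('user:1', 'name', 'email')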
def hmget(self, name, keys, *args):
"Returns a list of values ordered identically to ``keys``"
args = list_or_args(keys, args)
return self.execute_command('HMGET', name, *args)
def hvals(self, name):
"Return the list of values within hash ``name``"
return self.execute_command('HVALS', name)
def hstrlen(self, name, key):
"""
Return the number of bytes stored in the value of ``key``
within hash ``name``
"""
return self.execute_command('HSTRLEN', name, key)
def publish(self, channel, message):
"""
Publish ``message`` on ``channel``.
Returns the number of subscribers the message was delivered to.
"""
return self.execute_command('PUBLISH', channel, message)
def pubsub_channels(self, pattern='*'):
"""
Return a list of channels that have at least one subscriber
"""
return self.execute_command('PUBSUB CHANNELS', pattern)
def pubsub_numpat(self):
"""
Returns the number of subscriptions to patterns
"""
return self.execute_command('PUBSUB NUMPAT')
def pubsub_numsub(self, *args):
"""
Return a list of (channel, number of subscribers) tuples
for each channel given in ``*args``
"""
return self.execute_command('PUBSUB NUMSUB', *args)
def cluster(self, cluster_arg, *args):
return self.execute_command('CLUSTER %s' % cluster_arg.upper(), *args)
def eval(self, script, numkeys, *keys_and_args):
"""
Execute the Lua ``script``, specifying the ``numkeys`` the script
will touch and the key names and argument values in ``keys_and_args``.
Returns the result of the script.
In practice, use the object returned by ``register_script``. This
function exists purely for Redis API completion.
"""
return self.execute_command('EVAL', script, numkeys, *keys_and_args)
def evalsha(self, sha, numkeys, *keys_and_args):
"""
Use the ``sha`` to execute a Lua script already registered via EVAL
or SCRIPT LOAD. Specify the ``numkeys`` the script will touch and the
key names and argument values in ``keys_and_args``. Returns the result
of the script.
In practice, use the object returned by ``register_script``. This
function exists purely for Redis API completion.
"""
return self.execute_command('EVALSHA', sha, numkeys, *keys_and_args)
def script_exists(self, *args):
"""
Check if a script exists in the script cache by specifying the SHAs of
each script as ``args``. Returns a list of boolean values indicating
whether each script already exists in the cache.
"""
return self.execute_command('SCRIPT EXISTS', *args)
def script_flush(self):
"Flush all scripts from the script cache"
return self.execute_command('SCRIPT FLUSH')
def script_kill(self):
"Kill the currently executing Lua script"
return self.execute_command('SCRIPT KILL')
def script_load(self, script):
"Load a Lua ``script`` into the script cache. Returns the SHA."
return self.execute_command('SCRIPT LOAD', script)
def register_script(self, script):
"""
Register a Lua ``script`` specifying the ``keys`` it will touch.
Returns a Script object that is callable and hides the complexity of
dealing with scripts, keys, and shas. This is the preferred way to work
with Lua scripts.
"""
return Script(self, script)
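# Usage sketch (illustrative comment only; assumes a connected client
# ``r = redis.Redis()`` and a trivial, made-up Lua script):
#
#     incr_by = r.register_script(
#         "return redis.call('INCRBY', KEYS[1], ARGV[1])")
#     new_value = incr_by(keys=['counter'], args=[5])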
# GEO COMMANDS
def geoadd(self, name, *values):
"""
Add the specified geospatial items to the specified key identified
by the ``name`` argument. The Geospatial items are given as ordered
members of the ``values`` argument, each item or place is formed by
the triad longitude, latitude and name.
"""
if len(values) % 3 != 0:
raise DataError("GEOADD requires places with lon, lat and name"
" values")
return self.execute_command('GEOADD', name, *values)
def geodist(self, name, place1, place2, unit=None):
"""
Return the distance between ``place1`` and ``place2`` members of the
``name`` key.
The units must be one of the following: m, km, mi, ft. By default
meters are used.
"""
pieces = [name, place1, place2]
if unit and unit not in ('m', 'km', 'mi', 'ft'):
raise DataError("GEODIST invalid unit")
elif unit:
pieces.append(unit)
return self.execute_command('GEODIST', *pieces)
def geohash(self, name, *values):
"""
Return the geo hash string for each item of ``values`` members of
the specified key identified by the ``name`` argument.
"""
return self.execute_command('GEOHASH', name, *values)
def geopos(self, name, *values):
"""
Return the positions of each item of ``values`` as members of
the specified key identified by the ``name`` argument. Each position
is represented by the pairs lon and lat.
"""
return self.execute_command('GEOPOS', name, *values)
def georadius(self, name, longitude, latitude, radius, unit=None,
withdist=False, withcoord=False, withhash=False, count=None,
sort=None, store=None, store_dist=None):
"""
Return the members of the specified key identified by the
``name`` argument which are within the borders of the area specified
with the ``latitude`` and ``longitude`` location and the maximum
distance from the center specified by the ``radius`` value.
The units must be one of the following: m, km, mi, ft. By default
meters are used.
``withdist`` indicates to return the distances of each place.
``withcoord`` indicates to return the latitude and longitude of
each place.
``withhash`` indicates to return the geohash string of each place.
``count`` indicates to return the number of elements up to N.
``sort`` indicates to return the places in a sorted way, ASC for
nearest to farthest and DESC for farthest to nearest.
``store`` indicates to save the places names in a sorted set named
with a specific key, each element of the destination sorted set is
populated with the score from the original geo sorted set.
``store_dist`` indicates to save the places names in a sorted set
named with a specific key, instead of ``store`` the sorted set
destination score is set with the distance.
"""
return self._georadiusgeneric('GEORADIUS',
name, longitude, latitude, radius,
unit=unit, withdist=withdist,
withcoord=withcoord, withhash=withhash,
count=count, sort=sort, store=store,
store_dist=store_dist)
def georadiusbymember(self, name, member, radius, unit=None,
withdist=False, withcoord=False, withhash=False,
count=None, sort=None, store=None, store_dist=None):
"""
This command is exactly like ``georadius`` with the sole difference
that instead of taking, as the center of the area to query, a longitude
and latitude value, it takes the name of a member already existing
inside the geospatial index represented by the sorted set.
"""
return self._georadiusgeneric('GEORADIUSBYMEMBER',
name, member, radius, unit=unit,
withdist=withdist, withcoord=withcoord,
withhash=withhash, count=count,
sort=sort, store=store,
store_dist=store_dist)
def _georadiusgeneric(self, command, *args, **kwargs):
pieces = list(args)
if kwargs['unit'] and kwargs['unit'] not in ('m', 'km', 'mi', 'ft'):
raise DataError("GEORADIUS invalid unit")
elif kwargs['unit']:
pieces.append(kwargs['unit'])
else:
pieces.append('m')
for arg_name, byte_repr in (
('withdist', b'WITHDIST'),
('withcoord', b'WITHCOORD'),
('withhash', b'WITHHASH')):
if kwargs[arg_name]:
pieces.append(byte_repr)
if kwargs['count']:
pieces.extend([b'COUNT', kwargs['count']])
if kwargs['sort']:
if kwargs['sort'] == 'ASC':
pieces.append(b'ASC')
elif kwargs['sort'] == 'DESC':
pieces.append(b'DESC')
else:
raise DataError("GEORADIUS invalid sort")
if kwargs['store'] and kwargs['store_dist']:
raise DataError("GEORADIUS store and store_dist can't be set"
" together")
if kwargs['store']:
pieces.extend([b'STORE', kwargs['store']])
if kwargs['store_dist']:
pieces.extend([b'STOREDIST', kwargs['store_dist']])
return self.execute_command(command, *pieces, **kwargs)
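# Usage sketch (illustrative comment only; assumes a connected client
# ``r = redis.Redis()``; coordinates follow the lon, lat, name triad):
#
#     r.geoadd('cities', 13.361389, 38.115556, 'Palermo',
#              15.087269, 37.502669, 'Catania')
#     r.georadius('cities', 15, 37, 200, unit='km', withdist=True)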
StrictRedis = Redis
class Monitor(object):
"""
Monitor is useful for handling the MONITOR command to the redis server.
next_command() method returns one command from monitor
listen() method yields commands from monitor.
"""
monitor_re = re.compile(r'\[(\d+) (.*)\] (.*)')
command_re = re.compile(r'"(.*?)(?<!\\)"')
def __init__(self, connection_pool):
self.connection_pool = connection_pool
self.connection = self.connection_pool.get_connection('MONITOR')
def __enter__(self):
self.connection.send_command('MONITOR')
# check that monitor returns 'OK', but don't return it to user
response = self.connection.read_response()
if not bool_ok(response):
raise RedisError('MONITOR failed: %s' % response)
return self
def __exit__(self, *args):
self.connection.disconnect()
self.connection_pool.release(self.connection)
def next_command(self):
"Parse the response from a monitor command"
response = self.connection.read_response()
if isinstance(response, bytes):
response = self.connection.encoder.decode(response, force=True)
command_time, command_data = response.split(' ', 1)
m = self.monitor_re.match(command_data)
db_id, client_info, command = m.groups()
command = ' '.join(self.command_re.findall(command))
command = command.replace('\\"', '"').replace('\\\\', '\\')
if client_info == 'lua':
client_address = 'lua'
client_port = ''
client_type = 'lua'
elif client_info.startswith('unix'):
client_address = 'unix'
client_port = client_info[5:]
client_type = 'unix'
else:
# use rsplit as ipv6 addresses contain colons
client_address, client_port = client_info.rsplit(':', 1)
client_type = 'tcp'
return {
'time': float(command_time),
'db': int(db_id),
'client_address': client_address,
'client_port': client_port,
'client_type': client_type,
'command': command
}
def listen(self):
"Listen for commands coming to the server."
while True:
yield self.next_command()
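# Usage sketch (illustrative comment only; assumes a connected client
# ``r = redis.Redis()`` exposing the ``monitor()`` helper):
#
#     with r.monitor() as m:
#         for info in m.listen():
#             print(info['time'], info['command'])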
class PubSub(object):
"""
PubSub provides publish, subscribe and listen support to Redis channels.
After subscribing to one or more channels, the listen() method will block
until a message arrives on one of the subscribed channels. That message
will be returned and it's safe to start listening again.
"""
PUBLISH_MESSAGE_TYPES = ('message', 'pmessage')
UNSUBSCRIBE_MESSAGE_TYPES = ('unsubscribe', 'punsubscribe')
HEALTH_CHECK_MESSAGE = 'redis-py-health-check'
def __init__(self, connection_pool, shard_hint=None,
ignore_subscribe_messages=False):
self.connection_pool = connection_pool
self.shard_hint = shard_hint
self.ignore_subscribe_messages = ignore_subscribe_messages
self.connection = None
# we need to know the encoding options for this connection in order
# to lookup channel and pattern names for callback handlers.
self.encoder = self.connection_pool.get_encoder()
if self.encoder.decode_responses:
self.health_check_response = ['pong', self.HEALTH_CHECK_MESSAGE]
else:
self.health_check_response = [
b'pong',
self.encoder.encode(self.HEALTH_CHECK_MESSAGE)
]
self.reset()
def __del__(self):
try:
# if this object went out of scope prior to shutting down
# subscriptions, close the connection manually before
# returning it to the connection pool
self.reset()
except Exception:
pass
def reset(self):
if self.connection:
self.connection.disconnect()
self.connection.clear_connect_callbacks()
self.connection_pool.release(self.connection)
self.connection = None
self.channels = {}
self.pending_unsubscribe_channels = set()
self.patterns = {}
self.pending_unsubscribe_patterns = set()
def close(self):
self.reset()
def on_connect(self, connection):
"Re-subscribe to any channels and patterns previously subscribed to"
# NOTE: for python3, we can't pass bytestrings as keyword arguments
# so we need to decode channel/pattern names back to unicode strings
# before passing them to [p]subscribe.
self.pending_unsubscribe_channels.clear()
self.pending_unsubscribe_patterns.clear()
if self.channels:
channels = {}
for k, v in iteritems(self.channels):
channels[self.encoder.decode(k, force=True)] = v
self.subscribe(**channels)
if self.patterns:
patterns = {}
for k, v in iteritems(self.patterns):
patterns[self.encoder.decode(k, force=True)] = v
self.psubscribe(**patterns)
@property
def subscribed(self):
"Indicates if there are subscriptions to any channels or patterns"
return bool(self.channels or self.patterns)
def execute_command(self, *args):
"Execute a publish/subscribe command"
# NOTE: don't parse the response in this function -- it could pull a
# legitimate message off the stack if the connection is already
# subscribed to one or more channels
if self.connection is None:
self.connection = self.connection_pool.get_connection(
'pubsub',
self.shard_hint
)
# register a callback that re-subscribes to any channels we
# were listening to when we were disconnected
self.connection.register_connect_callback(self.on_connect)
connection = self.connection
kwargs = {'check_health': not self.subscribed}
self._execute(connection, connection.send_command, *args, **kwargs)
def _execute(self, connection, command, *args, **kwargs):
try:
return command(*args, **kwargs)
except (ConnectionError, TimeoutError) as e:
connection.disconnect()
if not (connection.retry_on_timeout and
isinstance(e, TimeoutError)):
raise
# Connect manually here. If the Redis server is down, this will
# fail and raise a ConnectionError as desired.
connection.connect()
# the ``on_connect`` callback should have been called by the
# connection to resubscribe us to any channels and patterns we were
# previously listening to
return command(*args, **kwargs)
def parse_response(self, block=True, timeout=0):
"Parse the response from a publish/subscribe command"
conn = self.connection
if conn is None:
raise RuntimeError(
'pubsub connection not set: '
'did you forget to call subscribe() or psubscribe()?')
self.check_health()
if not block and not conn.can_read(timeout=timeout):
return None
response = self._execute(conn, conn.read_response)
if conn.health_check_interval and \
response == self.health_check_response:
# ignore the health check message as user might not expect it
return None
return response
def check_health(self):
conn = self.connection
if conn is None:
raise RuntimeError(
'pubsub connection not set: '
'did you forget to call subscribe() or psubscribe()?')
if conn.health_check_interval and time.time() > conn.next_health_check:
conn.send_command('PING', self.HEALTH_CHECK_MESSAGE,
check_health=False)
def _normalize_keys(self, data):
"""
normalize channel/pattern names to be either bytes or strings
based on whether responses are automatically decoded. this saves us
from coercing the value for each message coming in.
"""
encode = self.encoder.encode
decode = self.encoder.decode
return {decode(encode(k)): v for k, v in iteritems(data)}
def psubscribe(self, *args, **kwargs):
"""
Subscribe to channel patterns. Patterns supplied as keyword arguments
expect a pattern name as the key and a callable as the value. A
pattern's callable will be invoked automatically when a message is
received on that pattern rather than producing a message via
``listen()``.
"""
if args:
args = list_or_args(args[0], args[1:])
new_patterns = dict.fromkeys(args)
new_patterns.update(kwargs)
ret_val = self.execute_command('PSUBSCRIBE', *iterkeys(new_patterns))
# update the patterns dict AFTER we send the command. we don't want to
# subscribe twice to these patterns, once for the command and again
# for the reconnection.
new_patterns = self._normalize_keys(new_patterns)
self.patterns.update(new_patterns)
self.pending_unsubscribe_patterns.difference_update(new_patterns)
return ret_val
def punsubscribe(self, *args):
"""
Unsubscribe from the supplied patterns. If empty, unsubscribe from
all patterns.
"""
if args:
args = list_or_args(args[0], args[1:])
patterns = self._normalize_keys(dict.fromkeys(args))
else:
patterns = self.patterns
self.pending_unsubscribe_patterns.update(patterns)
return self.execute_command('PUNSUBSCRIBE', *args)
def subscribe(self, *args, **kwargs):
"""
Subscribe to channels. Channels supplied as keyword arguments expect
a channel name as the key and a callable as the value. A channel's
callable will be invoked automatically when a message is received on
that channel rather than producing a message via ``listen()`` or
``get_message()``.
"""
if args:
args = list_or_args(args[0], args[1:])
new_channels = dict.fromkeys(args)
new_channels.update(kwargs)
ret_val = self.execute_command('SUBSCRIBE', *iterkeys(new_channels))
# update the channels dict AFTER we send the command. we don't want to
# subscribe twice to these channels, once for the command and again
# for the reconnection.
new_channels = self._normalize_keys(new_channels)
self.channels.update(new_channels)
self.pending_unsubscribe_channels.difference_update(new_channels)
return ret_val
def unsubscribe(self, *args):
"""
Unsubscribe from the supplied channels. If empty, unsubscribe from
all channels
"""
if args:
args = list_or_args(args[0], args[1:])
channels = self._normalize_keys(dict.fromkeys(args))
else:
channels = self.channels
self.pending_unsubscribe_channels.update(channels)
return self.execute_command('UNSUBSCRIBE', *args)
def listen(self):
"Listen for messages on channels this client has been subscribed to"
while self.subscribed:
response = self.handle_message(self.parse_response(block=True))
if response is not None:
yield response
def get_message(self, ignore_subscribe_messages=False, timeout=0):
"""
Get the next message if one is available, otherwise None.
If timeout is specified, the system will wait for ``timeout`` seconds
before returning. Timeout should be specified as a floating point
number.
"""
response = self.parse_response(block=False, timeout=timeout)
if response:
return self.handle_message(response, ignore_subscribe_messages)
return None
def ping(self, message=None):
"""
Ping the Redis server
"""
message = '' if message is None else message
return self.execute_command('PING', message)
def handle_message(self, response, ignore_subscribe_messages=False):
"""
Parses a pub/sub message. If the channel or pattern was subscribed to
with a message handler, the handler is invoked instead of a parsed
message being returned.
"""
message_type = nativestr(response[0])
if message_type == 'pmessage':
message = {
'type': message_type,
'pattern': response[1],
'channel': response[2],
'data': response[3]
}
elif message_type == 'pong':
message = {
'type': message_type,
'pattern': None,
'channel': None,
'data': response[1]
}
else:
message = {
'type': message_type,
'pattern': None,
'channel': response[1],
'data': response[2]
}
# if this is an unsubscribe message, remove it from memory
if message_type in self.UNSUBSCRIBE_MESSAGE_TYPES:
if message_type == 'punsubscribe':
pattern = response[1]
if pattern in self.pending_unsubscribe_patterns:
self.pending_unsubscribe_patterns.remove(pattern)
self.patterns.pop(pattern, None)
else:
channel = response[1]
if channel in self.pending_unsubscribe_channels:
self.pending_unsubscribe_channels.remove(channel)
self.channels.pop(channel, None)
if message_type in self.PUBLISH_MESSAGE_TYPES:
# if there's a message handler, invoke it
if message_type == 'pmessage':
handler = self.patterns.get(message['pattern'], None)
else:
handler = self.channels.get(message['channel'], None)
if handler:
handler(message)
return None
elif message_type != 'pong':
# this is a subscribe/unsubscribe message. ignore if we don't
# want them
if ignore_subscribe_messages or self.ignore_subscribe_messages:
return None
return message
def run_in_thread(self, sleep_time=0, daemon=False):
for channel, handler in iteritems(self.channels):
if handler is None:
raise PubSubError("Channel: '%s' has no handler registered" %
channel)
for pattern, handler in iteritems(self.patterns):
if handler is None:
raise PubSubError("Pattern: '%s' has no handler registered" %
pattern)
thread = PubSubWorkerThread(self, sleep_time, daemon=daemon)
thread.start()
return thread
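# Usage sketch (illustrative comment only; assumes a connected client
# ``r = redis.Redis()`` and a hypothetical handler function):
#
#     def handler(message):
#         print(message['channel'], message['data'])
#
#     p = r.pubsub()
#     p.subscribe(**{'notifications': handler})
#     worker = p.run_in_thread(sleep_time=0.1)
#     # ... later, shut the worker down and close the connection:
#     worker.stop()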
class PubSubWorkerThread(threading.Thread):
def __init__(self, pubsub, sleep_time, daemon=False):
super(PubSubWorkerThread, self).__init__()
self.daemon = daemon
self.pubsub = pubsub
self.sleep_time = sleep_time
self._running = threading.Event()
def run(self):
if self._running.is_set():
return
self._running.set()
pubsub = self.pubsub
sleep_time = self.sleep_time
while self._running.is_set():
pubsub.get_message(ignore_subscribe_messages=True,
timeout=sleep_time)
pubsub.close()
def stop(self):
# trip the flag so the run loop exits. the run loop will
# close the pubsub connection, which disconnects the socket
# and returns the connection to the pool.
self._running.clear()
class Pipeline(Redis):
"""
Pipelines provide a way to transmit multiple commands to the Redis server
in one transmission. This is convenient for batch processing, such as
saving all the values in a list to Redis.
All commands executed within a pipeline are wrapped with MULTI and EXEC
calls. This guarantees all commands executed in the pipeline will be
executed atomically.
Any command raising an exception does *not* halt the execution of
subsequent commands in the pipeline. Instead, the exception is caught
and its instance is placed into the response list returned by execute().
Code iterating over the response list should be able to deal with an
instance of an exception as a potential value. In general, these will be
ResponseError exceptions, such as those raised when issuing a command
on a key of a different datatype.
"""
UNWATCH_COMMANDS = {'DISCARD', 'EXEC', 'UNWATCH'}
def __init__(self, connection_pool, response_callbacks, transaction,
shard_hint):
self.connection_pool = connection_pool
self.connection = None
self.response_callbacks = response_callbacks
self.transaction = transaction
self.shard_hint = shard_hint
self.watching = False
self.reset()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.reset()
def __del__(self):
try:
self.reset()
except Exception:
pass
def __len__(self):
return len(self.command_stack)
def reset(self):
self.command_stack = []
self.scripts = set()
# make sure to reset the connection state in the event that we were
# watching something
if self.watching and self.connection:
try:
# call this manually since our unwatch or
# immediate_execute_command methods can call reset()
self.connection.send_command('UNWATCH')
self.connection.read_response()
except ConnectionError:
# disconnect will also remove any previous WATCHes
self.connection.disconnect()
# clean up the other instance attributes
self.watching = False
self.explicit_transaction = False
# we can safely return the connection to the pool here since we're
# sure we're no longer WATCHing anything
if self.connection:
self.connection_pool.release(self.connection)
self.connection = None
def multi(self):
"""
Start a transactional block of the pipeline after WATCH commands
are issued. End the transactional block with `execute`.
"""
if self.explicit_transaction:
raise RedisError('Cannot issue nested calls to MULTI')
if self.command_stack:
raise RedisError('Commands without an initial WATCH have already '
'been issued')
self.explicit_transaction = True
def execute_command(self, *args, **kwargs):
if (self.watching or args[0] == 'WATCH') and \
not self.explicit_transaction:
return self.immediate_execute_command(*args, **kwargs)
return self.pipeline_execute_command(*args, **kwargs)
def immediate_execute_command(self, *args, **options):
"""
Execute a command immediately, but don't auto-retry on a
ConnectionError if we're already WATCHing a variable. Used when
issuing WATCH or subsequent commands that retrieve their values, before
MULTI is called.
"""
command_name = args[0]
conn = self.connection
# if this is the first call, we need a connection
if not conn:
conn = self.connection_pool.get_connection(command_name,
self.shard_hint)
self.connection = conn
try:
conn.send_command(*args)
return self.parse_response(conn, command_name, **options)
except (ConnectionError, TimeoutError) as e:
conn.disconnect()
# if we were already watching a variable, the watch is no longer
# valid since this connection has died. raise a WatchError, which
# indicates the user should retry this transaction.
if self.watching:
self.reset()
raise WatchError("A ConnectionError occurred while watching "
"one or more keys")
# if retry_on_timeout is not set, or the error is not
# a TimeoutError, raise it
if not (conn.retry_on_timeout and isinstance(e, TimeoutError)):
self.reset()
raise
# retry_on_timeout is set, this is a TimeoutError and we are not
# already WATCHing any variables. retry the command.
try:
conn.send_command(*args)
return self.parse_response(conn, command_name, **options)
except (ConnectionError, TimeoutError):
# a subsequent failure should simply be raised
self.reset()
raise
def pipeline_execute_command(self, *args, **options):
"""
Stage a command to be executed when execute() is next called
Returns the current Pipeline object back so commands can be
chained together, such as:
pipe = pipe.set('foo', 'bar').incr('baz').decr('bang')
At some other point, you can then run: pipe.execute(),
which will execute all commands queued in the pipe.
"""
self.command_stack.append((args, options))
return self
def _execute_transaction(self, connection, commands, raise_on_error):
cmds = chain([(('MULTI', ), {})], commands, [(('EXEC', ), {})])
all_cmds = connection.pack_commands([args for args, options in cmds
if EMPTY_RESPONSE not in options])
connection.send_packed_command(all_cmds)
errors = []
# parse off the response for MULTI
# NOTE: we need to handle ResponseErrors here and continue
# so that we read all the additional command messages from
# the socket
try:
self.parse_response(connection, '_')
except ResponseError:
errors.append((0, sys.exc_info()[1]))
# and all the other commands
for i, command in enumerate(commands):
if EMPTY_RESPONSE in command[1]:
errors.append((i, command[1][EMPTY_RESPONSE]))
else:
try:
self.parse_response(connection, '_')
except ResponseError:
ex = sys.exc_info()[1]
self.annotate_exception(ex, i + 1, command[0])
errors.append((i, ex))
# parse the EXEC.
try:
response = self.parse_response(connection, '_')
except ExecAbortError:
if self.explicit_transaction:
self.immediate_execute_command('DISCARD')
if errors:
raise errors[0][1]
raise sys.exc_info()[1]
if response is None:
raise WatchError("Watched variable changed.")
# put any parse errors into the response
for i, e in errors:
response.insert(i, e)
if len(response) != len(commands):
self.connection.disconnect()
raise ResponseError("Wrong number of response items from "
"pipeline execution")
# find any errors in the response and raise if necessary
if raise_on_error:
self.raise_first_error(commands, response)
# We have to run response callbacks manually
data = []
for r, cmd in izip(response, commands):
if not isinstance(r, Exception):
args, options = cmd
command_name = args[0]
if command_name in self.response_callbacks:
r = self.response_callbacks[command_name](r, **options)
data.append(r)
return data
def _execute_pipeline(self, connection, commands, raise_on_error):
# build up all commands into a single request to increase network perf
all_cmds = connection.pack_commands([args for args, _ in commands])
connection.send_packed_command(all_cmds)
response = []
for args, options in commands:
try:
response.append(
self.parse_response(connection, args[0], **options))
except ResponseError:
response.append(sys.exc_info()[1])
if raise_on_error:
self.raise_first_error(commands, response)
return response
def raise_first_error(self, commands, response):
for i, r in enumerate(response):
if isinstance(r, ResponseError):
self.annotate_exception(r, i + 1, commands[i][0])
raise r
def annotate_exception(self, exception, number, command):
cmd = ' '.join(imap(safe_unicode, command))
msg = 'Command # %d (%s) of pipeline caused error: %s' % (
number, cmd, safe_unicode(exception.args[0]))
exception.args = (msg,) + exception.args[1:]
def parse_response(self, connection, command_name, **options):
result = Redis.parse_response(
self, connection, command_name, **options)
if command_name in self.UNWATCH_COMMANDS:
self.watching = False
elif command_name == 'WATCH':
self.watching = True
return result
def load_scripts(self):
# make sure all scripts that are about to be run on this pipeline exist
scripts = list(self.scripts)
immediate = self.immediate_execute_command
shas = [s.sha for s in scripts]
# we can't use the normal script_* methods because they would just
# get buffered in the pipeline.
exists = immediate('SCRIPT EXISTS', *shas)
if not all(exists):
for s, exist in izip(scripts, exists):
if not exist:
s.sha = immediate('SCRIPT LOAD', s.script)
def execute(self, raise_on_error=True):
"Execute all the commands in the current pipeline"
stack = self.command_stack
if not stack:
return []
if self.scripts:
self.load_scripts()
if self.transaction or self.explicit_transaction:
execute = self._execute_transaction
else:
execute = self._execute_pipeline
conn = self.connection
if not conn:
conn = self.connection_pool.get_connection('MULTI',
self.shard_hint)
# assign to self.connection so reset() releases the connection
# back to the pool after we're done
self.connection = conn
try:
return execute(conn, stack, raise_on_error)
except (ConnectionError, TimeoutError) as e:
conn.disconnect()
# if we were watching a variable, the watch is no longer valid
# since this connection has died. raise a WatchError, which
# indicates the user should retry this transaction.
if self.watching:
raise WatchError("A ConnectionError occurred while watching "
"one or more keys")
# if retry_on_timeout is not set, or the error is not
# a TimeoutError, raise it
if not (conn.retry_on_timeout and isinstance(e, TimeoutError)):
raise
# retry a TimeoutError when retry_on_timeout is set
return execute(conn, stack, raise_on_error)
finally:
self.reset()
def watch(self, *names):
"Watches the values at keys ``names``"
if self.explicit_transaction:
raise RedisError('Cannot issue a WATCH after a MULTI')
return self.execute_command('WATCH', *names)
def unwatch(self):
"Unwatches all previously specified keys"
return self.watching and self.execute_command('UNWATCH') or True
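# Usage sketch (illustrative comment only; assumes a connected client
# ``r = redis.Redis()``; WatchError comes from redis.exceptions):
#
#     with r.pipeline() as pipe:
#         while True:
#             try:
#                 pipe.watch('balance')                  # immediate mode
#                 current = int(pipe.get('balance') or 0)
#                 pipe.multi()                           # start buffering
#                 pipe.set('balance', current + 10)
#                 pipe.execute()
#                 break
#             except WatchError:
#                 continue  # 'balance' changed under us; retry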
class Script(object):
"An executable Lua script object returned by ``register_script``"
def __init__(self, registered_client, script):
self.registered_client = registered_client
self.script = script
# Precalculate and store the SHA1 hex digest of the script.
if isinstance(script, basestring):
# We need the encoding from the client in order to generate an
# accurate byte representation of the script
encoder = registered_client.connection_pool.get_encoder()
script = encoder.encode(script)
self.sha = hashlib.sha1(script).hexdigest()
def __call__(self, keys=[], args=[], client=None):
"Execute the script, passing any required ``args``"
if client is None:
client = self.registered_client
args = tuple(keys) + tuple(args)
# make sure the Redis server knows about the script
if isinstance(client, Pipeline):
# Make sure the pipeline can register the script before executing.
client.scripts.add(self)
try:
return client.evalsha(self.sha, len(keys), *args)
except NoScriptError:
            # Maybe the client is pointed to a different server than the
            # client that created this instance?
# Overwrite the sha just in case there was a discrepancy.
self.sha = client.script_load(self.script)
return client.evalsha(self.sha, len(keys), *args)
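# Usage sketch (hypothetical helper, not part of the public API): registering
# and invoking a Lua script. ``register_script`` on the Redis client returns a
# Script instance bound to that client.
def _example_lua_incrby(client):
    incr_by = client.register_script(
        "return redis.call('INCRBY', KEYS[1], ARGV[1])")
    # calling the Script issues EVALSHA and transparently re-loads the
    # script body if the server replies with NOSCRIPT
    return incr_by(keys=['counter'], args=[5])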
class BitFieldOperation(object):
"""
Command builder for BITFIELD commands.
"""
def __init__(self, client, key, default_overflow=None):
self.client = client
self.key = key
self._default_overflow = default_overflow
self.reset()
def reset(self):
"""
Reset the state of the instance to when it was constructed
"""
self.operations = []
self._last_overflow = 'WRAP'
self.overflow(self._default_overflow or self._last_overflow)
def overflow(self, overflow):
"""
Update the overflow algorithm of successive INCRBY operations
        :param overflow: Overflow algorithm, one of WRAP, SAT, FAIL. See the
            Redis docs for descriptions of these algorithms.
:returns: a :py:class:`BitFieldOperation` instance.
"""
overflow = overflow.upper()
if overflow != self._last_overflow:
self._last_overflow = overflow
self.operations.append(('OVERFLOW', overflow))
return self
def incrby(self, fmt, offset, increment, overflow=None):
"""
Increment a bitfield by a given amount.
:param fmt: format-string for the bitfield being updated, e.g. 'u8'
for an unsigned 8-bit integer.
:param offset: offset (in number of bits). If prefixed with a
'#', this is an offset multiplier, e.g. given the arguments
fmt='u8', offset='#2', the offset will be 16.
:param int increment: value to increment the bitfield by.
:param str overflow: overflow algorithm. Defaults to WRAP, but other
acceptable values are SAT and FAIL. See the Redis docs for
descriptions of these algorithms.
:returns: a :py:class:`BitFieldOperation` instance.
"""
if overflow is not None:
self.overflow(overflow)
self.operations.append(('INCRBY', fmt, offset, increment))
return self
def get(self, fmt, offset):
"""
Get the value of a given bitfield.
:param fmt: format-string for the bitfield being read, e.g. 'u8' for
an unsigned 8-bit integer.
:param offset: offset (in number of bits). If prefixed with a
'#', this is an offset multiplier, e.g. given the arguments
fmt='u8', offset='#2', the offset will be 16.
:returns: a :py:class:`BitFieldOperation` instance.
"""
self.operations.append(('GET', fmt, offset))
return self
def set(self, fmt, offset, value):
"""
Set the value of a given bitfield.
:param fmt: format-string for the bitfield being read, e.g. 'u8' for
an unsigned 8-bit integer.
:param offset: offset (in number of bits). If prefixed with a
'#', this is an offset multiplier, e.g. given the arguments
fmt='u8', offset='#2', the offset will be 16.
:param int value: value to set at the given position.
:returns: a :py:class:`BitFieldOperation` instance.
"""
self.operations.append(('SET', fmt, offset, value))
return self
@property
def command(self):
cmd = ['BITFIELD', self.key]
for ops in self.operations:
cmd.extend(ops)
return cmd
def execute(self):
"""
        Execute the operation(s) in a single BITFIELD command. The return
        value is a list of values corresponding to each operation. If the
        client used to create this instance was a pipeline, the values are
        instead returned by the pipeline's ``execute()`` call.
"""
command = self.command
self.reset()
return self.client.execute_command(*command)
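# Usage sketch (hypothetical helper, not part of the public API): chaining
# BITFIELD subcommands so they are sent as one command. Assumes
# ``client.bitfield(key)`` returns a BitFieldOperation for ``key``.
def _example_bitfield(client, key='bits'):
    bf = client.bitfield(key)
    # increment the first unsigned 8-bit slot, then read it back; both
    # operations travel in a single BITFIELD round trip
    return bf.incrby('u8', '#0', 1).get('u8', '#0').execute()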
|
9921a7921552d61727fb3788e096b8b475644310
|
ab40571d5051ad53c0f205fa797ba36eac516d06
|
/language/totto/prepare_predictions_for_eval.py
|
3a0b2b734442be06ec331d734f7b7226c631ef5d
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
google-research/language
|
e941b1a92ab46d40d8d03bb0c314905cb6902ce2
|
ac9447064195e06de48cc91ff642f7fffa28ffe8
|
refs/heads/master
| 2023-08-24T23:10:13.207294
| 2023-05-25T20:47:18
| 2023-05-25T22:29:27
| 153,201,352
| 1,567
| 371
|
Apache-2.0
| 2023-07-06T23:03:15
| 2018-10-16T00:58:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,733
|
py
|
prepare_predictions_for_eval.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processes references for evaluation (except for tokenization)."""
import json
import os
from absl import app
from absl import flags
import six
FLAGS = flags.FLAGS
flags.DEFINE_string("input_prediction_path", None, "Prediction txt file.")
flags.DEFINE_string("input_target_path", None, "Target json file.")
flags.DEFINE_string("output_dir", None, "Output directory.")
def write_predictions(predictions, output_path):
"""Write predictions to file."""
with open(output_path, "w", encoding="utf-8") as f:
for prediction in predictions:
if not prediction:
prediction = "<null>"
f.write(prediction.lower() + "\n")
def main(_):
input_prediction_path = FLAGS.input_prediction_path
input_target_path = FLAGS.input_target_path
output_dir = FLAGS.output_dir
predictions = []
overlap_predictions = []
nonoverlap_predictions = []
with open(input_prediction_path, "r", encoding="utf-8") as input_file:
for line in input_file:
line = line.strip()
predictions.append(line)
json_examples = []
with open(input_target_path, "r", encoding="utf-8") as input_file:
for line in input_file:
line = six.ensure_text(line, "utf-8")
json_example = json.loads(line)
json_examples.append(json_example)
assert len(predictions) == len(json_examples)
for index, prediction in enumerate(predictions):
json_example = json_examples[index]
if json_example["overlap_subset"]:
overlap_predictions.append(prediction)
else:
nonoverlap_predictions.append(prediction)
print("Writing predictions.")
all_output_path = os.path.join(output_dir, "predictions")
overlap_output_path = os.path.join(output_dir, "overlap_predictions")
nonoverlap_output_path = os.path.join(output_dir, "nonoverlap_predictions")
write_predictions(predictions, all_output_path)
write_predictions(overlap_predictions, overlap_output_path)
write_predictions(nonoverlap_predictions, nonoverlap_output_path)
if __name__ == "__main__":
flags.mark_flags_as_required(
["input_prediction_path", "input_target_path", "output_dir"])
app.run(main)
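# Example invocation (hypothetical paths):
#   python prepare_predictions_for_eval.py \
#     --input_prediction_path=/tmp/predictions.txt \
#     --input_target_path=/tmp/totto_dev_data.jsonl \
#     --output_dir=/tmp/eval_outputs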
|
69aa94596a0bac67b88a3c76627d93a81f32ffb3
|
952dc66c61966f099756cdb6c2d13b40352f63cc
|
/zerver/webhooks/netlify/view.py
|
e1616ed8e40f36e74d776eaf19a7759bb103901a
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
zulip/zulip
|
5ae6aad35fd9f72996c0a2a9cdd674400966ebf6
|
965a25d91b6ee2db54038f5df855215fa25146b0
|
refs/heads/main
| 2023-08-28T23:43:00.971110
| 2023-08-28T16:47:09
| 2023-08-28T19:33:02
| 43,160,685
| 20,239
| 8,996
|
Apache-2.0
| 2023-09-14T20:57:47
| 2015-09-25T16:37:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,285
|
py
|
view.py
|
from typing import Tuple
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.exceptions import UnsupportedWebhookEventTypeError
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.validator import WildValue, check_string, to_wild_value
from zerver.lib.webhooks.common import (
check_send_webhook_message,
get_http_headers_from_filename,
validate_extract_webhook_http_header,
)
from zerver.models import UserProfile
ALL_EVENT_TYPES = [
"deploy_failed",
"deploy_locked",
"deploy_unlocked",
"deploy_building",
"deploy_created",
]
fixture_to_headers = get_http_headers_from_filename("HTTP_X_NETLIFY_EVENT")
@webhook_view("Netlify", all_event_types=ALL_EVENT_TYPES)
@has_request_variables
def api_netlify_webhook(
request: HttpRequest,
user_profile: UserProfile,
payload: WildValue = REQ(argument_type="body", converter=to_wild_value),
) -> HttpResponse:
message_template, event = get_template(request, payload)
body = message_template.format(
build_name=payload["name"].tame(check_string),
build_url=payload["url"].tame(check_string),
branch_name=payload["branch"].tame(check_string),
state=payload["state"].tame(check_string),
)
topic = "{topic}".format(topic=payload["branch"].tame(check_string))
check_send_webhook_message(request, user_profile, topic, body, event)
return json_success(request)
def get_template(request: HttpRequest, payload: WildValue) -> Tuple[str, str]:
message_template = "The build [{build_name}]({build_url}) on branch {branch_name} "
event = validate_extract_webhook_http_header(request, "X-Netlify-Event", "Netlify")
if event == "deploy_failed":
message_template += payload["error_message"].tame(check_string)
elif event == "deploy_locked":
message_template += "is now locked."
elif event == "deploy_unlocked":
message_template += "is now unlocked."
elif event in ALL_EVENT_TYPES:
message_template += "is now {state}.".format(state=payload["state"].tame(check_string))
else:
raise UnsupportedWebhookEventTypeError(event)
return message_template, event
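def _example_rendered_message() -> str:
    # Usage sketch (hypothetical helper with hypothetical payload values),
    # mirroring the formatting performed in api_netlify_webhook for a
    # "deploy_building" event.
    template = "The build [{build_name}]({build_url}) on branch {branch_name} is now {state}."
    return template.format(
        build_name="my-site",  # hypothetical site name
        build_url="https://app.example.com/builds/1",  # hypothetical URL
        branch_name="main",
        state="building",
    )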
|
6839c497b4680c8da1177a49fabd0c361d3508f2
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/sa/profiles/DLink/DxS_Cisco_CLI/get_inventory.py
|
956f5262b0c8a35234914d7cec84b925357725f3
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 3,615
|
py
|
get_inventory.py
|
# ---------------------------------------------------------------------
# DLink.DxS_Cisco_CLI.get_inventory
# ---------------------------------------------------------------------
# Copyright (C) 2007-2016 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinventory import IGetInventory
class Script(BaseScript):
name = "DLink.DxS_Cisco_CLI.get_inventory"
interface = IGetInventory
"""
DGS-3610#show slots
Dev Slot Port Max Ports Configured Module Online Module User Status Software Status
--- ---- ---- --------- ---------------------------- ---------------------------- ------------ ---------------
1 0 24 24 N/A DGS-3610-26G N/A ok
1 1 0 1 N/A none N/A none
1 2 0 1 N/A none N/A none
DGS-3610#show interfaces status
Interface Status Vlan Duplex Speed Type
------------------------ -------- ---- ------- --------- ------
GigabitEthernet 0/1 up 1 Full 1000M fiber
GigabitEthernet 0/2 up 1 Full 1000M fiber
"""
rx_dev = re.compile(
r"^\s+\d+\s+(?P<number>\d+)\s+\d+\s+\d+\s+\S+\s+(?P<part_no>\S+)", re.MULTILINE
)
rx_status = re.compile(
r"^(?:Ten)?GigabitEthernet \d+/(?P<number>\d+)\s+(?:up|down)\s+\d+\s+"
r"\S+\s+(?P<speed>1\d+M)\s+(?P<type>\S+)\s*\n",
re.MULTILINE,
)
def execute(self):
r = []
try:
s = self.cli("show slots")
except self.CLISyntaxError:
raise self.NotSupportedError()
for match in self.rx_dev.finditer(s):
number = match.group("number")
part_no = match.group("part_no")
if part_no == "none":
continue
if number == "0":
p = {"type": "CHASSIS", "vendor": "DLINK", "part_no": [part_no]}
serial = self.capabilities.get("Chassis | Serial Number")
if serial:
p["serial"] = serial
revision = self.capabilities.get("Chassis | HW Version")
if revision:
p["revision"] = revision
else:
p = {"type": "MODULE", "number": number, "vendor": "DLINK", "part_no": [part_no]}
r += [p]
s = self.cli("show interfaces status")
for match in self.rx_status.finditer(s):
if match.group("type") == "fiber":
if match.group("speed") == "1000M":
r += [
{
"type": "XCVR",
"number": match.group("number"),
"vendor": "NONAME",
"part_no": ["NoName | Transceiver | 1G | SFP"],
}
]
if match.group("speed") == "10000M":
r += [
{
"type": "XCVR",
"number": match.group("number"),
"vendor": "NONAME",
"part_no": ["NoName | Transceiver | 10G | XFP"],
}
]
return r
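# Example return value (hypothetical chassis with one 1G SFP transceiver):
# [
#     {"type": "CHASSIS", "vendor": "DLINK", "part_no": ["DGS-3610-26G"]},
#     {"type": "XCVR", "number": "1", "vendor": "NONAME",
#      "part_no": ["NoName | Transceiver | 1G | SFP"]},
# ]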
|
1d0387bd317c850782c01d80cbdd333319176fe5
|
d3031d50874a592cca6bbd6296a7f8aea734e48f
|
/Tests/check_large_memory_numpy.py
|
24cb1f722bf07d34ba3301fc0325f8f8193f893c
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-other-permissive",
"HPND"
] |
permissive
|
python-pillow/Pillow
|
97abc2f04646ae702aca971d59c738a8239a0053
|
601324bf8915a6180f5616c817e63e2e7816b892
|
refs/heads/main
| 2023-09-03T22:55:27.094813
| 2023-09-02T04:20:46
| 2023-09-02T04:20:46
| 5,171,600
| 10,807
| 2,470
|
NOASSERTION
| 2023-09-14T13:41:58
| 2012-07-24T21:38:39
|
Python
|
UTF-8
|
Python
| false
| false
| 935
|
py
|
check_large_memory_numpy.py
|
import sys
import pytest
from PIL import Image
# This test is not run automatically.
#
# It requires > 2gb memory for the >2 gigapixel image generated in the
# second test. Running this automatically would amount to a denial of
# service on our testing infrastructure. I expect this test to fail
# on any 32-bit machine, as well as any smallish things (like
# Raspberry Pis).
np = pytest.importorskip("numpy", reason="NumPy not installed")
YDIM = 32769
XDIM = 48000
pytestmark = pytest.mark.skipif(sys.maxsize <= 2**32, reason="requires 64-bit system")
def _write_png(tmp_path, xdim, ydim):
dtype = np.uint8
a = np.zeros((xdim, ydim), dtype=dtype)
f = str(tmp_path / "temp.png")
im = Image.fromarray(a, "L")
im.save(f)
def test_large(tmp_path):
"""succeeded prepatch"""
_write_png(tmp_path, XDIM, YDIM)
def test_2gpx(tmp_path):
"""failed prepatch"""
_write_png(tmp_path, XDIM, XDIM)
|
fc0d61cacd0ca746b6796ef5f9dd963d5b2f6623
|
ec8d9e1595ccc252a57d1769382bb98d604e40a9
|
/pyrolite/geochem/quality/__init__.py
|
9905e742c99e2aa7fd2df6ccdbb3bba0a538483c
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
morganjwilliams/pyrolite
|
2a8e738e14099f72282a80acb7139d71eec71efc
|
ac7cd52029909738a41143b89be880e0da419266
|
refs/heads/main
| 2023-08-31T09:20:33.282184
| 2023-07-21T06:45:24
| 2023-07-21T06:45:24
| 137,172,322
| 113
| 37
|
NOASSERTION
| 2023-08-29T10:49:17
| 2018-06-13T06:31:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
__init__.py
|
"""
Submodule for data quality checking and assurance.
Todo
------
* Identifying interval data by detecting dominant low-end periodicity at scales similar to the lowest values.
This is largely a metadata thing for single points, but a tractable problem for
multi-point data groups.
Assumptions around normality or unimodality may not be useful in practice here.
Non-limited data should exhibit intervals related to the variance
and the overall number of points.
Could use ratio data here to also include information regarding 'expected value',
although this is in a way tangential information (the detection limit is ~
independent of the data).
Could use entropy measures either over histograms or over FFT histograms.
Spectral methods could be useful - it's a similar concept to harmonics in a way:
peaks offset from zero at 0+n, 0+2n, 0+3n, etc., with decreasing
magnitude (occurring at lower values).
"""
from ...util.log import Handle
logger = Handle(__name__)
|
f25867d47b1bee6aff62a84dc3958c921a9848dd
|
6ba5116e37f67c613c855efd33a1615c8c851054
|
/openaerostruct/transfer/load_transfer.py
|
9004cfa509e155404985acf5f6e0c78bf137399e
|
[
"Apache-2.0"
] |
permissive
|
mdolab/OpenAeroStruct
|
08dc28fc12df9927c78469ea6188f4cee87a9d5b
|
f2f974fb06a34244a3bed6f99b486769256353fe
|
refs/heads/main
| 2023-08-04T02:04:58.875285
| 2023-07-29T19:31:22
| 2023-07-29T19:31:22
| 53,821,266
| 154
| 100
|
Apache-2.0
| 2023-07-29T19:31:23
| 2016-03-14T02:37:30
|
Python
|
UTF-8
|
Python
| false
| false
| 11,373
|
py
|
load_transfer.py
|
import numpy as np
import openmdao.api as om
class LoadTransfer(om.ExplicitComponent):
"""
Perform aerodynamic load transfer.
Apply the computed sectional forces on the aerodynamic surfaces to
obtain the deformed mesh FEM loads.
Parameters
----------
def_mesh[nx, ny, 3] : numpy array
Array defining the lifting surfaces after deformation.
Arrays will be flattened in Fortran order (only relevant when more than one chordwise panel).
sec_forces[nx-1, ny-1, 3] : numpy array
Array containing the sectional forces acting on each panel.
Returns
-------
loads[ny, 6] : numpy array
Array containing the loads applied on the FEM component at each node,
computed from the sectional forces. The first 3 columns are N, and the last 3 are N*m.
"""
def initialize(self):
self.options.declare("surface", types=dict)
def setup(self):
self.surface = surface = self.options["surface"]
self.nx = nx = surface["mesh"].shape[0]
self.ny = ny = surface["mesh"].shape[1]
if surface["fem_model_type"] == "tube":
self.fem_origin = surface["fem_origin"]
else:
y_upper = surface["data_y_upper"]
x_upper = surface["data_x_upper"]
y_lower = surface["data_y_lower"]
fem_origin = (x_upper[0] * (y_upper[0] - y_lower[0]) + x_upper[-1] * (y_upper[-1] - y_lower[-1])) / (
(y_upper[0] - y_lower[0]) + (y_upper[-1] - y_lower[-1])
)
# For some reason, surface data is complex in some tests.
self.fem_origin = float(fem_origin)
self.w1 = 0.25
self.w2 = self.fem_origin
self.add_input("def_mesh", val=np.zeros((nx, ny, 3)), units="m")
self.add_input("sec_forces", val=np.zeros((nx - 1, ny - 1, 3)), units="N")
self.add_output("loads", val=np.zeros((self.ny, 6)), units="N") # WARNING!!! UNITS ARE A MIXTURE OF N & N*m
# Well, technically the units of this load array are mixed.
# The first 3 indices are N and the last 3 are N*m.
# Derivatives
# First, the direct loads wrt sec_forces terms.
base_row = np.array([0, 1, 2, 6, 7, 8])
base_col = np.array([0, 1, 2, 0, 1, 2])
row = np.tile(base_row, ny - 1) + np.repeat(6 * np.arange(ny - 1), 6)
col = np.tile(base_col, ny - 1) + np.repeat(3 * np.arange(ny - 1), 6)
rows1 = np.tile(row, nx - 1)
cols1 = np.tile(col, nx - 1) + np.repeat(3 * (ny - 1) * np.arange(nx - 1), 6 * (ny - 1))
# Then, the term from the cross product.
base_row = np.array([3, 3, 4, 4, 5, 5])
base_col = np.array([1, 2, 0, 2, 0, 1])
row = np.tile(base_row, ny - 1) + np.repeat(6 * np.arange(ny - 1), 6)
col = np.tile(base_col, ny - 1) + np.repeat(3 * np.arange(ny - 1), 6)
row1 = np.tile(row, nx - 1)
col1 = np.tile(col, nx - 1) + np.repeat(3 * (ny - 1) * np.arange(nx - 1), 6 * (ny - 1))
rows2 = np.tile(row1, 2) + np.repeat(np.array([0, 6]), 6 * (nx - 1) * (ny - 1))
cols2 = np.tile(col1, 2)
rows = np.concatenate([rows1, rows2])
cols = np.concatenate([cols1, cols2])
self.declare_partials(of="loads", wrt="sec_forces", rows=rows, cols=cols)
# Top diagonal is forward-most mesh point.
base_row = np.array([3, 3, 4, 4, 5, 5])
base_col = np.array([4, 5, 3, 5, 3, 4])
row = np.tile(base_row, ny - 1) + np.repeat(6 * np.arange(ny - 1), 6)
col = np.tile(base_col, ny - 1) + np.repeat(3 * np.arange(ny - 1), 6)
rows1 = np.tile(row, nx)
cols1 = np.tile(col, nx) + np.repeat(3 * ny * np.arange(nx), 6 * (ny - 1))
# Bottom diagonal is backward-most mesh point.
base_row = np.array([9, 9, 10, 10, 11, 11])
base_col = np.array([1, 2, 0, 2, 0, 1])
row = np.tile(base_row, ny - 1) + np.repeat(6 * np.arange(ny - 1), 6)
col = np.tile(base_col, ny - 1) + np.repeat(3 * np.arange(ny - 1), 6)
rows2 = np.tile(row, nx)
cols2 = np.tile(col, nx) + np.repeat(3 * ny * np.arange(nx), 6 * (ny - 1))
# Central Diagonal blocks
base_row = np.array([3, 3, 4, 4, 5, 5])
base_col = np.array([1, 2, 0, 2, 0, 1])
row = np.tile(base_row, ny) + np.repeat(6 * np.arange(ny), 6)
col = np.tile(base_col, ny) + np.repeat(3 * np.arange(ny), 6)
rows3 = np.tile(row, nx)
cols3 = np.tile(col, nx) + np.repeat(3 * ny * np.arange(nx), 6 * ny)
rows = np.concatenate([rows1, rows2, rows3])
cols = np.concatenate([cols1, cols2, cols3])
self.declare_partials(of="loads", wrt="def_mesh", rows=rows, cols=cols)
# -------------------------------- Check Partial Options-------------------------------------
self.set_check_partial_options("*", method="cs", step=1e-40)
def compute(self, inputs, outputs):
mesh = inputs["def_mesh"] # [nx, ny, 3]
sec_forces = inputs["sec_forces"]
# ----- 1. Forces transfer -----
# Only need to zero out the part that is assigned via +=
outputs["loads"][-1, :] = 0.0
# The aero force acting on each panel is evenly transferred to the adjacent FEM nodes.
sec_forces_sum = 0.5 * np.sum(sec_forces, axis=0)
outputs["loads"][:-1, :3] = sec_forces_sum
outputs["loads"][1:, :3] += sec_forces_sum
# ----- 2. Moments transfer -----
# Compute the aerodynamic centers at the quarter-chord point of each panel
# a_pts [nx-1, ny-1, 3]
a_pts = (
0.5 * (1 - self.w1) * mesh[:-1, :-1, :]
+ 0.5 * self.w1 * mesh[1:, :-1, :]
+ 0.5 * (1 - self.w1) * mesh[:-1, 1:, :]
+ 0.5 * self.w1 * mesh[1:, 1:, :]
)
# Compute the structural nodes based on the fem_origin location (weighted sum of the LE and TE mesh vertices)
# s_pts [ny, 3]
s_pts = (1 - self.w2) * mesh[0, :, :] + self.w2 * mesh[-1, :, :]
# The moment arm is between the aerodynamic centers of each panel and the FEM nodes.
# Moment contribution of sec_forces (acting on aero center) to the inner/outer adjacent node
moment_in = np.sum(np.cross(a_pts - s_pts[:-1, :], 0.5 * sec_forces), axis=0) # [ny-1, 3]
moment_out = np.sum(np.cross(a_pts - s_pts[1:, :], 0.5 * sec_forces), axis=0)
        # Total moment at each node = sum of moment_in and moment_out, except at the edge nodes.
outputs["loads"][:-1, 3:] = moment_in
outputs["loads"][1:, 3:] += moment_out
def compute_partials(self, inputs, partials):
mesh = inputs["def_mesh"]
sec_forces = inputs["sec_forces"]
ny = self.ny
nx = self.nx
w1 = self.w1
w2 = self.w2
# Compute the aerodynamic centers at the quarter-chord point of each panel
a_pts = (
0.5 * (1 - w1) * mesh[:-1, :-1, :]
+ 0.5 * w1 * mesh[1:, :-1, :]
+ 0.5 * (1 - w1) * mesh[:-1, 1:, :]
+ 0.5 * w1 * mesh[1:, 1:, :]
)
# Compute the structural nodes
s_pts = (1 - self.w2) * mesh[0, :, :] + self.w2 * mesh[-1, :, :]
# ----- 1. dmoment__dsec_forces -----
# Sensitivity of loads (moments) at inner node wrt sec_force
diff_in = 0.5 * (a_pts - s_pts[:-1, :]) # moment arm from inner node to aero center.
dmom_dsec_in = np.empty((nx - 1, ny - 1, 6))
dmom_dsec_in[:, :, 0] = -diff_in[:, :, 2]
dmom_dsec_in[:, :, 1] = diff_in[:, :, 1]
dmom_dsec_in[:, :, 2] = diff_in[:, :, 2]
dmom_dsec_in[:, :, 3] = -diff_in[:, :, 0]
dmom_dsec_in[:, :, 4] = -diff_in[:, :, 1]
dmom_dsec_in[:, :, 5] = diff_in[:, :, 0]
# Repeat for moments at outer node wrt sec_force
diff_out = 0.5 * (a_pts - s_pts[1:, :]) # moment arm from outer node to aero center.
dmom_dsec_out = np.empty((nx - 1, ny - 1, 6))
dmom_dsec_out[:, :, 0] = -diff_out[:, :, 2]
dmom_dsec_out[:, :, 1] = diff_out[:, :, 1]
dmom_dsec_out[:, :, 2] = diff_out[:, :, 2]
dmom_dsec_out[:, :, 3] = -diff_out[:, :, 0]
dmom_dsec_out[:, :, 4] = -diff_out[:, :, 1]
dmom_dsec_out[:, :, 5] = diff_out[:, :, 0]
id1 = 6 * (ny - 1) * (nx - 1)
partials["loads", "sec_forces"][:id1] = 0.5
id2 = id1 * 2
dmom_dsec_in = dmom_dsec_in.flatten()
dmom_dsec_out = dmom_dsec_out.flatten()
partials["loads", "sec_forces"][id1:id2] = dmom_dsec_in
partials["loads", "sec_forces"][id2:] = dmom_dsec_out
# ----- 2. dmoment__dmesh -----
# Sensitivity of moments at inner nodes wrt diff_in (upper diagonal)
dmom_ddiff_in = np.zeros((nx - 1, ny - 1, 6))
dmom_ddiff_in[:, :, 0] = sec_forces[:, :, 2]
dmom_ddiff_in[:, :, 1] = -sec_forces[:, :, 1]
dmom_ddiff_in[:, :, 2] = -sec_forces[:, :, 2]
dmom_ddiff_in[:, :, 3] = sec_forces[:, :, 0]
dmom_ddiff_in[:, :, 4] = sec_forces[:, :, 1]
dmom_ddiff_in[:, :, 5] = -sec_forces[:, :, 0]
dmom_ddiff_in_sum = np.sum(dmom_ddiff_in, axis=0)
# Sensitivity of moments at outer nodes wrt diff_out (lower diagonal)
dmom_ddiff_out = np.zeros((nx - 1, ny - 1, 6))
dmom_ddiff_out[:, :, 0] = sec_forces[:, :, 2]
dmom_ddiff_out[:, :, 1] = -sec_forces[:, :, 1]
dmom_ddiff_out[:, :, 2] = -sec_forces[:, :, 2]
dmom_ddiff_out[:, :, 3] = sec_forces[:, :, 0]
dmom_ddiff_out[:, :, 4] = sec_forces[:, :, 1]
dmom_ddiff_out[:, :, 5] = -sec_forces[:, :, 0]
dmom_ddiff_out_sum = np.sum(dmom_ddiff_out, axis=0)
dmon_ddiff_diag = np.zeros((nx - 1, ny, 6))
dmon_ddiff_diag[:, 1:, :] = dmom_ddiff_out
dmon_ddiff_diag[:, :-1, :] += dmom_ddiff_in
dmon_ddiff_diag_sum = np.zeros((1, ny, 6))
dmon_ddiff_diag_sum[:, :-1, :] = dmom_ddiff_in_sum
dmon_ddiff_diag_sum[:, 1:, :] += dmom_ddiff_out_sum
dmom_ddiff_in = dmom_ddiff_in.flatten()
dmom_ddiff_out = dmom_ddiff_out.flatten()
dmon_ddiff_diag = dmon_ddiff_diag.flatten()
dmon_ddiff_diag_sum = dmon_ddiff_diag_sum.flatten()
idy = 6 * (ny - 1)
idx = idy * nx
idw = idy * (nx - 1)
# Need to zero out what's there because our assignments overlap.
partials["loads", "def_mesh"][:] = 0.0
# Upper diagonal blocks
partials["loads", "def_mesh"][:idw] = dmom_ddiff_in * ((1 - w1) * 0.25)
partials["loads", "def_mesh"][idy:idx] += dmom_ddiff_in * (w1 * 0.25)
# Lower Diagonal blocks
id2 = idx * 2
partials["loads", "def_mesh"][idx : idx + idw] = dmom_ddiff_out * ((1 - w1) * 0.25)
partials["loads", "def_mesh"][idx + idy : id2] += dmom_ddiff_out * (w1 * 0.25)
# Central Diagonal blocks
idy = 6 * ny
idz = 6 * (nx - 1)
id3 = id2 + idw + idz
partials["loads", "def_mesh"][id2:id3] = dmon_ddiff_diag * ((1 - w1) * 0.25)
partials["loads", "def_mesh"][id2 : id2 + idy] -= dmon_ddiff_diag_sum * ((1 - w2) * 0.5)
id2 += idy
id3 += idy
partials["loads", "def_mesh"][id2:id3] += dmon_ddiff_diag * (w1 * 0.25)
partials["loads", "def_mesh"][id3 - idy : id3] -= dmon_ddiff_diag_sum * (w2 * 0.5)
|
5faa5f87da9c39e9f3301ed7dfbfc9d3f110446f
|
aaa72c72c9089a5f4a71f8151ab8304297692680
|
/pysaliency/plotting.py
|
020537c49074dec16a16835c43ce01e1785b5864
|
[
"MIT"
] |
permissive
|
matthias-k/pysaliency
|
2569653a727247cc81c0a994acaeface93124ee7
|
0664dba9b637f64b089b3a44b191dd24da84a30e
|
refs/heads/master
| 2023-08-11T08:03:26.527271
| 2022-06-11T21:52:31
| 2022-06-11T21:52:31
| 46,892,512
| 142
| 42
|
MIT
| 2023-07-06T14:03:09
| 2015-11-25T23:08:26
|
Python
|
UTF-8
|
Python
| false
| false
| 4,761
|
py
|
plotting.py
|
from __future__ import absolute_import, print_function, division, unicode_literals
try:
import matplotlib.pyplot as plt
except ImportError:
# If matplotlib is not there, just ignore it
pass
import numpy as np
from scipy.ndimage import zoom
def plot_information_gain(information_gain, ax=None, color_range = None, image=None, frame=False,
thickness = 1.0, zoom_factor=1.0, threshold=0.05, rel_levels=None,
alpha=0.5, color_offset = 0.25, plot_color_bar=True):
"""
Create pixel space information gain plots as in the paper.
Parameters:
-----------
    information_gain: the information gain to plot.
    ax: the matplotlib axes object to use. If None, use the current axes.
    color_range: full range of the colorbar.
"""
if ax is None:
ax = plt.gca()
ig = information_gain
if zoom_factor != 1.0:
ig = zoom(ig, zoom_factor, order=0)
if color_range is None:
color_range = (ig.min(), ig.max())
if not isinstance(color_range, (tuple, list)):
color_range = (-color_range, color_range)
color_total_max = max(np.abs(color_range[0]), np.abs(color_range[1]))
if image is not None:
if image.ndim == 3:
image = image.sum(axis=-1)
ax.imshow(image, alpha=0.3)
if rel_levels is None:
rel_levels = [0.1, 0.4, 0.7]
# from https://stackoverflow.com/questions/8580631/transparent-colormap
cm = plt.cm.get_cmap('RdBu')
cm._init()
alphas = (np.abs(np.linspace(-1.0, 1.0, cm.N)))
alphas = np.ones_like(alphas)*alpha
cm._lut[:-3, -1] = alphas
levels = []
colors = []
min_val = np.abs(ig.min())
max_val = np.abs(ig.max())
total_max = max(min_val, max_val)
def get_color(val):
# value relative -1 .. 1
rel_val = val / color_total_max
# shift around 0
rel_val = (rel_val + np.sign(rel_val) * color_offset) / (1+color_offset)
# transform to 0 .. 1
rel_val = (0.5 + rel_val / 2)
return cm(rel_val)
if min_val / total_max > threshold:
for l in [1.0]+rel_levels[::-1]:
val = -l*min_val
levels.append(val)
colors.append(get_color(val))
else:
levels.append(-total_max)
colors.append('white')
# We want to use the color from the value nearer to zero
colors = colors[1:]
colors.append((1.0, 1.0, 1.0, 0.0))
if max_val / total_max > threshold:
for l in rel_levels+[1.0]:
val = l*max_val
levels.append(val)
colors.append(get_color(val))
else:
levels.append(total_max)
#print rel_vals
ax.contourf(ig, levels=levels,
colors=colors,
vmin=-color_total_max, vmax=color_total_max
)
ax.contour(ig, levels=levels,
# colors=colors,
# vmin=-color_range, vmax=color_range
colors = 'gray',
linestyles='solid',
linewidths=0.6*thickness
)
if plot_color_bar:
## Draw color range bar
h = 100
w = 10
t = np.empty((100, 10, 4))
for y in range(h):
for x in range(w):
val = (y/h) * (color_range[1] - color_range[0]) + color_range[0]
color = np.asarray(get_color(val))
if not -min_val <= val <= max_val:
color[-1] *= 0.4
else:
color[-1] = 1
t[y, x, :] = color
ax.imshow(t, extent=(0.95*ig.shape[1], 0.98*ig.shape[1],
0.1*ig.shape[0], 0.9*ig.shape[0]))
ax.set_xlim(0, ig.shape[1])
ax.set_ylim(ig.shape[0], 0)
if frame:
# Just a frame
ax.set_xticks([])
ax.set_yticks([])
        [i.set_linewidth(i.get_linewidth() * thickness) for i in ax.spines.values()]
else:
ax.set_axis_off()
def normalize_log_density(log_density):
""" convertes a log density into a map of the cummulative distribution function.
"""
density = np.exp(log_density)
flat_density = density.flatten()
inds = flat_density.argsort()[::-1]
sorted_density = flat_density[inds]
cummulative = np.cumsum(sorted_density)
unsorted_cummulative = cummulative[np.argsort(inds)]
return unsorted_cummulative.reshape(log_density.shape)
def visualize_distribution(log_densities, ax = None):
if ax is None:
ax = plt.gca()
t = normalize_log_density(log_densities)
img = ax.imshow(t, cmap=plt.cm.viridis)
    levels = [0, 0.25, 0.5, 0.75, 1.0]
cs = ax.contour(t, levels=levels, colors='black')
#plt.clabel(cs)
return img, cs
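# Usage sketch (hypothetical helper, not part of the public API; assumes
# matplotlib is available): plot the CDF map of a uniform distribution over
# a 64x64 grid.
def _example_visualize():
    log_density = np.log(np.full((64, 64), 1.0 / (64 * 64)))
    return visualize_distribution(log_density)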
|
529fce474217d5b0960ce03a3061daf58528846f
|
7f620e7902c0b9ccb1fcfd1427acd5936ea33814
|
/tests/api/api/test_client_spec.py
|
7c8b63aeec9bf1850c3618d2e18d95f7dcf7c1f3
|
[
"Apache-2.0"
] |
permissive
|
mlrun/mlrun
|
2074c230070129ce3becb211b92c90b29a2ce850
|
b5fe0c05ae7f5818a4a5a5a40245c851ff9b2c77
|
refs/heads/development
| 2023-09-06T00:09:21.546135
| 2023-09-05T19:38:13
| 2023-09-05T19:38:13
| 205,706,595
| 1,093
| 229
|
Apache-2.0
| 2023-09-14T14:14:10
| 2019-09-01T16:59:19
|
Python
|
UTF-8
|
Python
| false
| false
| 8,598
|
py
|
test_client_spec.py
|
# Copyright 2023 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import http
import json
import unittest.mock
import fastapi.testclient
import kubernetes
import sqlalchemy.orm
import mlrun
import mlrun.api.crud
import mlrun.api.utils.clients.iguazio
import mlrun.common.schemas
import mlrun.errors
import mlrun.runtimes
import mlrun.utils.version
def test_client_spec(
db: sqlalchemy.orm.Session, client: fastapi.testclient.TestClient
) -> None:
k8s_api = kubernetes.client.ApiClient()
overridden_ui_projects_prefix = "some-prefix"
mlrun.mlconf.ui.projects_prefix = overridden_ui_projects_prefix
nuclio_version = "x.x.x"
mlrun.mlconf.nuclio_version = nuclio_version
mlrun.mlconf.function_defaults.preemption_mode = "constrain"
node_selector = {"label-1": "val1"}
mlrun.mlconf.preemptible_nodes.node_selector = base64.b64encode(
json.dumps(node_selector).encode("utf-8")
)
ce_mode = "some-ce-mode"
ce_release = "y.y.y"
mlrun.mlconf.ce.mode = ce_mode
mlrun.mlconf.ce.release = ce_release
feature_store_data_prefix_default = "feature-store-data-prefix-default"
feature_store_data_prefix_nosql = "feature-store-data-prefix-nosql"
feature_store_data_prefix_redisnosql = "feature-store-data-prefix-redisnosql"
mlrun.mlconf.feature_store.data_prefixes.default = feature_store_data_prefix_default
mlrun.mlconf.feature_store.data_prefixes.nosql = feature_store_data_prefix_nosql
mlrun.mlconf.feature_store.data_prefixes.redisnosql = (
feature_store_data_prefix_redisnosql
)
tolerations = [
kubernetes.client.V1Toleration(
effect="NoSchedule",
key="test1",
operator="Exists",
toleration_seconds=3600,
)
]
serialized_tolerations = k8s_api.sanitize_for_serialization(tolerations)
mlrun.mlconf.preemptible_nodes.tolerations = base64.b64encode(
json.dumps(serialized_tolerations).encode("utf-8")
)
mlrun.mlconf.httpdb.logs.pipelines.pull_state.mode = "enabled"
response = client.get("client-spec")
assert response.status_code == http.HTTPStatus.OK.value
response_body = response.json()
assert response_body["scrape_metrics"] is None
assert response_body["ui_projects_prefix"] == overridden_ui_projects_prefix
assert response_body["nuclio_version"] == nuclio_version
# check nuclio_version cache
mlrun.mlconf.nuclio_version = "y.y.y"
response = client.get("client-spec")
assert response.status_code == http.HTTPStatus.OK.value
response_body = response.json()
assert response_body["nuclio_version"] == nuclio_version
# check default_function_pod_resources when default
assert response_body["default_function_pod_resources"] is None
# check default_function_pod_resources when values set
mlrun.mlconf.default_function_pod_resources = {
"requests": {"cpu": "25m", "memory": "1M", "gpu": ""},
"limits": {"cpu": "2", "memory": "1G", "gpu": ""},
}
response = client.get("client-spec")
assert response.status_code == http.HTTPStatus.OK.value
response_body = response.json()
assert (
response_body["default_function_pod_resources"]
== mlrun.mlconf.default_function_pod_resources.to_dict()
)
assert (
response_body["default_preemption_mode"]
== mlrun.mlconf.function_defaults.preemption_mode
)
assert response_body[
"preemptible_nodes_node_selector"
] == mlrun.mlconf.preemptible_nodes.node_selector.decode("utf-8")
assert response_body[
"preemptible_nodes_tolerations"
] == mlrun.mlconf.preemptible_nodes.tolerations.decode("utf-8")
assert response_body["logs"] == mlrun.mlconf.httpdb.logs.to_dict()
assert response_body["logs"]["pipelines"]["pull_state"]["mode"] == "enabled"
assert response_body["feature_store_data_prefixes"]["default"] == (
feature_store_data_prefix_default
)
assert response_body["feature_store_data_prefixes"]["nosql"] == (
feature_store_data_prefix_nosql
)
assert response_body["feature_store_data_prefixes"]["redisnosql"] == (
feature_store_data_prefix_redisnosql
)
assert response_body["ce_mode"] == response_body["ce"]["mode"] == ce_mode
assert response_body["ce"]["release"] == ce_release
def test_client_spec_response_based_on_client_version(
db: sqlalchemy.orm.Session, client: fastapi.testclient.TestClient
) -> None:
response = client.get("client-spec")
assert response.status_code == http.HTTPStatus.OK.value
response_body = response.json()
assert response_body["kfp_image"] == "mlrun/mlrun:unstable"
assert response_body["dask_kfp_image"] == "mlrun/ml-base:unstable"
response = client.get(
"client-spec",
headers={
mlrun.common.schemas.HeaderNames.client_version: "",
mlrun.common.schemas.HeaderNames.python_version: "",
},
)
assert response.status_code == http.HTTPStatus.OK.value
response_body = response.json()
assert response_body["kfp_image"] == "mlrun/mlrun:unstable"
assert response_body["dask_kfp_image"] == "mlrun/ml-base:unstable"
# test response when the server has a version
with unittest.mock.patch.object(
mlrun.utils.version.Version, "get", return_value={"version": "1.3.0-rc23"}
):
response = client.get(
"client-spec",
headers={
mlrun.common.schemas.HeaderNames.client_version: "",
mlrun.common.schemas.HeaderNames.python_version: "",
},
)
assert response.status_code == http.HTTPStatus.OK.value
response_body = response.json()
assert response_body["kfp_image"] == "mlrun/mlrun:1.3.0-rc23"
assert response_body["dask_kfp_image"] == "mlrun/ml-base:1.3.0-rc23"
# test clients older than 1.3.0, when client only provided client version
response = client.get(
"client-spec",
headers={
mlrun.common.schemas.HeaderNames.client_version: "1.2.0",
},
)
assert response.status_code == http.HTTPStatus.OK.value
response_body = response.json()
assert response_body["kfp_image"] == "mlrun/mlrun:1.2.0"
assert response_body["dask_kfp_image"] == "mlrun/ml-base:1.2.0"
# test clients from 1.3.0+ and return based also on the client python version
response = client.get(
"client-spec",
headers={
mlrun.common.schemas.HeaderNames.client_version: "1.3.0-rc20",
mlrun.common.schemas.HeaderNames.python_version: "3.7.13",
},
)
assert response.status_code == http.HTTPStatus.OK.value
response_body = response.json()
assert response_body["kfp_image"] == "mlrun/mlrun:1.3.0-rc20-py37"
assert response_body["dask_kfp_image"] == "mlrun/ml-base:1.3.0-rc20-py37"
response = client.get(
"client-spec",
headers={
mlrun.common.schemas.HeaderNames.client_version: "1.3.0-rc20",
mlrun.common.schemas.HeaderNames.python_version: "3.9.13",
},
)
assert response.status_code == http.HTTPStatus.OK.value
response_body = response.json()
assert response_body["kfp_image"] == "mlrun/mlrun:1.3.0-rc20"
assert response_body["dask_kfp_image"] == "mlrun/ml-base:1.3.0-rc20"
# verify that we are falling back to resolve only by server
response = client.get(
"client-spec",
headers={
mlrun.common.schemas.HeaderNames.client_version: "test-integration",
mlrun.common.schemas.HeaderNames.python_version: "3.9.13",
},
)
assert response.status_code == http.HTTPStatus.OK.value
response_body = response.json()
assert response_body["kfp_image"] == "mlrun/mlrun:1.3.0-rc23"
assert response_body["dask_kfp_image"] == "mlrun/ml-base:1.3.0-rc23"
|
af3ef43f7983de51240025be986af4b6b836e590
|
3a6a211ea0d32405497fbd6486c490bb147e25f9
|
/third_party/Paste/paste/wsgilib.py
|
98299e24734a7fa649f56948d077627b5ff5ae4c
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
catapult-project/catapult
|
e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0
|
53102de187a48ac2cfc241fef54dcbc29c453a8e
|
refs/heads/main
| 2021-05-25T07:37:22.832505
| 2021-05-24T08:01:49
| 2021-05-25T06:07:38
| 33,947,548
| 2,032
| 742
|
BSD-3-Clause
| 2022-08-26T16:01:18
| 2015-04-14T17:49:05
|
HTML
|
UTF-8
|
Python
| false
| false
| 20,285
|
py
|
wsgilib.py
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
A module of many disparate routines.
"""
from __future__ import print_function
# functions which moved to paste.request and paste.response
# Deprecated around 15 Dec 2005
from paste.request import get_cookies, parse_querystring, parse_formvars
from paste.request import construct_url, path_info_split, path_info_pop
from paste.response import HeaderDict, has_header, header_value, remove_header
from paste.response import error_body_response, error_response, error_response_app
from traceback import print_exception
import six
import sys
from six.moves import cStringIO as StringIO
from six.moves.urllib.parse import unquote, urlsplit
import warnings
__all__ = ['add_close', 'add_start_close', 'capture_output', 'catch_errors',
'catch_errors_app', 'chained_app_iters', 'construct_url',
'dump_environ', 'encode_unicode_app_iter', 'error_body_response',
'error_response', 'get_cookies', 'has_header', 'header_value',
'interactive', 'intercept_output', 'path_info_pop',
'path_info_split', 'raw_interactive', 'send_file']
class add_close(object):
"""
    An iterable that iterates over app_iter, then calls
    close_func.
"""
def __init__(self, app_iterable, close_func):
self.app_iterable = app_iterable
self.app_iter = iter(app_iterable)
self.close_func = close_func
self._closed = False
def __iter__(self):
return self
    def next(self):
        return next(self.app_iter)
    __next__ = next
def close(self):
self._closed = True
if hasattr(self.app_iterable, 'close'):
self.app_iterable.close()
self.close_func()
def __del__(self):
if not self._closed:
# We can't raise an error or anything at this stage
print("Error: app_iter.close() was not called when finishing "
"WSGI request. finalization function %s not called"
% self.close_func, file=sys.stderr)
class add_start_close(object):
"""
    An iterable that iterates over app_iter, calls start_func
before the first item is returned, then calls close_func at the
end.
"""
def __init__(self, app_iterable, start_func, close_func=None):
self.app_iterable = app_iterable
self.app_iter = iter(app_iterable)
self.first = True
self.start_func = start_func
self.close_func = close_func
self._closed = False
def __iter__(self):
return self
def next(self):
if self.first:
self.start_func()
self.first = False
return next(self.app_iter)
__next__ = next
def close(self):
self._closed = True
if hasattr(self.app_iterable, 'close'):
self.app_iterable.close()
if self.close_func is not None:
self.close_func()
def __del__(self):
if not self._closed:
# We can't raise an error or anything at this stage
print("Error: app_iter.close() was not called when finishing "
"WSGI request. finalization function %s not called"
% self.close_func, file=sys.stderr)
class chained_app_iters(object):
"""
Chains several app_iters together, also delegating .close() to each
of them.
"""
def __init__(self, *chained):
self.app_iters = chained
self.chained = [iter(item) for item in chained]
self._closed = False
def __iter__(self):
return self
    def next(self):
        if len(self.chained) == 1:
            return next(self.chained[0])
        else:
            try:
                return next(self.chained[0])
            except StopIteration:
                self.chained.pop(0)
                return self.next()
    __next__ = next
def close(self):
self._closed = True
got_exc = None
for app_iter in self.app_iters:
try:
if hasattr(app_iter, 'close'):
app_iter.close()
except:
got_exc = sys.exc_info()
if got_exc:
six.reraise(got_exc[0], got_exc[1], got_exc[2])
    def __del__(self):
        if not self._closed:
            # We can't raise an error or anything at this stage
            print("Error: app_iter.close() was not called when finishing "
                  "WSGI request; the chained app_iters were not closed.",
                  file=sys.stderr)
class encode_unicode_app_iter(object):
"""
Encodes an app_iterable's unicode responses as strings
"""
def __init__(self, app_iterable, encoding=sys.getdefaultencoding(),
errors='strict'):
self.app_iterable = app_iterable
self.app_iter = iter(app_iterable)
self.encoding = encoding
self.errors = errors
def __iter__(self):
return self
def next(self):
content = next(self.app_iter)
if isinstance(content, six.text_type):
content = content.encode(self.encoding, self.errors)
return content
__next__ = next
def close(self):
if hasattr(self.app_iterable, 'close'):
self.app_iterable.close()
def catch_errors(application, environ, start_response, error_callback,
ok_callback=None):
"""
Runs the application, and returns the application iterator (which should be
passed upstream). If an error occurs then error_callback will be called with
exc_info as its sole argument. If no errors occur and ok_callback is given,
then it will be called with no arguments.
"""
try:
app_iter = application(environ, start_response)
except:
error_callback(sys.exc_info())
raise
if type(app_iter) in (list, tuple):
# These won't produce exceptions
if ok_callback:
ok_callback()
return app_iter
else:
return _wrap_app_iter(app_iter, error_callback, ok_callback)
class _wrap_app_iter(object):
def __init__(self, app_iterable, error_callback, ok_callback):
self.app_iterable = app_iterable
self.app_iter = iter(app_iterable)
self.error_callback = error_callback
self.ok_callback = ok_callback
if hasattr(self.app_iterable, 'close'):
self.close = self.app_iterable.close
def __iter__(self):
return self
    def next(self):
        try:
            return next(self.app_iter)
        except StopIteration:
            if self.ok_callback:
                self.ok_callback()
            raise
        except:
            self.error_callback(sys.exc_info())
            raise
    __next__ = next
def catch_errors_app(application, environ, start_response, error_callback_app,
ok_callback=None, catch=Exception):
"""
Like ``catch_errors``, except error_callback_app should be a
callable that will receive *three* arguments -- ``environ``,
``start_response``, and ``exc_info``. It should call
``start_response`` (*with* the exc_info argument!) and return an
iterator.
"""
try:
app_iter = application(environ, start_response)
except catch:
return error_callback_app(environ, start_response, sys.exc_info())
if type(app_iter) in (list, tuple):
# These won't produce exceptions
if ok_callback is not None:
ok_callback()
return app_iter
else:
return _wrap_app_iter_app(
environ, start_response, app_iter,
error_callback_app, ok_callback, catch=catch)
class _wrap_app_iter_app(object):
def __init__(self, environ, start_response, app_iterable,
error_callback_app, ok_callback, catch=Exception):
self.environ = environ
self.start_response = start_response
self.app_iterable = app_iterable
self.app_iter = iter(app_iterable)
self.error_callback_app = error_callback_app
self.ok_callback = ok_callback
self.catch = catch
if hasattr(self.app_iterable, 'close'):
self.close = self.app_iterable.close
def __iter__(self):
return self
    def next(self):
        try:
            return next(self.app_iter)
        except StopIteration:
            if self.ok_callback:
                self.ok_callback()
            raise
        except self.catch:
            if hasattr(self.app_iterable, 'close'):
                try:
                    self.app_iterable.close()
                except Exception:
                    # @@: Print to wsgi.errors?
                    pass
            new_app_iterable = self.error_callback_app(
                self.environ, self.start_response, sys.exc_info())
            # Swap in the error app's iterator; subsequent next() calls
            # iterate it instead.
            self.app_iter = iter(new_app_iterable)
            if hasattr(new_app_iterable, 'close'):
                self.close = new_app_iterable.close
            return next(self.app_iter)
    __next__ = next
def raw_interactive(application, path='', raise_on_wsgi_error=False,
**environ):
"""
Runs the application in a fake environment.
"""
assert "path_info" not in environ, "argument list changed"
if raise_on_wsgi_error:
errors = ErrorRaiser()
else:
errors = six.BytesIO()
basic_environ = {
# mandatory CGI variables
'REQUEST_METHOD': 'GET', # always mandatory
'SCRIPT_NAME': '', # may be empty if app is at the root
'PATH_INFO': '', # may be empty if at root of app
'SERVER_NAME': 'localhost', # always mandatory
'SERVER_PORT': '80', # always mandatory
'SERVER_PROTOCOL': 'HTTP/1.0',
# mandatory wsgi variables
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': six.BytesIO(),
'wsgi.errors': errors,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False,
}
if path:
(_, _, path_info, query, fragment) = urlsplit(str(path))
path_info = unquote(path_info)
# urlsplit returns unicode so coerce it back to str
path_info, query = str(path_info), str(query)
basic_environ['PATH_INFO'] = path_info
if query:
basic_environ['QUERY_STRING'] = query
for name, value in environ.items():
name = name.replace('__', '.')
basic_environ[name] = value
if ('SERVER_NAME' in basic_environ
and 'HTTP_HOST' not in basic_environ):
basic_environ['HTTP_HOST'] = basic_environ['SERVER_NAME']
istream = basic_environ['wsgi.input']
if isinstance(istream, bytes):
basic_environ['wsgi.input'] = six.BytesIO(istream)
basic_environ['CONTENT_LENGTH'] = len(istream)
data = {}
output = []
headers_set = []
headers_sent = []
def start_response(status, headers, exc_info=None):
if exc_info:
try:
if headers_sent:
# Re-raise original exception only if headers sent
six.reraise(exc_info[0], exc_info[1], exc_info[2])
finally:
# avoid dangling circular reference
exc_info = None
elif headers_set:
# You cannot set the headers more than once, unless the
# exc_info is provided.
raise AssertionError("Headers already set and no exc_info!")
headers_set.append(True)
data['status'] = status
data['headers'] = headers
return output.append
app_iter = application(basic_environ, start_response)
try:
try:
for s in app_iter:
if not isinstance(s, six.binary_type):
raise ValueError(
"The app_iter response can only contain bytes (not "
"unicode); got: %r" % s)
headers_sent.append(True)
if not headers_set:
raise AssertionError("Content sent w/o headers!")
output.append(s)
except TypeError as e:
# Typically "iteration over non-sequence", so we want
# to give better debugging information...
e.args = ((e.args[0] + ' iterable: %r' % app_iter),) + e.args[1:]
raise
finally:
if hasattr(app_iter, 'close'):
app_iter.close()
return (data['status'], data['headers'], b''.join(output),
errors.getvalue())
class ErrorRaiser(object):
def flush(self):
pass
def write(self, value):
if not value:
return
raise AssertionError(
"No errors should be written (got: %r)" % value)
def writelines(self, seq):
raise AssertionError(
"No errors should be written (got lines: %s)" % list(seq))
def getvalue(self):
return ''
def interactive(*args, **kw):
"""
    Runs the application interactively, wrapping `raw_interactive` but
    returning the output in a formatted way.
"""
status, headers, content, errors = raw_interactive(*args, **kw)
full = StringIO()
if errors:
full.write('Errors:\n')
full.write(errors.strip())
full.write('\n----------end errors\n')
full.write(status + '\n')
for name, value in headers:
full.write('%s: %s\n' % (name, value))
full.write('\n')
full.write(content)
return full.getvalue()
interactive.proxy = 'raw_interactive'
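# Usage sketch (hypothetical helper, not part of the public API): exercise a
# WSGI application in-process, without a server, via raw_interactive.
def _example_raw_interactive():
    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello']
    status, headers, body, errors = raw_interactive(app, '/hello')
    return status, body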
def dump_environ(environ, start_response):
"""
Application which simply dumps the current environment
variables out as a plain text response.
"""
output = []
keys = list(environ.keys())
keys.sort()
for k in keys:
v = str(environ[k]).replace("\n","\n ")
output.append("%s: %s\n" % (k, v))
output.append("\n")
content_length = environ.get("CONTENT_LENGTH", '')
if content_length:
output.append(environ['wsgi.input'].read(int(content_length)))
output.append("\n")
output = "".join(output)
if six.PY3:
output = output.encode('utf8')
headers = [('Content-Type', 'text/plain'),
('Content-Length', str(len(output)))]
start_response("200 OK", headers)
return [output]
def send_file(filename):
warnings.warn(
"wsgilib.send_file has been moved to paste.fileapp.FileApp",
DeprecationWarning, 2)
from paste import fileapp
return fileapp.FileApp(filename)
def capture_output(environ, start_response, application):
"""
Runs application with environ and start_response, and captures
status, headers, and body.
Sends status and header, but *not* body. Returns (status,
headers, body). Typically this is used like:
.. code-block:: python
def dehtmlifying_middleware(application):
def replacement_app(environ, start_response):
status, headers, body = capture_output(
environ, start_response, application)
content_type = header_value(headers, 'content-type')
if (not content_type
or not content_type.startswith('text/html')):
return [body]
body = re.sub(r'<.*?>', '', body)
return [body]
return replacement_app
"""
warnings.warn(
'wsgilib.capture_output has been deprecated in favor '
'of wsgilib.intercept_output',
DeprecationWarning, 2)
data = []
output = StringIO()
def replacement_start_response(status, headers, exc_info=None):
if data:
data[:] = []
data.append(status)
data.append(headers)
start_response(status, headers, exc_info)
return output.write
app_iter = application(environ, replacement_start_response)
try:
for item in app_iter:
output.write(item)
finally:
if hasattr(app_iter, 'close'):
app_iter.close()
if not data:
data.append(None)
if len(data) < 2:
data.append(None)
data.append(output.getvalue())
return data
def intercept_output(environ, application, conditional=None,
start_response=None):
"""
Runs application with environ and captures status, headers, and
    body. None of these are sent on; you must send them on yourself (unlike
    ``capture_output``).
Typically this is used like:
.. code-block:: python
def dehtmlifying_middleware(application):
def replacement_app(environ, start_response):
status, headers, body = intercept_output(
environ, application)
start_response(status, headers)
content_type = header_value(headers, 'content-type')
if (not content_type
or not content_type.startswith('text/html')):
return [body]
body = re.sub(r'<.*?>', '', body)
return [body]
return replacement_app
A third optional argument ``conditional`` should be a function
that takes ``conditional(status, headers)`` and returns False if
the request should not be intercepted. In that case
``start_response`` will be called and ``(None, None, app_iter)``
will be returned. You must detect that in your code and return
the app_iter, like:
.. code-block:: python
def dehtmlifying_middleware(application):
def replacement_app(environ, start_response):
status, headers, body = intercept_output(
environ, application,
lambda s, h: header_value(headers, 'content-type').startswith('text/html'),
start_response)
if status is None:
return body
start_response(status, headers)
body = re.sub(r'<.*?>', '', body)
return [body]
return replacement_app
"""
if conditional is not None and start_response is None:
raise TypeError(
"If you provide conditional you must also provide "
"start_response")
data = []
output = StringIO()
def replacement_start_response(status, headers, exc_info=None):
if conditional is not None and not conditional(status, headers):
data.append(None)
return start_response(status, headers, exc_info)
if data:
data[:] = []
data.append(status)
data.append(headers)
return output.write
app_iter = application(environ, replacement_start_response)
if data[0] is None:
return (None, None, app_iter)
try:
for item in app_iter:
output.write(item)
finally:
if hasattr(app_iter, 'close'):
app_iter.close()
if not data:
data.append(None)
if len(data) < 2:
data.append(None)
data.append(output.getvalue())
return data
## Deprecation warning wrapper:
class ResponseHeaderDict(HeaderDict):
def __init__(self, *args, **kw):
warnings.warn(
"The class wsgilib.ResponseHeaderDict has been moved "
"to paste.response.HeaderDict",
DeprecationWarning, 2)
HeaderDict.__init__(self, *args, **kw)
def _warn_deprecated(new_func):
new_name = new_func.func_name
new_path = new_func.func_globals['__name__'] + '.' + new_name
def replacement(*args, **kw):
warnings.warn(
"The function wsgilib.%s has been moved to %s"
% (new_name, new_path),
DeprecationWarning, 2)
return new_func(*args, **kw)
try:
replacement.func_name = new_func.func_name
except:
pass
return replacement
# Put warnings wrapper in place for all public functions that
# were imported from elsewhere:
for _name in __all__:
_func = globals()[_name]
if (hasattr(_func, 'func_globals')
and _func.func_globals['__name__'] != __name__):
globals()[_name] = _warn_deprecated(_func)
if __name__ == '__main__':
import doctest
doctest.testmod()
|
3a5ba894ef51fd8e17a3748d8d0ca81f3f2c0051
|
5daf77b700fa270058d4267b81079e01cc0eb8c0
|
/channels/routing.py
|
efb428ac91830ecc399cfb8fc06442b25266c36a
|
[
"BSD-3-Clause"
] |
permissive
|
django/channels
|
ca834fc403b7a829ad6325d76d2ee03fc1894cee
|
093326080b4984fcb39f51955681800e39e14f43
|
refs/heads/main
| 2023-08-17T00:00:34.306150
| 2023-06-14T15:47:48
| 2023-06-14T15:47:48
| 36,818,673
| 4,853
| 970
|
BSD-3-Clause
| 2023-09-04T20:57:26
| 2015-06-03T17:16:20
|
Python
|
UTF-8
|
Python
| false
| false
| 5,772
|
py
|
routing.py
|
import importlib
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.urls.exceptions import Resolver404
from django.urls.resolvers import URLResolver
"""
All Routing instances inside this file are also valid ASGI applications - with
new Channels routing, whatever you end up with as the top level object is just
served up as the "ASGI application".
"""
def get_default_application():
"""
Gets the default application, set in the ASGI_APPLICATION setting.
"""
try:
path, name = settings.ASGI_APPLICATION.rsplit(".", 1)
except (ValueError, AttributeError):
raise ImproperlyConfigured("Cannot find ASGI_APPLICATION setting.")
try:
module = importlib.import_module(path)
except ImportError:
raise ImproperlyConfigured("Cannot import ASGI_APPLICATION module %r" % path)
try:
value = getattr(module, name)
except AttributeError:
raise ImproperlyConfigured(
"Cannot find %r in ASGI_APPLICATION module %s" % (name, path)
)
return value
DEPRECATION_MSG = """
Using ProtocolTypeRouter without an explicit "http" key is deprecated.
Given that you have not passed an "http" key, you likely should use Django's
get_asgi_application():

    from django.core.asgi import get_asgi_application

    application = ProtocolTypeRouter({
        "http": get_asgi_application(),
        # Other protocols here.
    })
"""
class ProtocolTypeRouter:
"""
Takes a mapping of protocol type names to other Application instances,
and dispatches to the right one based on protocol name (or raises an error)
"""
def __init__(self, application_mapping):
self.application_mapping = application_mapping
async def __call__(self, scope, receive, send):
if scope["type"] in self.application_mapping:
application = self.application_mapping[scope["type"]]
return await application(scope, receive, send)
else:
raise ValueError(
"No application configured for scope type %r" % scope["type"]
)
class URLRouter:
"""
Routes to different applications/consumers based on the URL path.
Works with anything that has a ``path`` key, but intended for WebSocket
and HTTP. Uses Django's django.urls objects for resolution -
path() or re_path().
"""
#: This router wants to do routing based on scope[path] or
#: scope[path_remaining]. ``path()`` entries in URLRouter should not be
#: treated as endpoints (ended with ``$``), but similar to ``include()``.
_path_routing = True
def __init__(self, routes):
self.routes = routes
for route in self.routes:
# The inner ASGI app wants to do additional routing, route
# must not be an endpoint
if getattr(route.callback, "_path_routing", False) is True:
route.pattern._is_endpoint = False
if not route.callback and isinstance(route, URLResolver):
raise ImproperlyConfigured(
"%s: include() is not supported in URLRouter. Use nested"
" URLRouter instances instead." % (route,)
)
async def __call__(self, scope, receive, send):
# Get the path
path = scope.get("path_remaining", scope.get("path", None))
if path is None:
raise ValueError("No 'path' key in connection scope, cannot route URLs")
# Remove leading / to match Django's handling
path = path.lstrip("/")
# Run through the routes we have until one matches
for route in self.routes:
try:
match = route.pattern.match(path)
if match:
new_path, args, kwargs = match
# Add defaults to kwargs from the URL pattern.
kwargs.update(route.default_args)
# Add args or kwargs into the scope
outer = scope.get("url_route", {})
application = route.callback
return await application(
dict(
scope,
path_remaining=new_path,
url_route={
"args": outer.get("args", ()) + args,
"kwargs": {**outer.get("kwargs", {}), **kwargs},
},
),
receive,
send,
)
except Resolver404:
pass
else:
if "path_remaining" in scope:
raise Resolver404("No route found for path %r." % path)
# We are the outermost URLRouter
raise ValueError("No route found for path %r." % path)
class ChannelNameRouter:
"""
Maps to different applications based on a "channel" key in the scope
(intended for the Channels worker mode)
"""
def __init__(self, application_mapping):
self.application_mapping = application_mapping
async def __call__(self, scope, receive, send):
if "channel" not in scope:
raise ValueError(
"ChannelNameRouter got a scope without a 'channel' key. "
+ "Did you make sure it's only being used for 'channel' type messages?"
)
if scope["channel"] in self.application_mapping:
application = self.application_mapping[scope["channel"]]
return await application(scope, receive, send)
else:
raise ValueError(
"No application configured for channel name %r" % scope["channel"]
)
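# A minimal wiring sketch (with a hypothetical `ChatConsumer`) showing how the
# routers above compose: ProtocolTypeRouter dispatches on scope["type"], and
# URLRouter resolves scope["path"] against ordinary Django path() routes.
#
#     from django.core.asgi import get_asgi_application
#     from django.urls import path
#     application = ProtocolTypeRouter(
#         {
#             "http": get_asgi_application(),
#             "websocket": URLRouter(
#                 [path("chat/<str:room>/", ChatConsumer.as_asgi())]
#             ),
#         }
#     )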
|
5584c2156960ae8c15092611d9d5ce92555cb7d5
|
28cf7b16dd29a5802d09b44b0186f6ae2c5ff0ed
|
/kuryr_kubernetes/cni/prometheus_exporter.py
|
d04b39621eb74e234a247fec44a07d7cc8e31afd
|
[
"Apache-2.0"
] |
permissive
|
openstack/kuryr-kubernetes
|
c292826abfb8aa0d3f8ef3b1007362162db16956
|
4993c7a4b2d7e4b053832bf39602f2573fad6266
|
refs/heads/master
| 2023-08-18T19:21:02.487908
| 2023-08-03T13:58:11
| 2023-08-03T13:58:11
| 58,626,548
| 169
| 78
|
Apache-2.0
| 2022-04-13T02:27:52
| 2016-05-12T09:14:29
|
Python
|
UTF-8
|
Python
| false
| false
| 2,619
|
py
|
prometheus_exporter.py
|
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flask
import prometheus_client
from prometheus_client.exposition import generate_latest
from oslo_config import cfg
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
_INF = float("inf")
class CNIPrometheusExporter(object):
"""Provides metrics to Prometheus"""
def __init__(self):
self.application = flask.Flask('prometheus-exporter')
self.ctx = None
self.application.add_url_rule(
'/metrics', methods=['GET'], view_func=self.metrics)
self.headers = {'Connection': 'close'}
self._create_metric()
def update_metric(self, labels, duration):
"""Observes the request duration value and count it in buckets"""
self.cni_requests_duration.labels(**labels).observe(duration)
def metrics(self):
"""Provides the registered metrics"""
collected_metric = generate_latest(self.registry)
return flask.Response(collected_metric, mimetype='text/plain')
def run(self):
# Disable obtrusive werkzeug logs.
logging.getLogger('werkzeug').setLevel(logging.WARNING)
address = '::'
try:
LOG.info('Starting CNI Prometheus exporter')
self.application.run(
address, CONF.prometheus_exporter.cni_exporter_port)
except Exception:
LOG.exception('Failed to start Prometheus exporter')
raise
def _create_metric(self):
"""Creates a registry and records a new Histogram metric."""
self.registry = prometheus_client.CollectorRegistry()
metric_name = 'kuryr_cni_request_duration_seconds'
metric_description = 'The duration of CNI requests'
buckets = (10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120,
130, 140, 150, 160, 170, 180, _INF)
self.cni_requests_duration = prometheus_client.Histogram(
metric_name, metric_description,
            labelnames=('command', 'error'), buckets=buckets,
registry=self.registry)
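# A hedged usage sketch: record one observation and render the exposition text
# directly, without starting the Flask app (the label keys match `labelnames`
# above; generate_latest is already imported at the top of this module).
#
#     exporter = CNIPrometheusExporter()
#     exporter.update_metric({'command': 'ADD', 'error': 'None'}, duration=12.5)
#     print(generate_latest(exporter.registry).decode())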
|
182f49f82fed346078e1ea0f8fd9ed781226e205
|
c7d3ae6e46a505cceea3cb6193412f4b5983bed3
|
/albumentations/core/composition.py
|
dfc5e86d4c69da48e57ccc2be0df6c49dcc6abff
|
[
"MIT"
] |
permissive
|
albumentations-team/albumentations
|
5f4e969f8185f1070413f5d85fb410cad3c5aa41
|
e3b47b3a127f92541cfeb16abbb44a6f8bf79cc8
|
refs/heads/master
| 2023-09-01T07:32:03.676066
| 2023-06-10T07:12:14
| 2023-06-10T07:12:14
| 136,265,021
| 9,022
| 1,208
|
MIT
| 2023-06-10T07:12:15
| 2018-06-06T03:10:50
|
Python
|
UTF-8
|
Python
| false
| false
| 22,235
|
py
|
composition.py
|
from __future__ import division
import random
import typing
import warnings
from collections import defaultdict
import numpy as np
from .. import random_utils
from .bbox_utils import BboxParams, BboxProcessor
from .keypoints_utils import KeypointParams, KeypointsProcessor
from .serialization import (
SERIALIZABLE_REGISTRY,
Serializable,
get_shortest_class_fullname,
instantiate_nonserializable,
)
from .transforms_interface import BasicTransform
from .utils import format_args, get_shape
__all__ = [
"BaseCompose",
"Compose",
"SomeOf",
"OneOf",
"OneOrOther",
"BboxParams",
"KeypointParams",
"ReplayCompose",
"Sequential",
]
REPR_INDENT_STEP = 2
TransformType = typing.Union[BasicTransform, "BaseCompose"]
TransformsSeqType = typing.Sequence[TransformType]
def get_always_apply(transforms: typing.Union["BaseCompose", TransformsSeqType]) -> TransformsSeqType:
new_transforms: typing.List[TransformType] = []
for transform in transforms: # type: ignore
if isinstance(transform, BaseCompose):
new_transforms.extend(get_always_apply(transform))
elif transform.always_apply:
new_transforms.append(transform)
return new_transforms
class BaseCompose(Serializable):
def __init__(self, transforms: TransformsSeqType, p: float):
if isinstance(transforms, (BaseCompose, BasicTransform)):
warnings.warn(
"transforms is single transform, but a sequence is expected! Transform will be wrapped into list."
)
transforms = [transforms]
self.transforms = transforms
self.p = p
self.replay_mode = False
self.applied_in_replay = False
def __len__(self) -> int:
return len(self.transforms)
def __call__(self, *args, **data) -> typing.Dict[str, typing.Any]:
raise NotImplementedError
def __getitem__(self, item: int) -> TransformType: # type: ignore
return self.transforms[item]
def __repr__(self) -> str:
return self.indented_repr()
def indented_repr(self, indent: int = REPR_INDENT_STEP) -> str:
args = {k: v for k, v in self._to_dict().items() if not (k.startswith("__") or k == "transforms")}
repr_string = self.__class__.__name__ + "(["
for t in self.transforms:
repr_string += "\n"
if hasattr(t, "indented_repr"):
t_repr = t.indented_repr(indent + REPR_INDENT_STEP) # type: ignore
else:
t_repr = repr(t)
repr_string += " " * indent + t_repr + ","
repr_string += "\n" + " " * (indent - REPR_INDENT_STEP) + "], {args})".format(args=format_args(args))
return repr_string
@classmethod
def get_class_fullname(cls) -> str:
return get_shortest_class_fullname(cls)
@classmethod
def is_serializable(cls) -> bool:
return True
def _to_dict(self) -> typing.Dict[str, typing.Any]:
return {
"__class_fullname__": self.get_class_fullname(),
"p": self.p,
"transforms": [t._to_dict() for t in self.transforms], # skipcq: PYL-W0212
}
def get_dict_with_id(self) -> typing.Dict[str, typing.Any]:
return {
"__class_fullname__": self.get_class_fullname(),
"id": id(self),
"params": None,
"transforms": [t.get_dict_with_id() for t in self.transforms],
}
def add_targets(self, additional_targets: typing.Optional[typing.Dict[str, str]]) -> None:
if additional_targets:
for t in self.transforms:
t.add_targets(additional_targets)
def set_deterministic(self, flag: bool, save_key: str = "replay") -> None:
for t in self.transforms:
t.set_deterministic(flag, save_key)
class Compose(BaseCompose):
"""Compose transforms and handle all transformations regarding bounding boxes
Args:
transforms (list): list of transformations to compose.
bbox_params (BboxParams): Parameters for bounding boxes transforms
keypoint_params (KeypointParams): Parameters for keypoints transforms
additional_targets (dict): Dict with keys - new target name, values - old target name. ex: {'image2': 'image'}
p (float): probability of applying all list of transforms. Default: 1.0.
        is_check_shapes (bool): If True, the shape consistency of images/mask/masks is checked on each call. Pass
            False to disable this check (do it only if you are sure about your data consistency).
"""
def __init__(
self,
transforms: TransformsSeqType,
bbox_params: typing.Optional[typing.Union[dict, "BboxParams"]] = None,
keypoint_params: typing.Optional[typing.Union[dict, "KeypointParams"]] = None,
additional_targets: typing.Optional[typing.Dict[str, str]] = None,
p: float = 1.0,
is_check_shapes: bool = True,
):
super(Compose, self).__init__(transforms, p)
self.processors: typing.Dict[str, typing.Union[BboxProcessor, KeypointsProcessor]] = {}
if bbox_params:
if isinstance(bbox_params, dict):
b_params = BboxParams(**bbox_params)
elif isinstance(bbox_params, BboxParams):
b_params = bbox_params
else:
raise ValueError("unknown format of bbox_params, please use `dict` or `BboxParams`")
self.processors["bboxes"] = BboxProcessor(b_params, additional_targets)
if keypoint_params:
if isinstance(keypoint_params, dict):
k_params = KeypointParams(**keypoint_params)
elif isinstance(keypoint_params, KeypointParams):
k_params = keypoint_params
else:
raise ValueError("unknown format of keypoint_params, please use `dict` or `KeypointParams`")
self.processors["keypoints"] = KeypointsProcessor(k_params, additional_targets)
if additional_targets is None:
additional_targets = {}
self.additional_targets = additional_targets
for proc in self.processors.values():
proc.ensure_transforms_valid(self.transforms)
self.add_targets(additional_targets)
self.is_check_args = True
self._disable_check_args_for_transforms(self.transforms)
self.is_check_shapes = is_check_shapes
@staticmethod
def _disable_check_args_for_transforms(transforms: TransformsSeqType) -> None:
for transform in transforms:
if isinstance(transform, BaseCompose):
Compose._disable_check_args_for_transforms(transform.transforms)
if isinstance(transform, Compose):
transform._disable_check_args()
def _disable_check_args(self) -> None:
self.is_check_args = False
def __call__(self, *args, force_apply: bool = False, **data) -> typing.Dict[str, typing.Any]:
if args:
raise KeyError("You have to pass data to augmentations as named arguments, for example: aug(image=image)")
if self.is_check_args:
self._check_args(**data)
assert isinstance(force_apply, (bool, int)), "force_apply must have bool or int type"
need_to_run = force_apply or random.random() < self.p
for p in self.processors.values():
p.ensure_data_valid(data)
transforms = self.transforms if need_to_run else get_always_apply(self.transforms)
check_each_transform = any(
getattr(item.params, "check_each_transform", False) for item in self.processors.values()
)
for p in self.processors.values():
p.preprocess(data)
for idx, t in enumerate(transforms):
data = t(**data)
if check_each_transform:
data = self._check_data_post_transform(data)
data = Compose._make_targets_contiguous(data) # ensure output targets are contiguous
for p in self.processors.values():
p.postprocess(data)
return data
def _check_data_post_transform(self, data: typing.Dict[str, typing.Any]) -> typing.Dict[str, typing.Any]:
rows, cols = get_shape(data["image"])
for p in self.processors.values():
if not getattr(p.params, "check_each_transform", False):
continue
for data_name in p.data_fields:
data[data_name] = p.filter(data[data_name], rows, cols)
return data
def _to_dict(self) -> typing.Dict[str, typing.Any]:
dictionary = super(Compose, self)._to_dict()
bbox_processor = self.processors.get("bboxes")
keypoints_processor = self.processors.get("keypoints")
dictionary.update(
{
"bbox_params": bbox_processor.params._to_dict() if bbox_processor else None, # skipcq: PYL-W0212
"keypoint_params": keypoints_processor.params._to_dict() # skipcq: PYL-W0212
if keypoints_processor
else None,
"additional_targets": self.additional_targets,
"is_check_shapes": self.is_check_shapes,
}
)
return dictionary
def get_dict_with_id(self) -> typing.Dict[str, typing.Any]:
dictionary = super().get_dict_with_id()
bbox_processor = self.processors.get("bboxes")
keypoints_processor = self.processors.get("keypoints")
dictionary.update(
{
"bbox_params": bbox_processor.params._to_dict() if bbox_processor else None, # skipcq: PYL-W0212
"keypoint_params": keypoints_processor.params._to_dict() # skipcq: PYL-W0212
if keypoints_processor
else None,
"additional_targets": self.additional_targets,
"params": None,
"is_check_shapes": self.is_check_shapes,
}
)
return dictionary
def _check_args(self, **kwargs) -> None:
checked_single = ["image", "mask"]
checked_multi = ["masks"]
check_bbox_param = ["bboxes"]
# ["bboxes", "keypoints"] could be almost any type, no need to check them
shapes = []
for data_name, data in kwargs.items():
internal_data_name = self.additional_targets.get(data_name, data_name)
if internal_data_name in checked_single:
if not isinstance(data, np.ndarray):
raise TypeError("{} must be numpy array type".format(data_name))
shapes.append(data.shape[:2])
if internal_data_name in checked_multi:
if data is not None:
if not isinstance(data[0], np.ndarray):
raise TypeError("{} must be list of numpy arrays".format(data_name))
shapes.append(data[0].shape[:2])
if internal_data_name in check_bbox_param and self.processors.get("bboxes") is None:
raise ValueError("bbox_params must be specified for bbox transformations")
if self.is_check_shapes and shapes and shapes.count(shapes[0]) != len(shapes):
raise ValueError(
"Height and Width of image, mask or masks should be equal. You can disable shapes check "
"by setting a parameter is_check_shapes=False of Compose class (do it only if you are sure "
"about your data consistency)."
)
@staticmethod
def _make_targets_contiguous(data: typing.Dict[str, typing.Any]) -> typing.Dict[str, typing.Any]:
result = {}
for key, value in data.items():
if isinstance(value, np.ndarray):
value = np.ascontiguousarray(value)
result[key] = value
return result
class OneOf(BaseCompose):
"""Select one of transforms to apply. Selected transform will be called with `force_apply=True`.
Transforms probabilities will be normalized to one 1, so in this case transforms probabilities works as weights.
Args:
transforms (list): list of transformations to compose.
p (float): probability of applying selected transform. Default: 0.5.
"""
def __init__(self, transforms: TransformsSeqType, p: float = 0.5):
super(OneOf, self).__init__(transforms, p)
transforms_ps = [t.p for t in self.transforms]
s = sum(transforms_ps)
self.transforms_ps = [t / s for t in transforms_ps]
def __call__(self, *args, force_apply: bool = False, **data) -> typing.Dict[str, typing.Any]:
if self.replay_mode:
for t in self.transforms:
data = t(**data)
return data
if self.transforms_ps and (force_apply or random.random() < self.p):
idx: int = random_utils.choice(len(self.transforms), p=self.transforms_ps)
t = self.transforms[idx]
data = t(force_apply=True, **data)
return data
class SomeOf(BaseCompose):
"""Select N transforms to apply. Selected transforms will be called with `force_apply=True`.
Transforms probabilities will be normalized to one 1, so in this case transforms probabilities works as weights.
Args:
transforms (list): list of transformations to compose.
n (int): number of transforms to apply.
replace (bool): Whether the sampled transforms are with or without replacement. Default: True.
p (float): probability of applying selected transform. Default: 1.
"""
def __init__(self, transforms: TransformsSeqType, n: int, replace: bool = True, p: float = 1):
super(SomeOf, self).__init__(transforms, p)
self.n = n
self.replace = replace
transforms_ps = [t.p for t in self.transforms]
s = sum(transforms_ps)
self.transforms_ps = [t / s for t in transforms_ps]
def __call__(self, *args, force_apply: bool = False, **data) -> typing.Dict[str, typing.Any]:
if self.replay_mode:
for t in self.transforms:
data = t(**data)
return data
if self.transforms_ps and (force_apply or random.random() < self.p):
idx = random_utils.choice(len(self.transforms), size=self.n, replace=self.replace, p=self.transforms_ps)
for i in idx: # type: ignore
t = self.transforms[i]
data = t(force_apply=True, **data)
return data
def _to_dict(self) -> typing.Dict[str, typing.Any]:
dictionary = super(SomeOf, self)._to_dict()
dictionary.update({"n": self.n, "replace": self.replace})
return dictionary
class OneOrOther(BaseCompose):
"""Select one or another transform to apply. Selected transform will be called with `force_apply=True`."""
def __init__(
self,
first: typing.Optional[TransformType] = None,
second: typing.Optional[TransformType] = None,
transforms: typing.Optional[TransformsSeqType] = None,
p: float = 0.5,
):
if transforms is None:
if first is None or second is None:
raise ValueError("You must set both first and second or set transforms argument.")
transforms = [first, second]
super(OneOrOther, self).__init__(transforms, p)
if len(self.transforms) != 2:
warnings.warn("Length of transforms is not equal to 2.")
def __call__(self, *args, force_apply: bool = False, **data) -> typing.Dict[str, typing.Any]:
if self.replay_mode:
for t in self.transforms:
data = t(**data)
return data
if random.random() < self.p:
return self.transforms[0](force_apply=True, **data)
return self.transforms[-1](force_apply=True, **data)
class PerChannel(BaseCompose):
"""Apply transformations per-channel
Args:
transforms (list): list of transformations to compose.
channels (sequence): channels to apply the transform to. Pass None to apply to all.
Default: None (apply to all)
p (float): probability of applying the transform. Default: 0.5.
"""
def __init__(
self, transforms: TransformsSeqType, channels: typing.Optional[typing.Sequence[int]] = None, p: float = 0.5
):
super(PerChannel, self).__init__(transforms, p)
self.channels = channels
def __call__(self, *args, force_apply: bool = False, **data) -> typing.Dict[str, typing.Any]:
if force_apply or random.random() < self.p:
image = data["image"]
# Expand mono images to have a single channel
if len(image.shape) == 2:
image = np.expand_dims(image, -1)
if self.channels is None:
self.channels = range(image.shape[2])
for c in self.channels:
for t in self.transforms:
image[:, :, c] = t(image=image[:, :, c])["image"]
data["image"] = image
return data
class ReplayCompose(Compose):
def __init__(
self,
transforms: TransformsSeqType,
bbox_params: typing.Optional[typing.Union[dict, "BboxParams"]] = None,
keypoint_params: typing.Optional[typing.Union[dict, "KeypointParams"]] = None,
additional_targets: typing.Optional[typing.Dict[str, str]] = None,
p: float = 1.0,
is_check_shapes: bool = True,
save_key: str = "replay",
):
super(ReplayCompose, self).__init__(
transforms, bbox_params, keypoint_params, additional_targets, p, is_check_shapes
)
self.set_deterministic(True, save_key=save_key)
self.save_key = save_key
def __call__(self, *args, force_apply: bool = False, **kwargs) -> typing.Dict[str, typing.Any]:
kwargs[self.save_key] = defaultdict(dict)
result = super(ReplayCompose, self).__call__(force_apply=force_apply, **kwargs)
serialized = self.get_dict_with_id()
self.fill_with_params(serialized, result[self.save_key])
self.fill_applied(serialized)
result[self.save_key] = serialized
return result
@staticmethod
def replay(saved_augmentations: typing.Dict[str, typing.Any], **kwargs) -> typing.Dict[str, typing.Any]:
augs = ReplayCompose._restore_for_replay(saved_augmentations)
return augs(force_apply=True, **kwargs)
@staticmethod
def _restore_for_replay(
transform_dict: typing.Dict[str, typing.Any], lambda_transforms: typing.Optional[dict] = None
) -> TransformType:
"""
Args:
            lambda_transforms (dict): A dictionary that contains lambda transforms, that is,
                instances of the Lambda class.
                This dictionary is required when you are restoring a pipeline that contains lambda transforms. Keys
                in that dictionary should be named the same as the `name` arguments of the respective lambda
                transforms in a serialized pipeline.
"""
applied = transform_dict["applied"]
params = transform_dict["params"]
lmbd = instantiate_nonserializable(transform_dict, lambda_transforms)
if lmbd:
transform = lmbd
else:
name = transform_dict["__class_fullname__"]
args = {k: v for k, v in transform_dict.items() if k not in ["__class_fullname__", "applied", "params"]}
cls = SERIALIZABLE_REGISTRY[name]
if "transforms" in args:
args["transforms"] = [
ReplayCompose._restore_for_replay(t, lambda_transforms=lambda_transforms)
for t in args["transforms"]
]
transform = cls(**args)
transform = typing.cast(BasicTransform, transform)
if isinstance(transform, BasicTransform):
transform.params = params
transform.replay_mode = True
transform.applied_in_replay = applied
return transform
def fill_with_params(self, serialized: dict, all_params: dict) -> None:
params = all_params.get(serialized.get("id"))
serialized["params"] = params
del serialized["id"]
for transform in serialized.get("transforms", []):
self.fill_with_params(transform, all_params)
def fill_applied(self, serialized: typing.Dict[str, typing.Any]) -> bool:
if "transforms" in serialized:
applied = [self.fill_applied(t) for t in serialized["transforms"]]
serialized["applied"] = any(applied)
else:
serialized["applied"] = serialized.get("params") is not None
return serialized["applied"]
def _to_dict(self) -> typing.Dict[str, typing.Any]:
dictionary = super(ReplayCompose, self)._to_dict()
dictionary.update({"save_key": self.save_key})
return dictionary
class Sequential(BaseCompose):
"""Sequentially applies all transforms to targets.
Note:
        This transform is not intended to be a replacement for `Compose`. Instead, it should be used inside `Compose`
        the same way `OneOf` or `OneOrOther` are used. For instance, you can combine `OneOf` with `Sequential` to
        create an augmentation pipeline that contains multiple sequences of augmentations and applies one randomly
        chosen sequence to input data (see the `Example` section for an example definition of such a pipeline).
Example:
>>> import albumentations as A
>>> transform = A.Compose([
>>> A.OneOf([
>>> A.Sequential([
>>> A.HorizontalFlip(p=0.5),
>>> A.ShiftScaleRotate(p=0.5),
>>> ]),
>>> A.Sequential([
>>> A.VerticalFlip(p=0.5),
>>> A.RandomBrightnessContrast(p=0.5),
>>> ]),
>>> ], p=1)
>>> ])
"""
def __init__(self, transforms: TransformsSeqType, p: float = 0.5):
super().__init__(transforms, p)
def __call__(self, *args, **data) -> typing.Dict[str, typing.Any]:
for t in self.transforms:
data = t(**data)
return data
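# A hedged usage sketch (assuming the usual top-level albumentations API, e.g.
# A.HorizontalFlip): record the sampled parameters with ReplayCompose, then
# re-apply exactly the same augmentation to another input via `replay`. The
# recorded parameters live under the default save_key "replay".
#
#     import albumentations as A
#     import numpy as np
#     transform = A.ReplayCompose(
#         [A.HorizontalFlip(p=0.5)],
#         bbox_params=A.BboxParams(format="pascal_voc", label_fields=["labels"]),
#     )
#     image = np.zeros((100, 100, 3), dtype=np.uint8)
#     first = transform(image=image, bboxes=[(10, 10, 50, 50)], labels=[1])
#     second = A.ReplayCompose.replay(first["replay"], image=image,
#                                     bboxes=[(10, 10, 50, 50)], labels=[1])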
|
9562411e40e6d04d3f0a126c2278aeaeee38a485
|
3a8678a73ff5caa3df02da97a0a0b49ab4482994
|
/python/tests/table/test_refs.py
|
d106f0237abe79d0fb6416bda8e7589043d4cba4
|
[
"Apache-2.0"
] |
permissive
|
apache/iceberg
|
b21a9c1bfbb328919f51cd257772dfd1bd86aaff
|
c9ce6a123b49c1c4e5bd950b388d69e6ff849b5d
|
refs/heads/master
| 2023-09-03T15:54:18.098529
| 2023-09-03T12:37:39
| 2023-09-03T12:37:39
| 158,256,479
| 4,358
| 1,659
|
Apache-2.0
| 2023-09-14T16:31:51
| 2018-11-19T16:26:46
|
Java
|
UTF-8
|
Python
| false
| false
| 1,434
|
py
|
test_refs.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint:disable=eval-used
from pyiceberg.table.refs import SnapshotRef, SnapshotRefType
def test_snapshot_with_properties_repr() -> None:
snapshot_ref = SnapshotRef(
snapshot_id=3051729675574597004,
snapshot_ref_type=SnapshotRefType.TAG,
min_snapshots_to_keep=None,
max_snapshot_age_ms=None,
max_ref_age_ms=10000000,
)
assert (
repr(snapshot_ref)
== """SnapshotRef(snapshot_id=3051729675574597004, snapshot_ref_type=SnapshotRefType.TAG, min_snapshots_to_keep=None, max_snapshot_age_ms=None, max_ref_age_ms=10000000)"""
)
assert snapshot_ref == eval(repr(snapshot_ref))
|
2323eb8a91be5c8645dc8152c040a09d80c771e7
|
4506d81df5ae98078e5cbe79f613514ad12b1c83
|
/nipype/interfaces/niftyseg/tests/test_em_interfaces.py
|
c90d93a6baa3beb1172bf317505a05da815c120f
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
nipy/nipype
|
d52eba1b98fda68e24d006ac0d5701fc8a531b9c
|
03a236320fa229299d637ff9af97865a6ae76aca
|
refs/heads/master
| 2023-08-28T10:36:07.020541
| 2023-08-25T13:40:09
| 2023-08-25T13:40:09
| 791,477
| 692
| 569
|
NOASSERTION
| 2023-09-11T06:04:51
| 2010-07-22T17:06:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,167
|
py
|
test_em_interfaces.py
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import pytest
from ....testing import example_data
from ...niftyreg import get_custom_path
from ...niftyreg.tests.test_regutils import no_nifty_tool
from .. import EM
@pytest.mark.skipif(no_nifty_tool(cmd="seg_EM"), reason="niftyseg is not installed")
def test_seg_em():
# Create a node object
seg_em = EM()
# Check if the command is properly defined
cmd = get_custom_path("seg_EM", env_dir="NIFTYSEGDIR")
assert seg_em.cmd == cmd
# test raising error with mandatory args absent
with pytest.raises(ValueError):
seg_em.run()
# Assign some input data
in_file = example_data("im1.nii")
seg_em.inputs.in_file = in_file
seg_em.inputs.no_prior = 4
cmd_tmp = "{cmd} -in {in_file} -nopriors 4 -bc_out {bc_out} -out \
{out_file} -out_outlier {out_outlier}"
expected_cmd = cmd_tmp.format(
cmd=cmd,
in_file=in_file,
out_file="im1_em.nii.gz",
bc_out="im1_bc_em.nii.gz",
out_outlier="im1_outlier_em.nii.gz",
)
assert seg_em.cmdline == expected_cmd
|
1086832bb6a5e0fbeebe34beb5ffd6869166c94a
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/storage/azure-storage-blob/tests/perfstress_tests/T1_legacy_tests/_test_base_legacy.py
|
2cc007240ba901b0fdb1174f38c699e6378300b8
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,057
|
py
|
_test_base_legacy.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import uuid
import functools
import requests
from azure_devtools.perfstress_tests import PerfStressTest
from azure.storage.blob import BlockBlobService
def test_proxy_callback(proxy_policy, request):
if proxy_policy.recording_id and proxy_policy.mode:
live_endpoint = request.host
request.host = proxy_policy._proxy_url.netloc
request.headers["x-recording-id"] = proxy_policy.recording_id
request.headers["x-recording-mode"] = proxy_policy.mode
request.headers["x-recording-remove"] = "false"
# Ensure x-recording-upstream-base-uri header is only set once, since the
# same HttpMessage will be reused on retries
if "x-recording-upstream-base-uri" not in request.headers:
original_endpoint = "https://{}".format(live_endpoint)
request.headers["x-recording-upstream-base-uri"] = original_endpoint
class _LegacyServiceTest(PerfStressTest):
service_client = None
async_service_client = None
def __init__(self, arguments):
super().__init__(arguments)
connection_string = self.get_from_env("AZURE_STORAGE_CONNECTION_STRING")
session = None
if self.args.test_proxies:
session = requests.Session()
session.verify = False
if not _LegacyServiceTest.service_client or self.args.no_client_share:
_LegacyServiceTest.service_client = BlockBlobService(
connection_string=connection_string,
request_session=session)
_LegacyServiceTest.service_client.MAX_SINGLE_PUT_SIZE = self.args.max_put_size
_LegacyServiceTest.service_client.MAX_BLOCK_SIZE = self.args.max_block_size
_LegacyServiceTest.service_client.MIN_LARGE_BLOCK_UPLOAD_THRESHOLD = self.args.buffer_threshold
self.async_service_client = None
self.service_client = _LegacyServiceTest.service_client
if self.args.test_proxies:
self.service_client.request_callback = functools.partial(
test_proxy_callback,
self._test_proxy_policy
)
@staticmethod
def add_arguments(parser):
super(_LegacyServiceTest, _LegacyServiceTest).add_arguments(parser)
parser.add_argument('--max-put-size', nargs='?', type=int, help='Maximum size of data uploading in single HTTP PUT. Defaults to 64*1024*1024', default=64*1024*1024)
parser.add_argument('--max-block-size', nargs='?', type=int, help='Maximum size of data in a block within a blob. Defaults to 4*1024*1024', default=4*1024*1024)
parser.add_argument('--buffer-threshold', nargs='?', type=int, help='Minimum block size to prevent full block buffering. Defaults to 4*1024*1024+1', default=4*1024*1024+1)
parser.add_argument('--max-concurrency', nargs='?', type=int, help='Maximum number of concurrent threads used for data transfer. Defaults to 1', default=1)
parser.add_argument('-s', '--size', nargs='?', type=int, help='Size of data to transfer. Default is 10240.', default=10240)
parser.add_argument('--no-client-share', action='store_true', help='Create one ServiceClient per test instance. Default is to share a single ServiceClient.', default=False)
class _LegacyContainerTest(_LegacyServiceTest):
container_name = "perfstress-legacy-" + str(uuid.uuid4())
def __init__(self, arguments):
super().__init__(arguments)
async def global_setup(self):
await super().global_setup()
self.service_client.create_container(self.container_name)
async def global_cleanup(self):
self.service_client.delete_container(self.container_name)
await super().global_cleanup()
|
e1f04d21608271ff398cda655743a27acc743ea9
|
8a151e6ba14ff88d06581f4ace273c7b04a3398c
|
/deepface/detectors/FaceDetector.py
|
522592d1889e9bb45c27711c90a73406c5274eca
|
[
"MIT",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
serengil/deepface
|
dd96d6c94fdb3214b4805e3ec133067bc7e4388d
|
abf9c6a1d55371d1252deefbe69fd468d2384759
|
refs/heads/master
| 2023-08-08T11:48:06.914057
| 2023-08-07T09:03:21
| 2023-08-07T09:03:21
| 239,201,565
| 7,350
| 1,589
|
MIT
| 2023-09-08T07:01:50
| 2020-02-08T20:42:28
|
Python
|
UTF-8
|
Python
| false
| false
| 3,985
|
py
|
FaceDetector.py
|
import math
from PIL import Image
import numpy as np
from deepface.commons import distance
from deepface.detectors import (
OpenCvWrapper,
SsdWrapper,
DlibWrapper,
MtcnnWrapper,
RetinaFaceWrapper,
MediapipeWrapper,
YoloWrapper,
YunetWrapper,
)
def build_model(detector_backend):
global face_detector_obj # singleton design pattern
backends = {
"opencv": OpenCvWrapper.build_model,
"ssd": SsdWrapper.build_model,
"dlib": DlibWrapper.build_model,
"mtcnn": MtcnnWrapper.build_model,
"retinaface": RetinaFaceWrapper.build_model,
"mediapipe": MediapipeWrapper.build_model,
"yolov8": YoloWrapper.build_model,
"yunet": YunetWrapper.build_model,
}
if not "face_detector_obj" in globals():
face_detector_obj = {}
built_models = list(face_detector_obj.keys())
if detector_backend not in built_models:
face_detector = backends.get(detector_backend)
if face_detector:
face_detector = face_detector()
face_detector_obj[detector_backend] = face_detector
else:
raise ValueError("invalid detector_backend passed - " + detector_backend)
return face_detector_obj[detector_backend]
def detect_face(face_detector, detector_backend, img, align=True):
obj = detect_faces(face_detector, detector_backend, img, align)
if len(obj) > 0:
face, region, confidence = obj[0] # discard multiple faces
# If no face is detected, set face to None,
# image region to full image, and confidence to 0.
else: # len(obj) == 0
face = None
region = [0, 0, img.shape[1], img.shape[0]]
confidence = 0
return face, region, confidence
def detect_faces(face_detector, detector_backend, img, align=True):
backends = {
"opencv": OpenCvWrapper.detect_face,
"ssd": SsdWrapper.detect_face,
"dlib": DlibWrapper.detect_face,
"mtcnn": MtcnnWrapper.detect_face,
"retinaface": RetinaFaceWrapper.detect_face,
"mediapipe": MediapipeWrapper.detect_face,
"yolov8": YoloWrapper.detect_face,
"yunet": YunetWrapper.detect_face,
}
detect_face_fn = backends.get(detector_backend)
if detect_face_fn: # pylint: disable=no-else-return
obj = detect_face_fn(face_detector, img, align)
# obj stores list of (detected_face, region, confidence)
return obj
else:
raise ValueError("invalid detector_backend passed - " + detector_backend)
def alignment_procedure(img, left_eye, right_eye):
# this function aligns given face in img based on left and right eye coordinates
left_eye_x, left_eye_y = left_eye
right_eye_x, right_eye_y = right_eye
# -----------------------
# find rotation direction
    if left_eye_y > right_eye_y:
        point_3rd = (right_eye_x, left_eye_y)
        direction = -1  # rotate clockwise
    else:
        point_3rd = (left_eye_x, right_eye_y)
        direction = 1  # rotate counter-clockwise
# -----------------------
# find length of triangle edges
a = distance.findEuclideanDistance(np.array(left_eye), np.array(point_3rd))
b = distance.findEuclideanDistance(np.array(right_eye), np.array(point_3rd))
c = distance.findEuclideanDistance(np.array(right_eye), np.array(left_eye))
# -----------------------
# apply cosine rule
    if b != 0 and c != 0:  # avoid division by zero in the cos_a calculation when b or c is 0
cos_a = (b * b + c * c - a * a) / (2 * b * c)
angle = np.arccos(cos_a) # angle in radian
angle = (angle * 180) / math.pi # radian to degree
# -----------------------
# rotate base image
if direction == -1:
angle = 90 - angle
img = Image.fromarray(img)
img = np.array(img.rotate(direction * angle))
# -----------------------
    return img  # return the image even when no rotation was applied
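# A hedged usage sketch: build a backend once (cached by the singleton pattern
# above) and take the most confident face from a BGR image array ("photo.jpg"
# is a hypothetical input path).
#
#     import cv2
#     detector = build_model("opencv")
#     img = cv2.imread("photo.jpg")
#     face, region, confidence = detect_face(detector, "opencv", img, align=True)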
|
1fe1c2dba8bd220723272ffffda7694af59ef44e
|
9efca95a55cb4df52d895d42f1ec10331516a734
|
/c7n/resources/glacier.py
|
8bed5fbb5f64e8ae6bafe2f6695791fcc3a8e31a
|
[
"Apache-2.0"
] |
permissive
|
cloud-custodian/cloud-custodian
|
519e602abe00c642786441b64cc40857ef5bc9de
|
27563cf4571040f923124e1acb2463f11e372225
|
refs/heads/main
| 2023-09-04T10:54:55.963703
| 2023-09-01T17:40:17
| 2023-09-01T17:40:17
| 52,837,350
| 3,327
| 1,096
|
Apache-2.0
| 2023-09-14T14:03:30
| 2016-03-01T01:11:20
|
Python
|
UTF-8
|
Python
| false
| false
| 5,812
|
py
|
glacier.py
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from botocore.exceptions import ClientError
import json
from c7n.actions import RemovePolicyBase
from c7n.filters import CrossAccountAccessFilter
from c7n.query import QueryResourceManager, TypeInfo
from c7n.manager import resources
from c7n.utils import get_retry, local_session, type_schema
@resources.register('glacier')
class Glacier(QueryResourceManager):
permissions = ('glacier:ListTagsForVault',)
retry = staticmethod(get_retry(('Throttled',)))
class resource_type(TypeInfo):
service = 'glacier'
enum_spec = ('list_vaults', 'VaultList', None)
name = id = "VaultName"
arn = "VaultARN"
arn_type = 'vaults'
universal_taggable = True
def augment(self, resources):
def process_tags(resource):
client = local_session(self.session_factory).client('glacier')
tag_dict = self.retry(
client.list_tags_for_vault,
vaultName=resource[self.get_model().name])['Tags']
tag_list = []
for k, v in tag_dict.items():
tag_list.append({'Key': k, 'Value': v})
resource['Tags'] = tag_list
return resource
with self.executor_factory(max_workers=2) as w:
return list(w.map(process_tags, resources))
@Glacier.filter_registry.register('cross-account')
class GlacierCrossAccountAccessFilter(CrossAccountAccessFilter):
"""Filter to return all glacier vaults with cross account access permissions
The whitelist parameter will omit the accounts that match from the return
:example:
.. code-block:
policies:
- name: check-glacier-cross-account
resource: glacier
filters:
- type: cross-account
whitelist:
- permitted-account-01
- permitted-account-02
"""
permissions = ('glacier:GetVaultAccessPolicy',)
def process(self, resources, event=None):
def _augment(r):
client = local_session(
self.manager.session_factory).client('glacier')
try:
r['Policy'] = client.get_vault_access_policy(
vaultName=r['VaultName'])['policy']['Policy']
return r
except ClientError as e:
if e.response['Error']['Code'] == 'AccessDeniedException':
self.log.warning(
"Access denied getting policy glacier:%s",
                        r['VaultName'])
self.log.debug("fetching policy for %d glacier" % len(resources))
with self.executor_factory(max_workers=3) as w:
resources = list(filter(None, w.map(_augment, resources)))
return super(GlacierCrossAccountAccessFilter, self).process(
resources, event)
@Glacier.action_registry.register('remove-statements')
class RemovePolicyStatement(RemovePolicyBase):
"""Action to remove policy statements from Glacier
:example:
.. code-block:: yaml
policies:
- name: glacier-cross-account
resource: glacier
filters:
- type: cross-account
actions:
- type: remove-statements
statement_ids: matched
"""
permissions = ('glacier:SetVaultAccessPolicy', 'glacier:GetVaultAccessPolicy')
def process(self, resources):
results = []
client = local_session(self.manager.session_factory).client('glacier')
for r in resources:
try:
results += filter(None, [self.process_resource(client, r)])
except Exception:
self.log.exception(
"Error processing glacier:%s", r['VaultARN'])
return results
def process_resource(self, client, resource):
if 'Policy' not in resource:
try:
resource['Policy'] = client.get_vault_access_policy(
vaultName=resource['VaultName'])['policy']['Policy']
except ClientError as e:
if e.response['Error']['Code'] != "ResourceNotFoundException":
raise
resource['Policy'] = None
if not resource['Policy']:
return
p = json.loads(resource['Policy'])
statements, found = self.process_policy(
p, resource, CrossAccountAccessFilter.annotation_key)
if not found:
return
if not statements:
client.delete_vault_access_policy(
vaultName=resource['VaultName'])
else:
client.set_vault_access_policy(
vaultName=resource['VaultName'],
policy={'Policy': json.dumps(p)}
)
return {'Name': resource['VaultName'],
'State': 'PolicyRemoved',
'Statements': found}
@Glacier.action_registry.register('delete')
class GlacierVaultDelete(RemovePolicyBase):
"""Action to delete glacier vaults
:example:
.. code-block:: yaml
policies:
- name: glacier-vault-delete
resource: aws.glacier
filters:
- type: cross-account
actions:
- type: delete
"""
schema = type_schema('delete')
permissions = ('glacier:DeleteVault',)
def process(self, resources):
client = local_session(self.manager.session_factory).client('glacier')
for r in resources:
self.manager.retry(client.delete_vault, vaultName=r['VaultName'], ignore_err_codes=(
'ResourceNotFoundException',))
|
115b710d54655e57aebb2a8563322b849baade47
|
6c88b2cea38b2cead9e2402d46a8fc64949c53df
|
/sdk/python/lib/pulumi/runtime/stack.py
|
5422debce3509ae88822ef688e306f922e1db584
|
[
"Apache-2.0"
] |
permissive
|
pulumi/pulumi
|
a9b36c32f0cdd445c22f9ca64ce26c9ae5147575
|
46e2753d02d46a1c077930eeccdfe6738f46c0d2
|
refs/heads/master
| 2023-08-19T10:25:49.849189
| 2023-08-16T04:59:07
| 2023-08-16T04:59:07
| 72,477,752
| 17,553
| 1,082
|
Apache-2.0
| 2023-09-14T21:05:35
| 2016-10-31T21:02:47
|
Go
|
UTF-8
|
Python
| false
| false
| 10,463
|
py
|
stack.py
|
# Copyright 2016-2018, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Support for automatic stack components.
"""
import asyncio
from inspect import isawaitable
from typing import TYPE_CHECKING, Any, Callable, Dict, List
from .. import log
from ..resource import ComponentResource, Resource, ResourceTransformation
from .settings import (
_get_rpc_manager,
get_project,
get_root_resource,
get_stack,
is_dry_run,
set_root_resource,
)
from .sync_await import _all_tasks, _get_current_task
if TYPE_CHECKING:
from .. import Output
def _get_running_tasks() -> List[asyncio.Task]:
pending = []
for task in _all_tasks():
# Don't kill ourselves, that would be silly.
if not task == _get_current_task():
pending.append(task)
return pending
async def run_pulumi_func(func: Callable):
try:
func()
finally:
await wait_for_rpcs()
# By now, all tasks have exited and we're good to go.
log.debug("run_pulumi_func completed")
async def wait_for_rpcs(await_all_outstanding_tasks=True) -> None:
log.debug("Waiting for outstanding RPCs to complete")
rpc_manager = _get_rpc_manager()
while True:
# Pump the event loop, giving all of the RPCs that we just queued up time to fully execute.
# The asyncio scheduler does not expose a "yield" primitive, so this will have to do.
#
# Note that "asyncio.sleep(0)" is the blessed way to do this:
# https://github.com/python/asyncio/issues/284#issuecomment-154180935
#
# We await each RPC in turn so that this loop will actually block rather than busy-wait.
while len(rpc_manager.rpcs) > 0:
await asyncio.sleep(0)
log.debug(
f"waiting for quiescence; {len(rpc_manager.rpcs)} RPCs outstanding"
)
try:
await rpc_manager.rpcs.pop()
except Exception as exn:
# If the RPC failed, re-raise the original traceback
# instead of the await above.
if rpc_manager.unhandled_exception is not None:
cause = rpc_manager.unhandled_exception.with_traceback(
rpc_manager.exception_traceback,
)
raise exn from cause
raise
if rpc_manager.unhandled_exception is not None:
raise rpc_manager.unhandled_exception.with_traceback(
rpc_manager.exception_traceback
)
log.debug("RPCs successfully completed")
# If the RPCs have successfully completed, now await all remaining outstanding tasks.
if await_all_outstanding_tasks:
outstanding_tasks = _get_running_tasks()
if len(outstanding_tasks) == 0:
log.debug("No outstanding tasks to complete")
else:
log.debug(
f"Waiting for {len(outstanding_tasks)} outstanding tasks to complete"
)
done, pending = await asyncio.wait(
outstanding_tasks, return_when="FIRST_EXCEPTION"
)
if len(pending) > 0:
# If there are any pending tasks, it's because an exception was thrown.
# Cancel any pending tasks.
log.debug(f"Cancelling {len(pending)} remaining tasks.")
for task in pending:
task.cancel()
for task in done:
exception = task.exception()
if exception is not None:
log.debug(
"A future resolved in an exception, raising exception."
)
raise exception
log.debug("All outstanding tasks completed.")
# Check to see if any more RPCs have been scheduled, and repeat the cycle if so.
# Break if no RPCs remain.
if len(rpc_manager.rpcs) == 0:
break
async def run_in_stack(func: Callable):
"""
Run the given function inside of a new stack resource. This ensures that any stack export calls
will end up as output properties on the resulting stack component in the checkpoint file. This
is meant for internal runtime use only and is used by the Python SDK entrypoint program.
"""
await run_pulumi_func(lambda: Stack(func))
class Stack(ComponentResource):
"""
A synthetic stack component that automatically parents resources as the program runs.
"""
outputs: Dict[str, Any]
def __init__(self, func: Callable) -> None:
# Ensure we don't already have a stack registered.
if get_root_resource() is not None:
raise Exception("Only one root Pulumi Stack may be active at once")
# Now invoke the registration to begin creating this resource.
name = f"{get_project()}-{get_stack()}"
super().__init__("pulumi:pulumi:Stack", name, None, None)
# Invoke the function while this stack is active and then register its outputs.
self.outputs = {}
set_root_resource(self)
try:
func()
finally:
self.register_outputs(massage(self.outputs, []))
# Intentionally leave this resource installed in case subsequent async work uses it.
def output(self, name: str, value: Any):
"""
Export a stack output with a given name and value.
"""
self.outputs[name] = value
# Note: we use a List here instead of a set as many objects are unhashable. This is inefficient,
# but python seems to offer no alternative.
def massage(attr: Any, seen: List[Any]):
"""
    massage takes an arbitrary Python value and attempts to *deeply* convert it into a
    plain-old-Python-value that can be registered as an output. In general, this means leaving alone
things like strings, ints, bools. However, it does mean trying to make other values into either
lists or dictionaries as appropriate. In general, iterable things are turned into lists, and
dictionary-like things are turned into dictionaries.
"""
from .. import Output # pylint: disable=import-outside-toplevel
# Basic primitive types (numbers, booleans, strings, etc.) don't need any special handling.
if is_primitive(attr):
return attr
if isinstance(attr, Output):
return attr.apply(lambda v: massage(v, seen))
if isawaitable(attr):
return Output.from_input(attr).apply(lambda v: massage(v, seen))
# from this point on, we have complex objects. If we see them again, we don't want to emit them
# again fully or else we'd loop infinitely.
if reference_contains(attr, seen):
# Note: for Resources we hit again, emit their urn so cycles can be easily understood in
# the popo objects.
if isinstance(attr, Resource):
return massage(attr.urn, seen)
# otherwise just emit as nothing to stop the looping.
return None
try:
seen.append(attr)
return massage_complex(attr, seen)
finally:
popped = seen.pop()
if popped is not attr:
raise Exception("Invariant broken when processing stack outputs")
def massage_complex(attr: Any, seen: List[Any]) -> Any:
def is_public_key(key: str) -> bool:
return not key.startswith("_")
def serialize_all_keys(include: Callable[[str], bool]):
plain_object: Dict[str, Any] = {}
for key in attr.__dict__.keys():
if include(key):
plain_object[key] = massage(attr.__dict__[key], seen)
return plain_object
if isinstance(attr, Resource):
serialized_attr = serialize_all_keys(is_public_key)
# In preview only, we mark the result with "@isPulumiResource" to indicate that it is derived
# from a resource. This allows the engine to perform resource-specific filtering of unknowns
# from output diffs during a preview. This filtering is not necessary during an update because
# all property values are known.
return (
serialized_attr
if not is_dry_run()
else {**serialized_attr, "@isPulumiResource": True}
)
# first check if the value is an actual dictionary. If so, massage the values of it to deeply
# make sure this is a popo.
if isinstance(attr, dict):
# Don't use attr.items() here, as it will error in the case of outputs with an `items` property.
return {
key: massage(attr[key], seen) for key in attr if not key.startswith("_")
}
if hasattr(attr, "__iter__"):
return [massage(item, seen) for item in attr]
return serialize_all_keys(is_public_key)
def reference_contains(val1: Any, seen: List[Any]) -> bool:
for val2 in seen:
if val1 is val2:
return True
return False
def is_primitive(attr: Any) -> bool:
if attr is None:
return True
if isinstance(attr, str):
return True
# dictionaries, lists and dictionary-like things are not primitive.
if isinstance(attr, dict):
return False
if hasattr(attr, "__dict__"):
return False
try:
iter(attr)
return False
except TypeError:
pass
return True
def register_stack_transformation(t: ResourceTransformation):
"""
Add a transformation to all future resources constructed in this Pulumi stack.
"""
root_resource = get_root_resource()
if root_resource is None:
raise Exception(
"The root stack resource was referenced before it was initialized."
)
if root_resource._transformations is None:
root_resource._transformations = [t]
else:
root_resource._transformations = root_resource._transformations + [t]
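# A hedged usage sketch (assuming pulumi's top-level ResourceTransformation
# helpers): a stack transformation that marks every subsequently registered
# resource as protected.
#
#     from pulumi import ResourceOptions, ResourceTransformationResult
#     def protect_everything(args):
#         opts = ResourceOptions.merge(args.opts, ResourceOptions(protect=True))
#         return ResourceTransformationResult(args.props, opts)
#     register_stack_transformation(protect_everything)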
|
8fa881f620256d6109eb5c38a1c2ca5c472ea544
|
dd91ea0a9b143371cfb186eaa74333da9488510d
|
/python/interpret-core/interpret/provider/_visualize.py
|
6aecc87d83a33a35e5d3af3a98c564f5a0b74bd1
|
[
"MIT"
] |
permissive
|
interpretml/interpret
|
6c6ef2f2e6a6bb9c43633251089385cc44affe16
|
e6f38ea195aecbbd9d28c7183a83c65ada16e1ae
|
refs/heads/develop
| 2023-09-03T17:42:50.611413
| 2023-08-28T18:16:10
| 2023-08-28T18:16:10
| 184,704,903
| 3,731
| 472
|
MIT
| 2023-08-15T04:31:34
| 2019-05-03T05:47:52
|
C++
|
UTF-8
|
Python
| false
| false
| 8,375
|
py
|
_visualize.py
|
# Copyright (c) 2023 The InterpretML Contributors
# Distributed under the MIT software license
from abc import ABC, abstractmethod
import logging
from ._environment import EnvironmentDetector, is_cloud_env, ENV_DETECTED
from .._version import __version__
JS_URL = "https://unpkg.com/@interpretml/interpret-inline@{}/dist/interpret-inline.js".format(
__version__
)
_log = logging.getLogger(__name__)
class VisualizeProvider(ABC):
@abstractmethod
def render(self, explanation, key=-1, **kwargs):
pass # pragma: no cover
class AutoVisualizeProvider(VisualizeProvider):
def __init__(self, app_runner=None, **kwargs):
self.has_initialized = False
self.environment_detector = None
self.in_cloud_env = ENV_DETECTED
self.provider = None
self.app_runner = app_runner
self.kwargs = kwargs
def _lazy_initialize(self):
self.environment_detector = EnvironmentDetector()
detected_envs = self.environment_detector.detect()
self.in_cloud_env = is_cloud_env(detected_envs)
# NOTE: This is tested manually per release. Ignoring for coverage.
if self.in_cloud_env == ENV_DETECTED.CLOUD: # pragma: no cover
_log.info("Detected cloud environment.")
self.provider = InlineProvider(detected_envs=detected_envs, js_url=JS_URL)
elif "docker-dev-mode" in detected_envs:
_log.info("Operating in docker development mode.")
self.provider = InlineProvider(detected_envs=detected_envs)
elif self.in_cloud_env == ENV_DETECTED.BOTH_CLOUD_AND_NON_CLOUD:
_log.info("Detected both cloud and non cloud environment.")
# val = input("Type 'C' if you want to choose Cloud environment or 'NC' for Non Cloud Environment :")
val = "C"
if val == "C":
self.provider = InlineProvider(
detected_envs=detected_envs, js_url=JS_URL
)
else:
if self.app_runner:
self.provider = DashProvider(self.app_runner)
else:
self.provider = DashProvider.from_address()
else: # ENV_DETECTED.NON_CLOUD
_log.info("Detected non-cloud environment.")
if self.app_runner:
self.provider = DashProvider(self.app_runner)
else:
self.provider = DashProvider.from_address()
def render(self, explanation, key=-1, **kwargs):
if not self.has_initialized:
self._lazy_initialize()
self.has_initialized = True
self.provider.render(explanation, key=key, **kwargs)
class PreserveProvider(VisualizeProvider):
def render(self, explanation, key=-1, **kwargs):
file_name = kwargs.pop("file_name", None)
        # NOTE: Preserve didn't support returning everything. If key is -1, default key to None.
        # This is for backward-compatibility. All of this will be deprecated shortly anyway.
if key == -1:
key = None
# Get visual object
visual = explanation.visualize(key=key)
# Output to front-end/file
self._preserve_output(
explanation.name, visual, selector_key=key, file_name=file_name, **kwargs
)
return None
def _preserve_output(
self, explanation_name, visual, selector_key=None, file_name=None, **kwargs
):
from plotly.offline import iplot, plot, init_notebook_mode
from IPython.display import display, display_html
from base64 import b64encode
from plotly import graph_objs as go
from pandas.core.generic import NDFrame
import dash.development.base_component as dash_base
init_notebook_mode(connected=True)
def render_html(html_string):
base64_html = b64encode(html_string.encode("utf-8")).decode("ascii")
final_html = """<iframe src="data:text/html;base64,{data}" width="100%" height=400 frameBorder="0"></iframe>""".format(
data=base64_html
)
display_html(final_html, raw=True)
if visual is None: # pragma: no cover
msg = (
"No visualization for explanation [{0}] with selector_key [{1}]".format(
explanation_name, selector_key
)
)
_log.error(msg)
if file_name is None:
render_html(msg)
else:
pass
return False
if isinstance(visual, go.Figure):
if file_name is None:
iplot(visual, **kwargs)
else:
plot(visual, filename=file_name, **kwargs)
elif isinstance(visual, NDFrame):
if file_name is None:
display(visual, **kwargs)
else:
visual.to_html(file_name, **kwargs)
elif isinstance(visual, str):
if file_name is None:
render_html(visual)
else:
with open(file_name, "w") as f:
f.write(visual)
elif isinstance(visual, dash_base.Component): # pragma: no cover
msg = "Preserving dash components is currently not supported."
if file_name is None:
render_html(msg)
_log.error(msg)
return False
else: # pragma: no cover
msg = "Visualization cannot be preserved for type: {0}.".format(
type(visual)
)
if file_name is None:
render_html(msg)
_log.error(msg)
return False
return True
class DashProvider(VisualizeProvider):
"""Provides rendering via Plotly's Dash.
This works in the event of an environment that can expose HTTP(s) ports.
"""
def __init__(self, app_runner):
"""Initializes class.
This requires an instantiated `AppRunner`, call `.from_address` instead
to initialize both.
Args:
app_runner: An AppRunner instance.
"""
self.app_runner = app_runner
@classmethod
def from_address(cls, addr=None, base_url=None, use_relative_links=False):
"""Initialize a new `AppRunner` along with the provider.
Args:
addr: A tuple that is (ip_addr, port).
base_url: Base URL, this useful when behind a proxy.
use_relative_links: Relative links for rendered pages instead of full URI.
"""
from ..visual.dashboard import AppRunner
app_runner = AppRunner(
addr=addr, base_url=base_url, use_relative_links=use_relative_links
)
return cls(app_runner)
def idempotent_start(self):
status = self.app_runner.status()
if not status["thread_alive"]:
self.app_runner.start()
def link(self, explanation, **kwargs):
self.idempotent_start()
# Register
share_tables = kwargs.pop("share_tables", None)
self.app_runner.register(explanation, share_tables=share_tables)
url = self.app_runner.display_link(explanation)
return url
def render(self, explanation, **kwargs):
self.idempotent_start()
# Register
share_tables = kwargs.pop("share_tables", None)
self.app_runner.register(explanation, share_tables=share_tables)
# Display
open_link = isinstance(explanation, list)
self.app_runner.display(explanation, open_link=open_link)
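# Illustrative usage sketch (not part of the original module): standing up a
# Dash provider on a local port and rendering an explanation through it. The
# address tuple and the `explanation` object are assumptions for the example.
def _example_dash_render(explanation):  # pragma: no cover
    provider = DashProvider.from_address(addr=("127.0.0.1", 7001))
    provider.render(explanation)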
class InlineProvider(VisualizeProvider):
"""Provides rendering via JavaScript that are invoked within Jupyter cells."""
def __init__(self, detected_envs=None, js_url=None):
"""Initializes class.
Args:
            detected_envs: Environments targeted, as defined in `interpret.utils.environment`.
js_url: If defined, will load the JavaScript bundle for interpret-inline from the given URL.
"""
self.detected_envs = [] if detected_envs is None else detected_envs
self.js_url = js_url
def render(self, explanation, key=-1, **kwargs):
from ..visual._inline import render
render(
explanation,
default_key=key,
detected_envs=self.detected_envs,
js_url=self.js_url,
)
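# Illustrative usage sketch (not part of the original module): a provider is
# typically chosen once and then reused for every subsequent render call.
# `set_visualize_provider` is assumed to be the package-level setter hook.
def _example_use_inline_provider():  # pragma: no cover
    from interpret import set_visualize_provider
    set_visualize_provider(InlineProvider())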
# ===== file: /tests/selenium/test_guide/test_essentials/test_list_rendering.py | repo: stefanhoelzl/vue.py | license: MIT =====
from vue import *
def test_mutation_methods(selenium):
class MutationMethods(VueComponent):
array = [1, 2, 3]
template = "<div id='done' />"
def created(self):
print(self.array) # 1,2,3
print(self.array.pop()) # 3
print(self.array) # 1,2
self.array.append(4)
print(self.array) # 1,2,4
print(self.array.pop(0)) # 1
print(self.array) # 2,4
self.array[0:0] = [6, 4]
print(self.array) # 6,4,2,4
self.array.insert(2, 8)
print(self.array) # 6,4,8,2,4
del self.array[3]
print(self.array) # 6,4,8,4
self.array.sort(key=lambda a: 0 - a)
print(self.array) # 8,6,4,4
self.array.reverse()
print(self.array) # 4,4,6,8
with selenium.app(MutationMethods):
selenium.element_present("done")
logs = [
l["message"].split(" ", 2)[-1][:-3][1:] for l in selenium.get_logs()[-11:]
]
assert logs == [
"[1, 2, 3]",
"3",
"[1, 2]",
"[1, 2, 4]",
"1",
"[2, 4]",
"[6, 4, 2, 4]",
"[6, 4, 8, 2, 4]",
"[6, 4, 8, 4]",
"[8, 6, 4, 4]",
"[4, 4, 6, 8]",
]
# ===== file: /quimb/__init__.py | repo: jcmgray/quimb | license: Apache-2.0 =====
"""
Quantum Information for Many-Body calculations.
"""
try:
# -- Distribution mode --
# import from _version.py generated by setuptools_scm during release
from ._version import version as __version__
except ImportError:
# -- Source mode --
try:
# use setuptools_scm to get the current version from src using git
from setuptools_scm import get_version as _gv
from pathlib import Path as _Path
__version__ = _gv(_Path(__file__).parent.parent)
except ImportError:
# setuptools_scm is not available, use a default version
__version__ = "0.0.0+unknown"
import warnings
# some useful math
from math import pi, cos, sin, tan, exp, log, log2, log10, sqrt
# Core functions
from .core import (
qarray,
prod,
isket,
isbra,
isop,
isvec,
issparse,
isdense,
isreal,
isherm,
ispos,
mul,
dag,
dot,
vdot,
rdot,
ldmul,
rdmul,
outer,
explt,
get_thread_pool,
normalize,
chop,
quimbify,
qu,
ket,
bra,
dop,
sparse,
infer_size,
trace,
identity,
eye,
speye,
dim_map,
dim_compress,
kron,
kronpow,
ikron,
pkron,
permute,
itrace,
partial_trace,
expectation,
expec,
nmlz,
tr,
ptr,
)
# Linear algebra functions
from .linalg.base_linalg import (
eigensystem,
eig,
eigh,
eigvals,
eigvalsh,
eigvecs,
eigvecsh,
eigensystem_partial,
groundstate,
groundenergy,
bound_spectrum,
eigh_window,
eigvalsh_window,
eigvecsh_window,
svd,
svds,
norm,
expm,
sqrtm,
expm_multiply,
Lazy,
)
from .linalg.rand_linalg import rsvd, estimate_rank
from .linalg.mpi_launcher import get_mpi_pool, can_use_mpi_pool
# Generating objects
from .gen.operators import (
spin_operator,
pauli,
hadamard,
phase_gate,
S_gate,
T_gate,
U_gate,
rotation,
Rx,
Ry,
Rz,
Xsqrt,
Ysqrt,
Zsqrt,
Wsqrt,
swap,
iswap,
fsim,
fsimg,
ncontrolled_gate,
controlled,
CNOT,
cX,
cY,
cZ,
ccX,
ccY,
ccZ,
controlled_swap,
fredkin,
toffoli,
ham_heis,
ham_ising,
ham_XY,
ham_XXZ,
ham_j1j2,
ham_mbl,
ham_heis_2D,
zspin_projector,
create,
destroy,
num,
ham_hubbard_hardcore,
)
from .gen.states import (
basis_vec,
up,
zplus,
down,
zminus,
plus,
xplus,
minus,
xminus,
yplus,
yminus,
bloch_state,
bell_state,
singlet,
thermal_state,
neel_state,
singlet_pairs,
werner_state,
ghz_state,
w_state,
levi_civita,
perm_state,
graph_state_1d,
computational_state,
)
from .gen.rand import (
randn,
rand,
rand_matrix,
rand_herm,
rand_pos,
rand_rho,
rand_ket,
rand_uni,
rand_haar_state,
gen_rand_haar_states,
rand_mix,
rand_product_state,
rand_matrix_product_state,
rand_mps,
rand_seperable,
rand_iso,
rand_mera,
seed_rand,
set_rand_bitgen,
)
# Functions for calculating properties
from .calc import (
fidelity,
purify,
entropy,
entropy_subsys,
mutual_information,
mutinf,
mutinf_subsys,
schmidt_gap,
tr_sqrt,
tr_sqrt_subsys,
partial_transpose,
negativity,
logarithmic_negativity,
logneg,
logneg_subsys,
concurrence,
one_way_classical_information,
quantum_discord,
trace_distance,
cprint,
decomp,
pauli_decomp,
bell_decomp,
correlation,
pauli_correlations,
ent_cross_matrix,
qid,
is_degenerate,
is_eigenvector,
page_entropy,
heisenberg_energy,
dephase,
kraus_op,
projector,
measure,
simulate_counts,
)
# Evolution class and methods
from .evo import Evolution
from .linalg.approx_spectral import (
approx_spectral_function,
tr_abs_approx,
tr_exp_approx,
tr_sqrt_approx,
tr_xlogx_approx,
entropy_subsys_approx,
logneg_subsys_approx,
negativity_subsys_approx,
xlogx,
)
from .utils import (
save_to_disk,
load_from_disk,
oset,
LRU,
tree_map,
tree_apply,
tree_flatten,
tree_unflatten,
format_number_with_error,
NEUTRAL_STYLE,
default_to_neutral_style,
)
warnings.filterwarnings("ignore", message="Caching is not available when ")
__all__ = [
# Accel ----------------------------------------------------------------- #
"qarray",
"prod",
"isket",
"isbra",
"isop",
"isvec",
"issparse",
"isdense",
"isreal",
"isherm",
"ispos",
"mul",
"dag",
"dot",
"vdot",
"rdot",
"ldmul",
"rdmul",
"outer",
"explt",
# Core ------------------------------------------------------------------ #
"normalize",
"chop",
"quimbify",
"qu",
"ket",
"bra",
"dop",
"sparse",
"infer_size",
"trace",
"identity",
"eye",
"speye",
"dim_map",
"dim_compress",
"kron",
"kronpow",
"ikron",
"pkron",
"permute",
"itrace",
"partial_trace",
"expectation",
"expec",
"nmlz",
"tr",
"ptr",
# Linalg ---------------------------------------------------------------- #
"eigensystem",
"eig",
"eigh",
"eigvals",
"eigvalsh",
"eigvecs",
"eigvecsh",
"eigensystem_partial",
"groundstate",
"groundenergy",
"bound_spectrum",
"eigh_window",
"eigvalsh_window",
"eigvecsh_window",
"svd",
"svds",
"norm",
"Lazy",
"rsvd",
"estimate_rank",
# Gen ------------------------------------------------------------------- #
"spin_operator",
"pauli",
"hadamard",
"phase_gate",
"T_gate",
"S_gate",
"U_gate",
"rotation",
"Rx",
"Ry",
"Rz",
"Xsqrt",
"Ysqrt",
"Zsqrt",
"Wsqrt",
"swap",
"iswap",
"fsim",
"fsimg",
"ncontrolled_gate",
"controlled",
"CNOT",
"cX",
"cY",
"cZ",
"ccX",
"ccY",
"ccZ",
"controlled_swap",
"fredkin",
"toffoli",
"ham_heis",
"ham_ising",
"ham_XY",
"ham_XXZ",
"ham_j1j2",
"ham_mbl",
"ham_heis_2D",
"create",
"destroy",
"num",
"ham_hubbard_hardcore",
"zspin_projector",
"basis_vec",
"up",
"zplus",
"down",
"zminus",
"plus",
"xplus",
"minus",
"xminus",
"yplus",
"yminus",
"bloch_state",
"bell_state",
"singlet",
"thermal_state",
"neel_state",
"singlet_pairs",
"werner_state",
"ghz_state",
"w_state",
"levi_civita",
"perm_state",
"graph_state_1d",
"rand_matrix",
"rand_herm",
"rand_pos",
"rand_rho",
"rand_ket",
"rand_uni",
"rand_haar_state",
"gen_rand_haar_states",
"rand_mix",
"rand_mps",
"randn",
"rand",
"rand_product_state",
"rand_matrix_product_state",
"rand_seperable",
"rand_iso",
"rand_mera",
"seed_rand",
"set_rand_bitgen",
"computational_state",
# Calc ------------------------------------------------------------------ #
"expm",
"sqrtm",
"expm_multiply",
"fidelity",
"purify",
"entropy",
"entropy_subsys",
"mutual_information",
"mutinf",
"mutinf_subsys",
"schmidt_gap",
"tr_sqrt",
"tr_sqrt_subsys",
"partial_transpose",
"negativity",
"logarithmic_negativity",
"logneg",
"logneg_subsys",
"concurrence",
"one_way_classical_information",
"quantum_discord",
"trace_distance",
"cprint",
"decomp",
"pauli_decomp",
"bell_decomp",
"correlation",
"pauli_correlations",
"ent_cross_matrix",
"qid",
"is_degenerate",
"is_eigenvector",
"page_entropy",
"heisenberg_energy",
"dephase",
"kraus_op",
"projector",
"measure",
"simulate_counts",
# Evo ------------------------------------------------------------------- #
"Evolution",
# Approx spectral ------------------------------------------------------- #
"approx_spectral_function",
"tr_abs_approx",
"tr_exp_approx",
"tr_sqrt_approx",
"tr_xlogx_approx",
"entropy_subsys_approx",
"logneg_subsys_approx",
"negativity_subsys_approx",
# Some misc useful math ------------------------------------------------- #
"pi",
"cos",
"sin",
"tan",
"exp",
"log",
"log2",
"log10",
"sqrt",
"xlogx",
# Utils ----------------------------------------------------------------- #
"save_to_disk",
"load_from_disk",
"get_thread_pool",
"get_mpi_pool",
"can_use_mpi_pool",
"oset",
"LRU",
"tree_map",
"tree_apply",
"tree_flatten",
"tree_unflatten",
"format_number_with_error",
"NEUTRAL_STYLE",
"default_to_neutral_style",
]
# ===== file: /onesided_cards.py | repo: ospalh/anki-addons | no license =====
#!/usr/bin/env python
# -*- mode: python ; coding: utf-8 -*-
#
# Copyright © 2012 Roland Sieker, <ospalh@gmail.com>
# License: GNU GPL, version 3 or later; http://www.gnu.org/copyleft/gpl.html
from aqt import mw
from anki.consts import MODEL_STD
from anki.hooks import addHook
__version__ = "1.0.2"
def maybe_skip_question():
model = mw.reviewer.card.model()
if model['type'] != MODEL_STD:
# Not standard, (i.e., cloze): Something on the back side. So
# don't skip.
return
back = model['tmpls'][mw.reviewer.card.ord]['afmt'].strip()
if '{{FrontSide}}' == back:
if not mw.reviewer._bottomReady:
# Looking at the reviewer.py source code, this may not
# always show images. But the reviewer code uses some
            # strange voodoo, so I don't really know what to do about
# it.
mw.reviewer._showAnswer()
else:
try:
# Currently, this seems to be the right thing to do.
mw.reviewer._showAnswerHack()
except NameError:
# Maybe next week we need this again.
mw.reviewer._showAnswer()
addHook("showQuestion", maybe_skip_question)
# ===== file: /mindspore/python/mindspore/ops/_op_impl/akg/ascend/prod_force_se_a.py | repo: mindspore-ai/mindspore | license: Apache-2.0 =====
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""ProdForceSeA op"""
from mindspore.ops.op_info_register import op_info_register, AkgAscendRegOp, DataType as DT
op_info = AkgAscendRegOp("ProdForceSeA") \
.fusion_type("ELEMWISE") \
.attr("natoms", "required", "int") \
.input(0, "net_deriv_tensor") \
.input(1, "in_deriv_tensor") \
.input(2, "nlist_tensor") \
.output(0, "output") \
.dtype_format(DT.F32_Default, DT.F32_Default, DT.I32_Default, DT.F32_Default) \
.get_op_info()
@op_info_register(op_info)
def _prod_force_se_a_akg():
"""ProdForceSeA Akg register"""
return
# ===== file: /core/src/toga/handlers.py | repo: beeware/toga | license: BSD-3-Clause =====
import asyncio
import inspect
import sys
import traceback
from abc import ABC
class NativeHandler:
def __init__(self, handler):
self.native = handler
async def long_running_task(interface, generator, cleanup):
"""Run a generator as an asynchronous coroutine."""
try:
try:
while True:
delay = next(generator)
if delay:
await asyncio.sleep(delay)
except StopIteration as e:
result = e.value
except Exception as e:
print("Error in long running handler:", e, file=sys.stderr)
traceback.print_exc()
else:
if cleanup:
try:
cleanup(interface, result)
except Exception as e:
print("Error in long running handler cleanup:", e, file=sys.stderr)
traceback.print_exc()
async def handler_with_cleanup(handler, cleanup, interface, *args, **kwargs):
try:
result = await handler(interface, *args, **kwargs)
except Exception as e:
print("Error in async handler:", e, file=sys.stderr)
traceback.print_exc()
else:
if cleanup:
try:
cleanup(interface, result)
except Exception as e:
print("Error in async handler cleanup:", e, file=sys.stderr)
traceback.print_exc()
def wrapped_handler(interface, handler, cleanup=None):
"""Wrap a handler provided by the user, so it can be invoked.
If the handler is a NativeHandler, return the handler object contained in the
NativeHandler.
    If the handler is a bound method or function, invoke it as-is and return the
    result.
If the handler is a generator, invoke it asynchronously, with the yield values from
the generator representing the duration to sleep between iterations.
If the handler is a coroutine, install it on the asynchronous event loop.
Returns either the native handler, or a wrapped function that will invoke the
handler, using the interface as context. If a non-native handler, the wrapper
function is annotated with the original handler function on the `_raw` attribute.
"""
if handler:
if isinstance(handler, NativeHandler):
return handler.native
def _handler(widget, *args, **kwargs):
if asyncio.iscoroutinefunction(handler):
asyncio.ensure_future(
handler_with_cleanup(handler, cleanup, interface, *args, **kwargs)
)
else:
try:
result = handler(interface, *args, **kwargs)
except Exception as e:
print("Error in handler:", e, file=sys.stderr)
traceback.print_exc()
else:
if inspect.isgenerator(result):
asyncio.ensure_future(
long_running_task(interface, result, cleanup)
)
else:
try:
if cleanup:
cleanup(interface, result)
return result
except Exception as e:
print("Error in handler cleanup:", e, file=sys.stderr)
traceback.print_exc()
_handler._raw = handler
else:
# A dummy no-op handler
def _handler(widget, *args, **kwargs):
pass
_handler._raw = None
return _handler
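# Illustrative sketch (not from the original file): a generator handler of the
# kind described in the docstring above; each yielded value is the number of
# seconds to sleep before the next iteration, and the return value is passed
# to the cleanup callback, if one was given.
def _example_generator_handler(interface):  # pragma: no cover
    for step in range(3):
        print("working, step", step)
        yield 0.5  # sleep half a second between iterations
    return "done"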
class AsyncResult(ABC):
def __init__(self):
loop = asyncio.get_event_loop()
self.future = loop.create_future()
def __repr__(self):
return f"<Async {self.RESULT_TYPE} result; future={self.future}>"
def __await__(self):
return self.future.__await__()
# All the comparison dunder methods are disabled
    def __bool__(self, other=None):
raise RuntimeError(
f"Can't check {self.RESULT_TYPE} result directly; use await or an on_result handler"
)
__lt__ = __bool__
__le__ = __bool__
__eq__ = __bool__
__ne__ = __bool__
__gt__ = __bool__
__ge__ = __bool__
|
97e7da10489cf961a8197ec77b5a6f82ad00bad0
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/chrome/browser/chromeos/extensions/printing_metrics/DEPS
|
51376a3706578d29c6fea6b4c27c6b084810927d
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 151
|
DEPS
|
specific_include_rules = {
".*test\.c": [
# Guarded by IS_CHROMEOS_ASH or in is_chromeos_ash sources.
"+chrome/browser/ash/printing",
],
}
# ===== file: /modules/wireless/mana-toolkit.py | repo: trustedsec/ptf | no license =====
#!/usr/bin/env python
#######################################
# Installation module for mana-toolkit
#######################################
# AUTHOR OF MODULE NAME
AUTHOR="jklaz"
# DESCRIPTION OF THE MODULE
DESCRIPTION="This module will install/update the mana-toolkit"
# INSTALL TYPE GIT, SVN, FILE DOWNLOAD
# OPTIONS = GIT, SVN, FILE
INSTALL_TYPE="GIT"
# LOCATION OF THE FILE OR GIT/SVN REPOSITORY
REPOSITORY_LOCATION="https://github.com/sensepost/mana"
# WHERE DO YOU WANT TO INSTALL IT
INSTALL_LOCATION="mana"
# DEPENDS FOR DEBIAN INSTALLS
DEBIAN="libnl-3-dev,isc-dhcp-server,tinyproxy,libssl-dev,apache2,macchanger,python-dnspython,python-pcapy,dsniff,stunnel4,python-scapy"
# DEPENDS FOR FEDORA INSTALLS
FEDORA="git,libnl,dhcp-forwarder,tinyproxy,openssl,httpd,macchanger,python-dns,pcapy,dsniff,stunnel,scapy,sslsplit"
# In order to check new versions of sslstrip+ and net-creds
BYPASS_UPDATE="YES"
# COMMANDS TO RUN AFTER
AFTER_COMMANDS="git clone --depth 1 https://github.com/sensepost/mana {INSTALL_LOCATION},cd {INSTALL_LOCATION},git submodule init,git submodule update,make -j4,make install"
# ===== file: /test/stubs/colorama.pyi | repo: jtesta/ssh-audit | license: MIT =====
from typing import Optional
def init(autoreset: bool = False, convert: Optional[bool] = None, strip: Optional[bool] = None, wrap: bool = True) -> None: ...
# ===== file: /huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/query_vmr_result_dto.py | repo: huaweicloud/huaweicloud-sdk-python-v3 | license: Apache-2.0 =====
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class QueryVmrResultDTO:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'id': 'str',
'vmr_id': 'str',
'vmr_name': 'str',
'gust_pwd': 'str',
'gust_join_url': 'str',
'chair_pwd': 'str',
'chair_join_url': 'str',
'allow_gust_first': 'bool',
'gust_first_notice': 'bool',
'vmr_mode': 'int',
'vmr_pkg_id': 'str',
'vmr_pkg_name': 'str',
'vmr_pkg_parties': 'int',
'status': 'int'
}
attribute_map = {
'id': 'id',
'vmr_id': 'vmrId',
'vmr_name': 'vmrName',
'gust_pwd': 'gustPwd',
'gust_join_url': 'gustJoinUrl',
'chair_pwd': 'chairPwd',
'chair_join_url': 'chairJoinUrl',
'allow_gust_first': 'allowGustFirst',
'gust_first_notice': 'gustFirstNotice',
'vmr_mode': 'vmrMode',
'vmr_pkg_id': 'vmrPkgId',
'vmr_pkg_name': 'vmrPkgName',
'vmr_pkg_parties': 'vmrPkgParties',
'status': 'status'
}
def __init__(self, id=None, vmr_id=None, vmr_name=None, gust_pwd=None, gust_join_url=None, chair_pwd=None, chair_join_url=None, allow_gust_first=None, gust_first_notice=None, vmr_mode=None, vmr_pkg_id=None, vmr_pkg_name=None, vmr_pkg_parties=None, status=None):
"""QueryVmrResultDTO
The model defined in huaweicloud sdk
        :param id: ID of the cloud meeting room. > Corresponds to the vmrID in the [[Create Meeting](https://support.huaweicloud.com/api-meeting/meeting_21_0014.html)](tag:hws)[[Create Meeting](https://support.huaweicloud.com/intl/zh-cn/api-meeting/meeting_21_0014.html)](tag:hk) API.
        :type id: str
        :param vmr_id: Fixed meeting ID or personal meeting ID of the cloud meeting room. > Corresponds to the vmrConferenceID in the [[Create Meeting](https://support.huaweicloud.com/api-meeting/meeting_21_0014.html)](tag:hws)[[Create Meeting](https://support.huaweicloud.com/intl/zh-cn/api-meeting/meeting_21_0014.html)](tag:hk) API.
        :type vmr_id: str
        :param vmr_name: Name of the cloud meeting room.
        :type vmr_name: str
        :param gust_pwd: Guest password.
        :type gust_pwd: str
        :param gust_join_url: Guest join link.
        :type gust_join_url: str
        :param chair_pwd: Host password.
        :type chair_pwd: str
        :param chair_join_url: Host join link.
        :type chair_join_url: str
        :param allow_gust_first: Whether guests are allowed to join the meeting before the host.
        :type allow_gust_first: bool
        :param gust_first_notice: Whether to notify the meeting room owner after the cloud meeting room is used.
        :type gust_first_notice: bool
        :param vmr_mode: VMR mode. * 0: personal meeting ID * 1: cloud meeting room * 2: webinar
        :type vmr_mode: int
        :param vmr_pkg_id: ID of the cloud meeting room package; returned only for cloud meeting rooms.
        :type vmr_pkg_id: str
        :param vmr_pkg_name: Name of the cloud meeting room package; returned only for cloud meeting rooms.
        :type vmr_pkg_name: str
        :param vmr_pkg_parties: Maximum number of concurrent participants of the cloud meeting room package; returned only for cloud meeting rooms.
        :type vmr_pkg_parties: int
        :param status: Status of the cloud meeting room. * 0: normal * 1: disabled * 2: unassigned
        :type status: int
"""
self._id = None
self._vmr_id = None
self._vmr_name = None
self._gust_pwd = None
self._gust_join_url = None
self._chair_pwd = None
self._chair_join_url = None
self._allow_gust_first = None
self._gust_first_notice = None
self._vmr_mode = None
self._vmr_pkg_id = None
self._vmr_pkg_name = None
self._vmr_pkg_parties = None
self._status = None
self.discriminator = None
if id is not None:
self.id = id
if vmr_id is not None:
self.vmr_id = vmr_id
if vmr_name is not None:
self.vmr_name = vmr_name
if gust_pwd is not None:
self.gust_pwd = gust_pwd
if gust_join_url is not None:
self.gust_join_url = gust_join_url
if chair_pwd is not None:
self.chair_pwd = chair_pwd
if chair_join_url is not None:
self.chair_join_url = chair_join_url
if allow_gust_first is not None:
self.allow_gust_first = allow_gust_first
if gust_first_notice is not None:
self.gust_first_notice = gust_first_notice
if vmr_mode is not None:
self.vmr_mode = vmr_mode
if vmr_pkg_id is not None:
self.vmr_pkg_id = vmr_pkg_id
if vmr_pkg_name is not None:
self.vmr_pkg_name = vmr_pkg_name
if vmr_pkg_parties is not None:
self.vmr_pkg_parties = vmr_pkg_parties
if status is not None:
self.status = status
@property
def id(self):
"""Gets the id of this QueryVmrResultDTO.
        ID of the cloud meeting room. > Corresponds to the vmrID in the [[Create Meeting](https://support.huaweicloud.com/api-meeting/meeting_21_0014.html)](tag:hws)[[Create Meeting](https://support.huaweicloud.com/intl/zh-cn/api-meeting/meeting_21_0014.html)](tag:hk) API.
:return: The id of this QueryVmrResultDTO.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this QueryVmrResultDTO.
        ID of the cloud meeting room. > Corresponds to the vmrID in the [[Create Meeting](https://support.huaweicloud.com/api-meeting/meeting_21_0014.html)](tag:hws)[[Create Meeting](https://support.huaweicloud.com/intl/zh-cn/api-meeting/meeting_21_0014.html)](tag:hk) API.
:param id: The id of this QueryVmrResultDTO.
:type id: str
"""
self._id = id
@property
def vmr_id(self):
"""Gets the vmr_id of this QueryVmrResultDTO.
        Fixed meeting ID or personal meeting ID of the cloud meeting room. > Corresponds to the vmrConferenceID in the [[Create Meeting](https://support.huaweicloud.com/api-meeting/meeting_21_0014.html)](tag:hws)[[Create Meeting](https://support.huaweicloud.com/intl/zh-cn/api-meeting/meeting_21_0014.html)](tag:hk) API.
:return: The vmr_id of this QueryVmrResultDTO.
:rtype: str
"""
return self._vmr_id
@vmr_id.setter
def vmr_id(self, vmr_id):
"""Sets the vmr_id of this QueryVmrResultDTO.
        Fixed meeting ID or personal meeting ID of the cloud meeting room. > Corresponds to the vmrConferenceID in the [[Create Meeting](https://support.huaweicloud.com/api-meeting/meeting_21_0014.html)](tag:hws)[[Create Meeting](https://support.huaweicloud.com/intl/zh-cn/api-meeting/meeting_21_0014.html)](tag:hk) API.
:param vmr_id: The vmr_id of this QueryVmrResultDTO.
:type vmr_id: str
"""
self._vmr_id = vmr_id
@property
def vmr_name(self):
"""Gets the vmr_name of this QueryVmrResultDTO.
        Name of the cloud meeting room.
:return: The vmr_name of this QueryVmrResultDTO.
:rtype: str
"""
return self._vmr_name
@vmr_name.setter
def vmr_name(self, vmr_name):
"""Sets the vmr_name of this QueryVmrResultDTO.
        Name of the cloud meeting room.
:param vmr_name: The vmr_name of this QueryVmrResultDTO.
:type vmr_name: str
"""
self._vmr_name = vmr_name
@property
def gust_pwd(self):
"""Gets the gust_pwd of this QueryVmrResultDTO.
        Guest password.
:return: The gust_pwd of this QueryVmrResultDTO.
:rtype: str
"""
return self._gust_pwd
@gust_pwd.setter
def gust_pwd(self, gust_pwd):
"""Sets the gust_pwd of this QueryVmrResultDTO.
        Guest password.
:param gust_pwd: The gust_pwd of this QueryVmrResultDTO.
:type gust_pwd: str
"""
self._gust_pwd = gust_pwd
@property
def gust_join_url(self):
"""Gets the gust_join_url of this QueryVmrResultDTO.
        Guest join link.
:return: The gust_join_url of this QueryVmrResultDTO.
:rtype: str
"""
return self._gust_join_url
@gust_join_url.setter
def gust_join_url(self, gust_join_url):
"""Sets the gust_join_url of this QueryVmrResultDTO.
        Guest join link.
:param gust_join_url: The gust_join_url of this QueryVmrResultDTO.
:type gust_join_url: str
"""
self._gust_join_url = gust_join_url
@property
def chair_pwd(self):
"""Gets the chair_pwd of this QueryVmrResultDTO.
        Host password.
:return: The chair_pwd of this QueryVmrResultDTO.
:rtype: str
"""
return self._chair_pwd
@chair_pwd.setter
def chair_pwd(self, chair_pwd):
"""Sets the chair_pwd of this QueryVmrResultDTO.
        Host password.
:param chair_pwd: The chair_pwd of this QueryVmrResultDTO.
:type chair_pwd: str
"""
self._chair_pwd = chair_pwd
@property
def chair_join_url(self):
"""Gets the chair_join_url of this QueryVmrResultDTO.
        Host join link.
:return: The chair_join_url of this QueryVmrResultDTO.
:rtype: str
"""
return self._chair_join_url
@chair_join_url.setter
def chair_join_url(self, chair_join_url):
"""Sets the chair_join_url of this QueryVmrResultDTO.
        Host join link.
:param chair_join_url: The chair_join_url of this QueryVmrResultDTO.
:type chair_join_url: str
"""
self._chair_join_url = chair_join_url
@property
def allow_gust_first(self):
"""Gets the allow_gust_first of this QueryVmrResultDTO.
        Whether guests are allowed to join the meeting before the host.
:return: The allow_gust_first of this QueryVmrResultDTO.
:rtype: bool
"""
return self._allow_gust_first
@allow_gust_first.setter
def allow_gust_first(self, allow_gust_first):
"""Sets the allow_gust_first of this QueryVmrResultDTO.
        Whether guests are allowed to join the meeting before the host.
:param allow_gust_first: The allow_gust_first of this QueryVmrResultDTO.
:type allow_gust_first: bool
"""
self._allow_gust_first = allow_gust_first
@property
def gust_first_notice(self):
"""Gets the gust_first_notice of this QueryVmrResultDTO.
        Whether to notify the meeting room owner after the cloud meeting room is used.
:return: The gust_first_notice of this QueryVmrResultDTO.
:rtype: bool
"""
return self._gust_first_notice
@gust_first_notice.setter
def gust_first_notice(self, gust_first_notice):
"""Sets the gust_first_notice of this QueryVmrResultDTO.
        Whether to notify the meeting room owner after the cloud meeting room is used.
:param gust_first_notice: The gust_first_notice of this QueryVmrResultDTO.
:type gust_first_notice: bool
"""
self._gust_first_notice = gust_first_notice
@property
def vmr_mode(self):
"""Gets the vmr_mode of this QueryVmrResultDTO.
        VMR mode. * 0: personal meeting ID * 1: cloud meeting room * 2: webinar
:return: The vmr_mode of this QueryVmrResultDTO.
:rtype: int
"""
return self._vmr_mode
@vmr_mode.setter
def vmr_mode(self, vmr_mode):
"""Sets the vmr_mode of this QueryVmrResultDTO.
        VMR mode. * 0: personal meeting ID * 1: cloud meeting room * 2: webinar
:param vmr_mode: The vmr_mode of this QueryVmrResultDTO.
:type vmr_mode: int
"""
self._vmr_mode = vmr_mode
@property
def vmr_pkg_id(self):
"""Gets the vmr_pkg_id of this QueryVmrResultDTO.
        ID of the cloud meeting room package; returned only for cloud meeting rooms.
:return: The vmr_pkg_id of this QueryVmrResultDTO.
:rtype: str
"""
return self._vmr_pkg_id
@vmr_pkg_id.setter
def vmr_pkg_id(self, vmr_pkg_id):
"""Sets the vmr_pkg_id of this QueryVmrResultDTO.
        ID of the cloud meeting room package; returned only for cloud meeting rooms.
:param vmr_pkg_id: The vmr_pkg_id of this QueryVmrResultDTO.
:type vmr_pkg_id: str
"""
self._vmr_pkg_id = vmr_pkg_id
@property
def vmr_pkg_name(self):
"""Gets the vmr_pkg_name of this QueryVmrResultDTO.
        Name of the cloud meeting room package; returned only for cloud meeting rooms.
:return: The vmr_pkg_name of this QueryVmrResultDTO.
:rtype: str
"""
return self._vmr_pkg_name
@vmr_pkg_name.setter
def vmr_pkg_name(self, vmr_pkg_name):
"""Sets the vmr_pkg_name of this QueryVmrResultDTO.
        Name of the cloud meeting room package; returned only for cloud meeting rooms.
:param vmr_pkg_name: The vmr_pkg_name of this QueryVmrResultDTO.
:type vmr_pkg_name: str
"""
self._vmr_pkg_name = vmr_pkg_name
@property
def vmr_pkg_parties(self):
"""Gets the vmr_pkg_parties of this QueryVmrResultDTO.
        Maximum number of concurrent participants of the cloud meeting room package; returned only for cloud meeting rooms.
:return: The vmr_pkg_parties of this QueryVmrResultDTO.
:rtype: int
"""
return self._vmr_pkg_parties
@vmr_pkg_parties.setter
def vmr_pkg_parties(self, vmr_pkg_parties):
"""Sets the vmr_pkg_parties of this QueryVmrResultDTO.
        Maximum number of concurrent participants of the cloud meeting room package; returned only for cloud meeting rooms.
:param vmr_pkg_parties: The vmr_pkg_parties of this QueryVmrResultDTO.
:type vmr_pkg_parties: int
"""
self._vmr_pkg_parties = vmr_pkg_parties
@property
def status(self):
"""Gets the status of this QueryVmrResultDTO.
        Status of the cloud meeting room. * 0: normal * 1: disabled * 2: unassigned
:return: The status of this QueryVmrResultDTO.
:rtype: int
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this QueryVmrResultDTO.
        Status of the cloud meeting room. * 0: normal * 1: disabled * 2: unassigned
:param status: The status of this QueryVmrResultDTO.
:type status: int
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, QueryVmrResultDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
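# Illustrative usage sketch (not part of the SDK): constructing the DTO and
# serializing it to a plain dict; the field values here are made up.
def _example_query_vmr_result():  # pragma: no cover
    dto = QueryVmrResultDTO(vmr_name="Team room", vmr_mode=1, status=0)
    return dto.to_dict()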
# ===== file: /mmseg/models/backbones/two_stream_swin_transformer.py | repo: shinianzhihou/ChangeDetection | license: Apache-2.0 =====
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as tvmodels
from torch import einsum
from einops import rearrange, repeat
import timm.models as tms
from ..builder import BACKBONES
@BACKBONES.register_module()
class TwoStreamSwinTransformer(nn.Module):
arch_dict = {
}
def __init__(self,variant,pretrained=False):
super().__init__()
if variant == 'swin_large_patch4_window12_384_in22k':
model = tms.swin_large_patch4_window12_384_in22k(pretrained=pretrained)
self.img_shapes = [48,24,12,12]
self.channel_shapes = [384,768,1536,1536]
elif variant == 'swin_base_patch4_window7_224_in22k':
model = tms.swin_base_patch4_window7_224_in22k(pretrained=pretrained)
self.img_shapes = [28,14,7,7]
            self.channel_shapes = [256,512,1024,1024]  # Swin-B stage widths (embed_dim=128)
elif variant == 'swin_base_patch4_window12_384_in22k':
model = tms.swin_base_patch4_window12_384_in22k(pretrained=pretrained)
self.img_shapes = [48,24,12,12]
self.channel_shapes = [256,512,1024,1024]
else:
raise NotImplementedError
self.patch_embed = model.patch_embed
self.layers = model.layers
self.absolute_pos_embed = model.absolute_pos_embed
self.pos_drop = model.pos_drop
def basic_forward(self,x):
out = []
x = self.patch_embed(x)
if self.absolute_pos_embed is not None:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
for idx,layer in enumerate(self.layers):
x = layer(x)
            b, hw, d = x.shape
            h = w = self.img_shapes[idx]
            # x is (batch, h*w, dim); move the channel axis before reshaping to NCHW
            out.append(x.transpose(1, 2).reshape(b, d, h, w))
return out
def forward(self,x):
b,n,c,h,w = x.shape
x0,x1 = x.chunk(dim=1,chunks=2)
x0,x1 = x0.squeeze(1),x1.squeeze(1)
ys_0 = self.basic_forward(x0)
ys_1 = self.basic_forward(x1)
# for i in ys_0:
# print(i.shape)
return [i-j for i,j in zip(ys_0,ys_1)]
def init_weights(self, pretrained=None):
pass
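# Illustrative sketch (not from the original file): the backbone takes a 5-D
# tensor of two image streams and returns per-stage difference feature maps.
# The variant name matches one of the branches handled in __init__ above;
# running this requires timm to provide that model constructor.
def _example_two_stream_forward():  # pragma: no cover
    x = torch.randn(1, 2, 3, 224, 224)  # (batch, streams, channels, H, W)
    model = TwoStreamSwinTransformer('swin_base_patch4_window7_224_in22k')
    return [f.shape for f in model(x)]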
# ===== file: /third_party/web_platform_tests/tools/wptrunner/wptrunner/tests/test_testloader.py | repo: youtube/cobalt | license: BSD-3-Clause =====
from __future__ import unicode_literals
import os
import sys
import tempfile
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from mozlog import structured
from wptrunner.testloader import TestFilter as Filter
from .test_chunker import make_mock_manifest
structured.set_default_logger(structured.structuredlog.StructuredLogger("TestLoader"))
include_ini = """\
skip: true
[test_\u53F0]
skip: false
"""
def test_filter_unicode():
tests = make_mock_manifest(("test", "a", 10), ("test", "a/b", 10),
("test", "c", 10))
with tempfile.NamedTemporaryFile("wb", suffix=".ini") as f:
f.write(include_ini.encode('utf-8'))
f.flush()
Filter(manifest_path=f.name, test_manifests=tests)
# ===== file: /rico/utils.py | repo: yzygitzh/Humanoid | no license =====
#coding=utf-8
import numpy as np
from matplotlib import pyplot as plt
def traverse_view_tree(view_tree, call_back, semantic_ui=False):
    if view_tree is None or (not semantic_ui and not is_view_valid(view_tree)):
return
call_back(view_tree)
if "children" in view_tree:
for child in view_tree["children"]:
traverse_view_tree(child, call_back, semantic_ui)
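# Illustrative sketch (not from the original file): using the traversal helper
# above to collect the class name of every valid view in a hierarchy.
def _example_collect_view_classes(view_tree, semantic_ui=False):
    classes = []
    traverse_view_tree(view_tree, lambda v: classes.append(v.get("class")), semantic_ui)
    return classes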
def is_view_hierarchy_valid(view_tree, config_json, semantic_ui=False):
origin_dim = config_json["origin_dim"]
if semantic_ui:
view_root_bounds = view_tree["bounds"]
else:
view_root_bounds = view_tree["activity"]["root"]["bounds"]
# skip full-screen horizon ones
if view_root_bounds[2] > view_root_bounds[3] and view_root_bounds[2] > origin_dim[0]:
return False
return True
def compute_view_offset(view_tree, config_json, semantic_ui=False):
if semantic_ui:
view_root_bounds = view_tree["bounds"]
else:
view_root_bounds = view_tree["activity"]["root"]["bounds"]
downscale_dim = config_json["downscale_dim"]
origin_dim = config_json["origin_dim"]
status_bar_height = config_json["status_bar_height"]
navigation_bar_height = config_json["navigation_bar_height"]
downscale_ratio = downscale_dim[0] / origin_dim[0]
view_offset = [0, 0]
if semantic_ui:
root_view = view_tree
else:
root_view = view_tree["activity"]["root"]
# heuristically identify non-full-screen window like permission window
if not root_view["class"].startswith("com.android.internal.policy.PhoneWindow"):
return view_offset
# view_tree from DroidBot may not contain activity_name
if "activity_name" in view_tree and not view_tree["activity_name"] == "com.android.packageinstaller/com.android.packageinstaller.permission.ui.GrantPermissionsActivity":
return view_offset
if view_root_bounds[2] - view_root_bounds[0] < origin_dim[0] and \
view_root_bounds[3] - view_root_bounds[1] < origin_dim[1] - status_bar_height - navigation_bar_height:
view_center = [(view_root_bounds[0] + view_root_bounds[2]) / 2,
(view_root_bounds[1] + view_root_bounds[3]) / 2]
view_offset = [int((origin_dim[0] / 2 - view_center[0]) * downscale_ratio),
int(((origin_dim[1] + status_bar_height - navigation_bar_height) / 2 - view_center[1]) * downscale_ratio)]
return view_offset
def is_view_valid(view):
visible_to_user = view["visible-to-user"]
if not visible_to_user:
return False
if "bounds" not in view or "rel-bounds" not in view:
return False
bounds = view["bounds"]
rel_bounds = view["rel-bounds"]
if (bounds[0] >= bounds[2] or bounds[1] >= bounds[3] or \
rel_bounds[0] >= rel_bounds[2] or rel_bounds[1] >= rel_bounds[3]):
return False
if ((rel_bounds[2] - rel_bounds[0]) < (bounds[2] - bounds[0]) or \
(rel_bounds[3] - rel_bounds[1]) < (bounds[3] - bounds[1])):
return False
return True
def is_text_view(view):
if "text" not in view:
return False
for ancestor in view["ancestors"]:
if "edittext" in ancestor.lower():
return True
return "edittext" in view["class"].lower()
def is_valid_data(image, interact, config_json):
if interact is None:
return False
text_dim = config_json["text_dim"]
image_dim = config_json["image_dim"]
interact_dim = config_json["interact_dim"]
if np.sum(image[:, :, text_dim]) + np.sum(image[:, :, image_dim]) == 0:
return False
if np.sum(image[:, :, interact_dim]) == 0:
return False
return True
def get_text_view_signature(view):
signature = ""
# class
signature += "[class]"
if "class" in view:
signature += view["class"]
# resource_id
signature += "[resource_id]"
if "resource_id" in view:
signature += view["resource_id"]
# text_hint
signature += "[text_hint]"
if "text_hint" in view:
signature += view["text_hint"]
# pointer
signature += "[pointer]"
if "pointer" in view:
signature += view["pointer"]
return signature
def visualize_data(data, label=""):
image_full = np.zeros([data.shape[1], data.shape[0], 3], dtype=np.float32)
for i in range(data.shape[2]):
image_full[:, :, i] = data[:, :, i].T
max_val = np.max(image_full[:, :, i])
if max_val > 0:
image_full[:, :, i] /= max_val
plt.imshow(image_full, interpolation="nearest")
plt.xlabel(label)
plt.show()
# ===== file: /ask-smapi-model/ask_smapi_model/v1/skill/manifest/__init__.py | repo: alexa/alexa-apis-for-python | license: Apache-2.0 =====
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
from __future__ import absolute_import
from .audio_interface import AudioInterface
from .interface import Interface
from .free_trial_information import FreeTrialInformation
from .localized_flash_briefing_info import LocalizedFlashBriefingInfo
from .demand_response_apis import DemandResponseApis
from .automatic_distribution import AutomaticDistribution
from .video_region import VideoRegion
from .authorized_client_lwa_application_android import AuthorizedClientLwaApplicationAndroid
from .music_content_name import MusicContentName
from .custom_product_prompts import CustomProductPrompts
from .event_name_type import EventNameType
from .skill_manifest_localized_privacy_and_compliance import SkillManifestLocalizedPrivacyAndCompliance
from .video_fire_tv_catalog_ingestion import VideoFireTvCatalogIngestion
from .friendly_name import FriendlyName
from .up_channel_items import UpChannelItems
from .health_interface import HealthInterface
from .subscription_payment_frequency import SubscriptionPaymentFrequency
from .source_language_for_locales import SourceLanguageForLocales
from .flash_briefing_update_frequency import FlashBriefingUpdateFrequency
from .smart_home_protocol import SmartHomeProtocol
from .lambda_endpoint import LambdaEndpoint
from .video_prompt_name import VideoPromptName
from .event_name import EventName
from .music_content_type import MusicContentType
from .music_apis import MusicApis
from .music_alias import MusicAlias
from .knowledge_apis import KnowledgeApis
from .manifest_gadget_support import ManifestGadgetSupport
from .dialog_management import DialogManagement
from .offer_type import OfferType
from .house_hold_list import HouseHoldList
from .linked_common_schemes import LinkedCommonSchemes
from .interface_type import InterfaceType
from .authorized_client_lwa import AuthorizedClientLwa
from .lambda_region import LambdaRegion
from .video_catalog_info import VideoCatalogInfo
from .extension_initialization_request import ExtensionInitializationRequest
from .dialog_manager import DialogManager
from .custom_apis import CustomApis
from .voice_profile_feature import VoiceProfileFeature
from .skill_manifest import SkillManifest
from .data_store_package import DataStorePackage
from .alexa_data_store_package_manager_interface import AlexaDataStorePackageManagerInterface
from .automatic_cloned_locale import AutomaticClonedLocale
from .marketplace_pricing import MarketplacePricing
from .alexa_presentation_apl_interface import AlexaPresentationAplInterface
from .alexa_presentation_html_interface import AlexaPresentationHtmlInterface
from .tax_information import TaxInformation
from .viewport_specification import ViewportSpecification
from .skill_manifest_privacy_and_compliance import SkillManifestPrivacyAndCompliance
from .authorized_client import AuthorizedClient
from .permission_items import PermissionItems
from .alexa_for_business_interface import AlexaForBusinessInterface
from .version import Version
from .app_link import AppLink
from .alexa_search import AlexaSearch
from .music_interfaces import MusicInterfaces
from .region import Region
from .alexa_for_business_apis import AlexaForBusinessApis
from .authorized_client_lwa_application import AuthorizedClientLwaApplication
from .supported_controls_type import SupportedControlsType
from .skill_manifest_events import SkillManifestEvents
from .custom_connections import CustomConnections
from .extension_request import ExtensionRequest
from .flash_briefing_genre import FlashBriefingGenre
from .alexa_for_business_interface_request import AlexaForBusinessInterfaceRequest
from .smart_home_apis import SmartHomeApis
from .localized_music_info import LocalizedMusicInfo
from .video_prompt_name_type import VideoPromptNameType
from .amazon_conversations_dialog_manager import AMAZONConversationsDialogManager
from .play_store_common_scheme_name import PlayStoreCommonSchemeName
from .catalog_type import CatalogType
from .knowledge_apis_enablement_channel import KnowledgeApisEnablementChannel
from .app_link_interface import AppLinkInterface
from .music_request import MusicRequest
from .permission_name import PermissionName
from .display_interface_template_version import DisplayInterfaceTemplateVersion
from .skill_manifest_localized_publishing_information import SkillManifestLocalizedPublishingInformation
from .custom_task import CustomTask
from .skill_manifest_envelope import SkillManifestEnvelope
from .dialog_delegation_strategy import DialogDelegationStrategy
from .locales_by_automatic_cloned_locale import LocalesByAutomaticClonedLocale
from .currency import Currency
from .distribution_countries import DistributionCountries
from .localized_name import LocalizedName
from .linked_application import LinkedApplication
from .distribution_mode import DistributionMode
from .custom_localized_information_dialog_management import CustomLocalizedInformationDialogManagement
from .supported_controls import SupportedControls
from .video_apis import VideoApis
from .localized_knowledge_information import LocalizedKnowledgeInformation
from .linked_android_common_intent import LinkedAndroidCommonIntent
from .gadget_controller_interface import GadgetControllerInterface
from .event_publications import EventPublications
from .connections_payload import ConnectionsPayload
from .shopping_kit import ShoppingKit
from .ios_app_store_common_scheme_name import IOSAppStoreCommonSchemeName
from .manifest_version import ManifestVersion
from .viewport_shape import ViewportShape
from .alexa_data_store_package_manager_implemented_interface import AlexaDataStorePackageManagerImplementedInterface
from .catalog_info import CatalogInfo
from .display_interface_apml_version import DisplayInterfaceApmlVersion
from .video_app_interface import VideoAppInterface
from .tax_information_category import TaxInformationCategory
from .android_common_intent_name import AndroidCommonIntentName
from .music_capability import MusicCapability
from .subscription_information import SubscriptionInformation
from .viewport_mode import ViewportMode
from .flash_briefing_apis import FlashBriefingApis
from .flash_briefing_content_type import FlashBriefingContentType
from .custom_localized_information import CustomLocalizedInformation
from .gadget_support_requirement import GadgetSupportRequirement
from .game_engine_interface import GameEngineInterface
from .localized_flash_briefing_info_items import LocalizedFlashBriefingInfoItems
from .catalog_name import CatalogName
from .skill_manifest_endpoint import SkillManifestEndpoint
from .music_wordmark import MusicWordmark
from .paid_skill_information import PaidSkillInformation
from .video_feature import VideoFeature
from .skill_manifest_publishing_information import SkillManifestPublishingInformation
from .android_custom_intent import AndroidCustomIntent
from .app_link_v2_interface import AppLinkV2Interface
from .video_apis_locale import VideoApisLocale
from .music_feature import MusicFeature
from .ssl_certificate_type import SSLCertificateType
from .alexa_for_business_interface_request_name import AlexaForBusinessInterfaceRequestName
from .video_country_info import VideoCountryInfo
from .lambda_ssl_certificate_type import LambdaSSLCertificateType
from .skill_manifest_apis import SkillManifestApis
from .display_interface import DisplayInterface
# ===== file: /solution/binary_search/2776/main.py | repo: tony9402/baekjoon | license: MIT =====
# Authored by : gusdn3477
# Co-authored by : -
# Link : http://boj.kr/93afacc450454aedbd2b0d6667914846
import sys
def input():
return sys.stdin.readline().rstrip()
def binary_search(t):
start, end = 0, len(arr)-1
while start <= end:
mid = (start + end) // 2
if arr[mid] == t:
return 1
elif arr[mid] > t:
end = mid - 1
else:
start = mid + 1
return 0
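# Worked example (illustrative, not part of the original solution): with
# arr = [1, 3, 5], binary_search(3) probes mid index 1 (value 3) and returns 1,
# while binary_search(4) narrows start past end and returns 0.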
T = int(input())
for i in range(T):
N = int(input())
arr = list(map(int, input().split()))
M = int(input())
arr2 = list(map(int, input().split()))
arr.sort()
for j in arr2:
print(binary_search(j))
# ===== file: /packages/Python/lldbsuite/test/functionalities/data-formatter/data-formatter-stl/libstdcpp/smart_ptr/TestDataFormatterStdSmartPtr.py | repo: apple/swift-lldb | license: NCSA / Apache-2.0 WITH LLVM-exception =====
"""
Test lldb data formatter subsystem.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class StdSmartPtrDataFormatterTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@add_test_categories(["libstdcxx"])
def test_with_run_command(self):
self.build()
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_source_regexp(
self, "Set break point at this line.")
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped', 'stop reason = breakpoint'])
self.expect("frame variable nsp", substrs=['nsp = nullptr'])
self.expect("frame variable isp", substrs=['isp = 123'])
self.expect("frame variable ssp", substrs=['ssp = "foobar"'])
self.expect("frame variable nwp", substrs=['nwp = nullptr'])
self.expect("frame variable iwp", substrs=['iwp = 123'])
self.expect("frame variable swp", substrs=['swp = "foobar"'])
self.runCmd("continue")
self.expect("frame variable nsp", substrs=['nsp = nullptr'])
self.expect("frame variable isp", substrs=['isp = nullptr'])
self.expect("frame variable ssp", substrs=['ssp = nullptr'])
self.expect("frame variable nwp", substrs=['nwp = nullptr'])
self.expect("frame variable iwp", substrs=['iwp = nullptr'])
self.expect("frame variable swp", substrs=['swp = nullptr'])
# ===== file: /roam_to_git/scrapping.py | repo: MatthieuBizien/roam-to-git | license: MIT =====
import atexit
import os
import pdb
import sys
import time
from pathlib import Path
from typing import List, Optional
import psutil
from loguru import logger
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException
ROAM_FORMATS = ("json", "markdown", "edn")
class Browser:
FIREFOX = "Firefox"
PHANTOMJS = "PhantomJS"
CHROME = "Chrome"
def __init__(self, browser, output_directory, headless=True, debug=False):
if browser == Browser.FIREFOX:
logger.trace("Configure Firefox Profile Firefox")
firefox_profile = webdriver.FirefoxProfile()
firefox_profile.set_preference("browser.download.folderList", 2)
firefox_profile.set_preference("browser.download.manager.showWhenStarting", False)
firefox_profile.set_preference("browser.download.dir", str(output_directory))
firefox_profile.set_preference(
"browser.helperApps.neverAsk.saveToDisk", "application/zip")
logger.trace("Configure Firefox Profile Options")
firefox_options = webdriver.FirefoxOptions()
if headless:
logger.trace("Set Firefox as headless")
firefox_options.headless = True
logger.trace("Start Firefox")
self.browser = webdriver.Firefox(firefox_profile=firefox_profile,
firefox_options=firefox_options)
elif browser == Browser.PHANTOMJS:
raise NotImplementedError()
# TODO configure
# self.browser = webdriver.PhantomJS()
        elif browser == Browser.CHROME:
raise NotImplementedError()
# TODO configure
# self.browser = webdriver.Chrome()
else:
raise ValueError(f"Invalid browser '{browser}")
self.debug = debug
def get(self, url):
if self.debug:
try:
self.browser.get(url)
except Exception:
pdb.set_trace()
else:
self.browser.get(url)
    def find_element_by_css_selector(self, css_selector, check=True) -> "HTMLElement":
        if self.debug and check:
            try:
                element = self.browser.find_element_by_css_selector(css_selector)
            except NoSuchElementException:
                pdb.set_trace()
                raise
        else:
            element = self.browser.find_element_by_css_selector(css_selector)
        # Always wrap the raw selenium element, matching the return annotation
        return HTMLElement(element, debug=self.debug)
def find_element_by_link_text(self, text) -> "HTMLElement":
elements = self.browser.find_elements_by_link_text(text)
if len(elements) != 1:
if self.debug:
pdb.set_trace()
elements_str = [e.text for e in elements]
raise ValueError(
f"Got {len(elements)} elements, expected 1 for {text}: {elements_str}")
element, = elements
return HTMLElement(element, debug=self.debug)
def close(self):
self.browser.close()
class HTMLElement:
def __init__(self, html_element: webdriver.remote.webelement.WebElement, debug=False):
self.html_element = html_element
self.debug = debug
def click(self):
if self.debug:
try:
return self.html_element.click()
except Exception:
pdb.set_trace()
else:
return self.html_element.click()
def send_keys(self, keys: str):
if self.debug:
try:
return self.html_element.send_keys(keys)
except Exception:
pdb.set_trace()
else:
return self.html_element.send_keys(keys)
@property
def text(self) -> str:
return self.html_element.text
class Config:
def __init__(self,
browser: str,
database: Optional[str],
debug: bool,
gui: bool,
sleep_duration: float = 2.,
browser_args: Optional[List[str]] = None):
self.user = os.environ["ROAMRESEARCH_USER"]
self.password = os.environ["ROAMRESEARCH_PASSWORD"]
assert self.user
assert self.password
if database:
self.database: Optional[str] = database
else:
self.database = os.environ["ROAMRESEARCH_DATABASE"]
assert self.database, "Please define the Roam database you want to backup."
self.debug = debug
self.gui = gui
self.sleep_duration = sleep_duration
self.browser = getattr(Browser, browser.upper())
self.browser_args = (browser_args or [])
def download_rr_archive(output_type: str,
output_directory: Path,
config: Config,
slow_motion=10,
):
logger.debug("Creating browser")
browser = Browser(browser=config.browser,
headless=not config.gui,
debug=config.debug,
output_directory=output_directory)
if config.debug:
pass
try:
return _download_rr_archive(browser, output_type, output_directory, config)
except (KeyboardInterrupt, SystemExit):
logger.debug("Closing browser on interrupt {}", output_type)
browser.close()
logger.debug("Closed browser {}", output_type)
raise
finally:
logger.debug("Closing browser {}", output_type)
browser.close()
logger.debug("Closed browser {}", output_type)
def _download_rr_archive(browser: Browser,
output_type: str,
output_directory: Path,
config: Config,
):
"""Download an archive in RoamResearch.
:param output_type: Download JSON or Markdown or EDN
:param output_directory: Directory where to stock the outputs
"""
signin(browser, config, sleep_duration=config.sleep_duration)
if config.database:
go_to_database(browser, config.database)
logger.debug("Wait for interface to load")
dot_button = None
for _ in range(100):
        # Startup is a little slow, so we wait for the button that signals it's ready
time.sleep(config.sleep_duration)
try:
dot_button = browser.find_element_by_css_selector(".bp3-icon-more", check=False)
break
except NoSuchElementException:
pass
# If we have multiple databases, we will be stuck. Let's detect that.
time.sleep(config.sleep_duration)
try:
strong = browser.find_element_by_css_selector("strong", check=False)
except NoSuchElementException:
continue
if "database's you are an admin of" == strong.text.lower():
            logger.error(
                "You seem to have multiple databases. Please select one with the "
                "--database option")
sys.exit(1)
    assert dot_button is not None, "All roads lead to Roam, but that one is too long. Try " \
                                   "again when Roam servers are faster."
# Click on something empty to remove the eventual popup
# "Sync Quick Capture Notes with Workspace"
# TODO browser.mouse.click(0, 0)
dot_button.click()
logger.debug("Launch download popup")
export_all = browser.find_element_by_link_text("Export All")
export_all.click()
time.sleep(config.sleep_duration)
# Configure download type
dropdown_button = browser.find_element_by_css_selector(".bp3-dialog .bp3-button-text")
if output_type.lower() != dropdown_button.text.lower():
logger.debug("Changing output type to {}", output_type)
dropdown_button.click()
output_type_elem = browser.find_element_by_link_text(output_type.upper())
output_type_elem.click()
# defensive check
assert dropdown_button.text.lower() == output_type.lower(), (dropdown_button.text, output_type)
# Click on "Export All"
export_all_confirm = browser.find_element_by_css_selector(".bp3-intent-primary")
export_all_confirm.click()
logger.debug("Wait download of {} to {}", output_type, output_directory)
for i in range(1, 60 * 10):
time.sleep(1)
if i % 60 == 0:
logger.debug("Keep waiting for {}, {}s elapsed", output_type, i)
for file in output_directory.iterdir():
if file.name.endswith(".zip"):
logger.debug("File {} found for {}", file, output_type)
time.sleep(1)
return
logger.debug("Waiting too long {}")
raise FileNotFoundError("Impossible to download {} in {}", output_type, output_directory)
def signin(browser: Browser, config: Config, sleep_duration=1.):
"""Sign-in into Roam"""
logger.debug("Opening signin page")
browser.get('https://roamresearch.com/#/signin')
logger.debug("Waiting for email and password fields", config.user)
while True:
try:
email_elem = browser.find_element_by_css_selector("input[name='email']", check=False)
passwd_elem = browser.find_element_by_css_selector("input[name='password']")
logger.debug("Fill email '{}'", config.user)
email_elem.send_keys(config.user)
logger.debug("Fill password")
passwd_elem.send_keys(config.password)
logger.debug("Defensive check: verify that the user input field is correct")
time.sleep(sleep_duration)
email_elem = browser.find_element_by_css_selector("input[name='email']", check=False)
if email_elem.html_element.get_attribute('value') != config.user:
continue
logger.debug("Activating sign-in")
passwd_elem.send_keys(Keys.RETURN)
break
except NoSuchElementException:
logger.trace("NoSuchElementException: Retry getting the email field")
time.sleep(1)
except StaleElementReferenceException:
logger.trace("StaleElementReferenceException: Retry getting the email field")
time.sleep(1)
def go_to_database(browser, database):
"""Go to the database page"""
url = f'https://roamresearch.com/#/app/{database}'
logger.debug(f"Load database from url '{url}'")
browser.get(url)
def _kill_child_process(timeout=50):
procs = psutil.Process().children(recursive=True)
if not procs:
return
logger.debug("Terminate child process {}", procs)
for p in procs:
try:
p.terminate()
except psutil.NoSuchProcess:
pass
gone, still_alive = psutil.wait_procs(procs, timeout=timeout)
if still_alive:
logger.warning(f"Kill child process {still_alive} that was still alive after "
f"'timeout={timeout}' from 'terminate()' command")
for p in still_alive:
try:
p.kill()
except psutil.NoSuchProcess:
pass
def scrap(zip_path: Path, formats: List[str], config: Config):
# Register to always kill child process when the script close, to not have zombie process.
    # TODO: is this still needed with Selenium?
if not config.debug:
atexit.register(_kill_child_process)
for f in formats:
format_zip_path = zip_path / f
format_zip_path.mkdir(exist_ok=True)
download_rr_archive(f, format_zip_path, config=config)
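A minimal driver sketch for the scraper above, assuming Firefox and geckodriver are installed and the three ROAMRESEARCH_* environment variables are set; the output path and format list are placeholders:

from pathlib import Path

if __name__ == "__main__":
    # Config reads credentials from the environment; database=None falls back
    # to ROAMRESEARCH_DATABASE.
    config = Config(browser="firefox", database=None, debug=False, gui=False)
    backup_dir = Path("/tmp/roam-backup")  # hypothetical output location
    backup_dir.mkdir(parents=True, exist_ok=True)
    scrap(backup_dir, ["json", "markdown"], config)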
----- netbox-community/netbox /netbox/utilities/forms/fields/__init__.py (Apache-2.0, Python) -----
from .array import *
from .content_types import *
from .csv import *
from .dynamic import *
from .expandable import *
from .fields import *
----- adafruit/Adafruit_Learning_System_Guides /Playing_Sounds_and_Using_Buttons_with_Raspberry_Pi/simple-jukebox.py (MIT, Python) -----
# SPDX-FileCopyrightText: 2018 Mikey Sklar for Adafruit Industries
#
# SPDX-License-Identifier: MIT
# This script requires a Raspberry Pi 2, 3 or Zero. Circuit Python must
# be installed and it is strongly recommended that you use the latest
# release of Raspbian.
import time
from os import listdir
import subprocess
import board
import digitalio
button1 = digitalio.DigitalInOut(board.D23)
button1.direction = digitalio.Direction.INPUT
button1.pull = digitalio.Pull.UP
button2 = digitalio.DigitalInOut(board.D24)
button2.direction = digitalio.Direction.INPUT
button2.pull = digitalio.Pull.UP
button3 = digitalio.DigitalInOut(board.D25)
button3.direction = digitalio.Direction.INPUT
button3.pull = digitalio.Pull.UP
mp3_files = [f for f in listdir('.') if f.endswith('.mp3')]
if not mp3_files:
    print("No mp3 files found!")
    raise SystemExit  # nothing to play; indexing below would fail on an empty list
print('--- Available mp3 files ---')
print(mp3_files)
print('--- Press button #1 to select mp3, button #2 to play current. ---')
index = 0
while True:
if not button1.value:
index += 1
if index >= len(mp3_files):
index = 0
print("--- " + mp3_files[index] + " ---")
if not button2.value:
subprocess.Popen(['omxplayer', mp3_files[index]])
print('--- Playing ' + mp3_files[index] + ' ---')
print('--- Press button #3 to clear playing mp3s. ---')
time.sleep(0.25)
if not button3.value:
subprocess.call(['killall', 'omxplayer'])
print('--- Cleared all existing mp3s. ---')
time.sleep(0.25)
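The selection branch above re-triggers on every loop pass while button #1 stays held; a hedged variant using edge detection, reusing button1, mp3_files, and index from the setup above, so one press advances exactly one track:

prev_pressed = False
while True:
    pressed = not button1.value          # active-low: pull-up reads True when idle
    if pressed and not prev_pressed:     # only act on the press edge
        index = (index + 1) % len(mp3_files)
        print("--- " + mp3_files[index] + " ---")
    prev_pressed = pressed
    time.sleep(0.02)                     # short poll also debounces the switch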
----- lemon24/reader /tests/test_plugins_mark_as_read.py (BSD-3-Clause, Python) -----
import json
import os
import pytest
from fakeparser import Parser
from utils import naive_datetime
from utils import utc_datetime as datetime
def test_regex_mark_as_read(make_reader):
key = '.reader.mark-as-read'
value = {'title': ['^match']}
reader = make_reader(':memory:', plugins=['reader.mark_as_read'])
def get_entry_data(**kwargs):
return {
(e.id, e.read, e.read_modified, e.important, e.important_modified)
for e in reader.get_entries(**kwargs)
}
parser = Parser()
reader._parser = parser
one = parser.feed(1, datetime(2010, 1, 1))
parser.entry(1, 1, datetime(2010, 1, 1), title='match old')
reader._now = lambda: naive_datetime(2010, 1, 1)
reader.add_feed(one)
reader.update_feeds()
reader.set_tag(one, key, value)
one = parser.feed(1, datetime(2010, 1, 2))
match_new = parser.entry(1, 2, datetime(2010, 1, 2), title='match new')
parser.entry(1, 3, datetime(2010, 1, 2), title='no match')
two = parser.feed(2, datetime(2010, 1, 2))
parser.entry(2, 3, datetime(2010, 1, 2), title='match other')
reader._now = lambda: naive_datetime(2010, 2, 1)
reader.add_feed(two)
reader.update_feeds()
assert len(list(reader.get_entries())) == 4
assert get_entry_data(read=True) == {
(match_new.id, True, None, False, None),
}
parser.entry(1, 3, datetime(2010, 1, 2), title='no match once again')
reader._now = lambda: naive_datetime(2010, 3, 1)
reader.update_feeds()
assert get_entry_data(read=True) == {
(match_new.id, True, None, False, None),
}
@pytest.mark.parametrize('value', ['x', {'title': 'x'}, {'title': [1]}])
def test_regex_mark_as_read_bad_metadata(make_reader, value):
reader = make_reader(':memory:', plugins=['reader.mark_as_read'])
parser = Parser()
reader._parser = parser
one = parser.feed(1, datetime(2010, 1, 1))
parser.entry(1, 1, datetime(2010, 1, 1), title='match')
reader.add_feed(one)
reader.set_tag(one, '.reader.mark-as-read', value)
reader.update_feeds()
assert [e.read for e in reader.get_entries()] == [False]
def test_entry_deleted(make_reader):
def delete_entry_plugin(reader):
def hook(reader, entry, _):
if entry.resource_id == ('1', '1, 1'):
reader._storage.delete_entries([entry.resource_id])
reader.after_entry_update_hooks.append(hook)
reader = make_reader(
':memory:', plugins=[delete_entry_plugin, 'reader.mark_as_read']
)
reader._parser = parser = Parser()
one = parser.feed(1)
reader.add_feed(one)
reader.set_tag(one, '.reader.mark-as-read', {'title': ['one']})
parser.entry(1, 1, title='one')
parser.entry(1, 2, title='two')
# shouldn't fail
reader.update_feeds()
assert {eval(e.id)[1] for e in reader.get_entries()} == {2}
def test_missing_title(make_reader):
reader = make_reader(':memory:', plugins=['reader.mark_as_read'])
reader._parser = parser = Parser()
one = parser.feed(1)
reader.add_feed(one)
parser.entry(1, 1, title=None)
parser.entry(1, 2, title='')
parser.entry(1, 3, title='three')
reader.set_tag(one, '.reader.mark-as-read', {'title': ['^$']})
# shouldn't fail
reader.update_feeds()
assert {eval(e.id)[1] for e in reader.get_entries(read=True)} == {1, 2}
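For context, a hedged sketch of how an application would enable the plugin these tests exercise; the database path, feed URL, and regexes are placeholders:

from reader import make_reader

reader = make_reader("db.sqlite", plugins=["reader.mark_as_read"])
feed_url = "https://example.com/feed.xml"  # placeholder feed
reader.add_feed(feed_url)
# Entries whose title matches any of these regexes are marked read on update.
reader.set_tag(feed_url, ".reader.mark-as-read", {"title": ["^Sponsored", "^AD:"]})
reader.update_feeds()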
----- intel-analytics/BigDL /python/orca/src/bigdl/orca/learn/trigger.py (Apache-2.0, Python) -----
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING
from typing import (
Union,
Optional
)
if TYPE_CHECKING:
from bigdl.dllib.optim.optimizer import EveryEpoch as DLlibEveryEpoch
from bigdl.dllib.optim.optimizer import SeveralIteration as DLlibSeveralIteration
class Trigger(ABC):
@abstractmethod
def get_trigger(self):
pass
@staticmethod
def convert_trigger(trigger: Optional[Union[str, "Trigger"]]):
if trigger is None:
return None
if isinstance(trigger, str):
if trigger.lower() == "everyepoch":
return EveryEpoch().get_trigger()
else:
from bigdl.dllib.utils.log4Error import invalidInputError
invalidInputError(False,
"Only 'EveryEpoch', orca triggers and bigdl triggers are "
"supported now")
elif isinstance(trigger, Trigger):
return trigger.get_trigger()
else:
return trigger
class EveryEpoch(Trigger):
"""
A trigger specifies a timespot or several timespots during training,
and a corresponding action will be taken when the timespot(s) is reached.
    EveryEpoch is a trigger that triggers an action when each epoch finishes.
Could be used as trigger in setvalidation and setcheckpoint in Optimizer,
and also in TrainSummary.set_summary_trigger.
>>> everyEpoch = EveryEpoch()
"""
def __init__(self) -> None:
from bigdl.dllib.optim.optimizer import EveryEpoch
self.trigger = EveryEpoch()
def get_trigger(self) -> "DLlibEveryEpoch":
return self.trigger
class SeveralIteration(Trigger):
"""
A trigger specifies a timespot or several timespots during training,
and a corresponding action will be taken when the timespot(s) is reached.
SeveralIteration is a trigger that triggers an action every "n"
iterations.
Could be used as trigger in setvalidation and setcheckpoint in Optimizer,
and also in TrainSummary.set_summary_trigger.
>>> serveralIteration = SeveralIteration(2)
"""
def __init__(self, interval: int) -> None:
from bigdl.dllib.optim.optimizer import SeveralIteration
self.trigger = SeveralIteration(interval)
def get_trigger(self) -> "DLlibSeveralIteration":
return self.trigger
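A hedged usage sketch of the conversion helper above, assuming bigdl-dllib is installed; this is roughly what an orca estimator would do with a user-supplied trigger argument:

# None passes through unchanged.
assert Trigger.convert_trigger(None) is None
# A string or an orca Trigger both resolve to the underlying dllib trigger.
checkpoint_trigger = Trigger.convert_trigger("everyepoch")
validation_trigger = Trigger.convert_trigger(SeveralIteration(100))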
----- ronaldoussoren/pyobjc /pyobjc-framework-CoreServices/PyObjCTest/test_sksearch.py (MIT, Python) -----
import CoreServices
from PyObjCTools.TestSupport import TestCase, os_level_key, os_release
import objc
class TestSKSearch(TestCase):
def testTypes(self):
self.assertIsInstance(CoreServices.SKSearchGroupRef, objc.objc_class)
self.assertIsInstance(CoreServices.SKSearchResultsRef, objc.objc_class)
self.assertIsInstance(CoreServices.SKSearchRef, objc.objc_class)
def testConstants(self):
self.assertEqual(CoreServices.kSKSearchRanked, 0)
self.assertEqual(CoreServices.kSKSearchBooleanRanked, 1)
self.assertEqual(CoreServices.kSKSearchRequiredRanked, 2)
self.assertEqual(CoreServices.kSKSearchPrefixRanked, 3)
self.assertEqual(CoreServices.kSKSearchOptionDefault, 0)
self.assertEqual(CoreServices.kSKSearchOptionNoRelevanceScores, 1)
self.assertEqual(CoreServices.kSKSearchOptionSpaceMeansOR, 2)
self.assertEqual(CoreServices.kSKSearchOptionFindSimilar, 4)
def testFunctions(self):
self.assertIsInstance(CoreServices.SKSearchGroupGetTypeID(), int)
self.assertIsInstance(CoreServices.SKSearchResultsGetTypeID(), int)
self.assertIsInstance(CoreServices.SKSearchGetTypeID(), int)
data = CoreServices.NSMutableData.data()
index = CoreServices.SKIndexCreateWithMutableData(
data, "pyobjc.test", CoreServices.kSKIndexInverted, None
)
self.assertIsInstance(index, CoreServices.SKIndexRef)
doc = CoreServices.SKDocumentCreateWithURL(
CoreServices.CFURLCreateWithFileSystemPath(
None,
"/Library/Documentation/Acknowledgements.rtf",
CoreServices.kCFURLPOSIXPathStyle,
False,
)
)
doc2 = CoreServices.SKDocumentCreateWithURL(
CoreServices.CFURLCreateWithFileSystemPath(
None,
"/Library/Documentation/iPod/Acknowledgements.rtf",
CoreServices.kCFURLPOSIXPathStyle,
False,
)
)
CoreServices.SKIndexAddDocumentWithText(
index, doc, "copyright and licenses", True
)
CoreServices.SKIndexAddDocumentWithText(
index, doc2, "copyright and licenses for iPod", True
)
CoreServices.SKIndexFlush(index)
grp = CoreServices.SKSearchGroupCreate([index])
self.assertIsInstance(grp, CoreServices.SKSearchGroupRef)
lst = []
@objc.callbackFor(CoreServices.SKSearchResultsCreateWithQuery)
def callback(idx, doc, ctx):
lst.append([idx, doc, ctx])
return True
ctx = 10
orig_res = CoreServices.SKSearchResultsCreateWithQuery(
grp, "copyright", CoreServices.kSKSearchRequiredRanked, 2, ctx, callback
)
self.assertIsInstance(orig_res, CoreServices.SKSearchResultsRef)
res = CoreServices.SKSearchResultsCreateWithDocuments(
grp, [doc], 10, ctx, callback
)
self.assertIsInstance(res, CoreServices.SKSearchResultsRef)
self.assertGreaterEqual(len(lst), 2)
self.assertEqual(lst[0][0], index)
self.assertIsInstance(lst[0][1], CoreServices.SKDocumentRef)
self.assertEqual(lst[0][2], ctx)
cnt = CoreServices.SKSearchResultsGetCount(orig_res)
self.assertIsInstance(cnt, int)
if os_level_key(os_release()) < os_level_key("10.7"):
# The API does not work on macOS 10.7 or later.
# (Verified with an ObjC reproducer).
# See issue #9
self.assertGreater(cnt, 0)
res = orig_res
cnt = CoreServices.SKSearchResultsGetCount(res)
self.assertIsInstance(cnt, int)
self.assertGreater(cnt, 0)
v, o1, o2, o3 = CoreServices.SKSearchResultsGetInfoInRange(
res, CoreServices.CFRange(0, cnt), None, None, None
)
self.assertIsInstance(v, int)
self.assertIsInstance(o1, tuple)
if o1:
self.assertIsInstance(o1[0], CoreServices.SKDocumentRef)
self.assertIsInstance(o2, tuple)
if o2:
self.assertIsInstance(o2[0], CoreServices.SKIndexRef)
self.assertIsInstance(o3, tuple)
if o3:
self.assertIsInstance(o3[0], float)
v = CoreServices.SKSearchResultsCopyMatchingTerms(res, 1)
# self.assertIsInstance(v, CoreServices.CFArrayRef)
src = CoreServices.SKSearchCreate(
index, "copyright", CoreServices.kSKSearchOptionFindSimilar
)
self.assertIsInstance(src, CoreServices.SKSearchRef)
v, o1, o2, o3 = CoreServices.SKSearchFindMatches(src, 10, None, None, 1.0, None)
self.assertIsInstance(v, bool)
self.assertIsInstance(o1, tuple)
if o1:
self.assertIsInstance(o1[0], int)
self.assertIsInstance(o2, tuple)
if o2:
self.assertIsInstance(o2[0], float)
self.assertIsInstance(o3, int)
v1, v2 = CoreServices.SKIndexCopyInfoForDocumentIDs(index, o3, o1, None, None)
if v1:
self.assertIsInstance(v1[0], str)
self.assertIsInstance(v2, tuple)
if v2:
self.assertIsInstance(v2[0], int)
self.assertArgIsIn(CoreServices.SKIndexCopyDocumentRefsForDocumentIDs, 2)
self.assertArgIsOut(CoreServices.SKIndexCopyDocumentRefsForDocumentIDs, 3)
self.assertArgSizeInArg(
CoreServices.SKIndexCopyDocumentRefsForDocumentIDs, 2, 1
)
self.assertArgSizeInArg(
CoreServices.SKIndexCopyDocumentRefsForDocumentIDs, 3, 1
)
v = CoreServices.SKIndexCopyDocumentRefsForDocumentIDs(index, o3, o1, None)
self.assertIsInstance(v, tuple)
if v:
self.assertIsInstance(v[0], CoreServices.SKDocumentRef)
v = CoreServices.SKIndexCopyDocumentURLsForDocumentIDs(index, o3, o1, None)
self.assertIsInstance(v, tuple)
if v:
self.assertIsInstance(v[0], CoreServices.CFURLRef)
self.assertResultIsCFRetained(CoreServices.SKSearchGroupCopyIndexes)
a = CoreServices.SKSearchGroupCopyIndexes(grp)
self.assertIsInstance(a, CoreServices.CFArrayRef)
CoreServices.SKSearchCancel(src)
----- jchanvfx/NodeGraphQt /NodeGraphQt/widgets/dialogs.py (MIT, Python) -----
import os
from Qt import QtWidgets, QtGui, QtCore
_current_user_directory = os.path.expanduser('~')
def _set_dir(file):
global _current_user_directory
if os.path.isdir(file):
_current_user_directory = file
elif os.path.isfile(file):
_current_user_directory = os.path.split(file)[0]
class FileDialog(object):
@staticmethod
def getSaveFileName(parent=None, title='Save File', file_dir=None,
ext_filter='*'):
if not file_dir:
file_dir = _current_user_directory
file_dlg = QtWidgets.QFileDialog.getSaveFileName(
parent, title, file_dir, ext_filter)
file = file_dlg[0] or None
if file:
_set_dir(file)
return file_dlg
@staticmethod
def getOpenFileName(parent=None, title='Open File', file_dir=None,
ext_filter='*'):
if not file_dir:
file_dir = _current_user_directory
file_dlg = QtWidgets.QFileDialog.getOpenFileName(
parent, title, file_dir, ext_filter)
file = file_dlg[0] or None
if file:
_set_dir(file)
return file_dlg
class BaseDialog(object):
@staticmethod
def message_dialog(parent=None, text='', title='Message', dialog_icon=None,
custom_icon=None):
dlg = QtWidgets.QMessageBox(parent=parent)
dlg.setWindowTitle(title)
dlg.setInformativeText(text)
dlg.setStandardButtons(QtWidgets.QMessageBox.Ok)
if custom_icon:
pixmap = QtGui.QPixmap(custom_icon).scaledToHeight(
32, QtCore.Qt.SmoothTransformation
)
dlg.setIconPixmap(pixmap)
else:
if dialog_icon == 'information':
dlg.setIcon(dlg.Information)
elif dialog_icon == 'warning':
dlg.setIcon(dlg.Warning)
elif dialog_icon == 'critical':
dlg.setIcon(dlg.Critical)
dlg.exec_()
@staticmethod
def question_dialog(parent=None, text='', title='Are you sure?',
dialog_icon=None, custom_icon=None):
dlg = QtWidgets.QMessageBox(parent=parent)
dlg.setWindowTitle(title)
dlg.setInformativeText(text)
dlg.setStandardButtons(
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
)
if custom_icon:
pixmap = QtGui.QPixmap(custom_icon).scaledToHeight(
32, QtCore.Qt.SmoothTransformation
)
dlg.setIconPixmap(pixmap)
else:
if dialog_icon == 'information':
dlg.setIcon(dlg.Information)
elif dialog_icon == 'warning':
dlg.setIcon(dlg.Warning)
elif dialog_icon == 'critical':
dlg.setIcon(dlg.Critical)
result = dlg.exec_()
return bool(result == QtWidgets.QMessageBox.Yes)
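A hedged usage sketch of the two helper classes above; it assumes a Qt binding resolvable through Qt.py and a running QApplication, and the title, filter, and text are placeholders:

from Qt import QtWidgets

app = QtWidgets.QApplication([])
file_path, _selected_filter = FileDialog.getOpenFileName(
    title='Open Session', ext_filter='*.json')
if file_path and BaseDialog.question_dialog(
        text='Load "{}"?'.format(file_path), dialog_icon='information'):
    print('loading', file_path)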
----- apache/beam /sdks/python/apache_beam/examples/kafkataxi/kafka_taxi.py (Apache-2.0, Python) -----
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An example that writes to and reads from Kafka.
This example reads from the PubSub NYC Taxi stream described in
https://github.com/googlecodelabs/cloud-dataflow-nyc-taxi-tycoon, writes to a
given Kafka topic and reads back from the same Kafka topic.
"""
# pytype: skip-file
import logging
import sys
import typing
import apache_beam as beam
from apache_beam.io.kafka import ReadFromKafka
from apache_beam.io.kafka import WriteToKafka
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
def run(
bootstrap_servers,
topic,
with_metadata,
bq_dataset,
bq_table_name,
project,
pipeline_options):
# bootstrap_servers = '123.45.67.89:123:9092'
# topic = 'kafka_taxirides_realtime'
# pipeline_args = ['--project', 'my-project',
# '--runner', 'DataflowRunner',
# '--temp_location', 'my-temp-location',
# '--region', 'my-region',
# '--num_workers', 'my-num-workers']
window_size = 15 # size of the Window in seconds.
def log_ride(ride):
if 'timestamp' in ride:
logging.info(
'Found ride at latitude %r and longitude %r with %r '
'passengers at timestamp %r',
ride['latitude'],
ride['longitude'],
ride['passenger_count'],
ride['timestamp'])
else:
logging.info(
'Found ride at latitude %r and longitude %r with %r '
'passengers',
ride['latitude'],
ride['longitude'],
ride['passenger_count'])
def convert_kafka_record_to_dictionary(record):
# the records have 'value' attribute when --with_metadata is given
if hasattr(record, 'value'):
ride_bytes = record.value
elif isinstance(record, tuple):
ride_bytes = record[1]
else:
raise RuntimeError('unknown record type: %s' % type(record))
# Converting bytes record from Kafka to a dictionary.
import ast
ride = ast.literal_eval(ride_bytes.decode("UTF-8"))
output = {
key: ride[key]
for key in ['latitude', 'longitude', 'passenger_count']
}
if hasattr(record, 'timestamp'):
# timestamp is read from Kafka metadata
output['timestamp'] = record.timestamp
return output
with beam.Pipeline(options=pipeline_options) as pipeline:
_ = (
pipeline
| beam.io.ReadFromPubSub(
topic='projects/pubsub-public-data/topics/taxirides-realtime').
with_output_types(bytes)
| beam.Map(lambda x: (b'', x)).with_output_types(
            typing.Tuple[bytes, bytes])  # Kafka write transforms expect KVs.
| beam.WindowInto(beam.window.FixedWindows(window_size))
| WriteToKafka(
producer_config={'bootstrap.servers': bootstrap_servers},
topic=topic))
ride_col = (
pipeline
| ReadFromKafka(
consumer_config={'bootstrap.servers': bootstrap_servers},
topics=[topic],
with_metadata=with_metadata)
| beam.Map(lambda record: convert_kafka_record_to_dictionary(record)))
if bq_dataset:
schema = 'latitude:STRING,longitude:STRING,passenger_count:INTEGER'
if with_metadata:
schema += ',timestamp:STRING'
_ = (
ride_col
| beam.io.WriteToBigQuery(bq_table_name, bq_dataset, project, schema))
else:
_ = ride_col | beam.FlatMap(lambda ride: log_ride(ride))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--bootstrap_servers',
dest='bootstrap_servers',
required=True,
help='Bootstrap servers for the Kafka cluster. Should be accessible by '
'the runner')
parser.add_argument(
'--topic',
dest='topic',
default='kafka_taxirides_realtime',
help='Kafka topic to write to and read from')
parser.add_argument(
'--with_metadata',
default=False,
action='store_true',
help='If set, also reads metadata from the Kafka broker.')
parser.add_argument(
'--bq_dataset',
type=str,
default='',
help='BigQuery Dataset to write tables to. '
'If set, export data to a BigQuery table instead of just logging. '
'Must already exist.')
parser.add_argument(
'--bq_table_name',
default='xlang_kafka_taxi',
help='The BigQuery table name. Should not already exist.')
known_args, pipeline_args = parser.parse_known_args()
pipeline_options = PipelineOptions(
pipeline_args, save_main_session=True, streaming=True)
# We also require the --project option to access --bq_dataset
project = pipeline_options.view_as(GoogleCloudOptions).project
if project is None:
parser.print_usage()
print(sys.argv[0] + ': error: argument --project is required')
sys.exit(1)
run(
known_args.bootstrap_servers,
known_args.topic,
known_args.with_metadata,
known_args.bq_dataset,
known_args.bq_table_name,
project,
pipeline_options)
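A hedged programmatic equivalent of the command-line entry point above; every value is a placeholder and would normally come from argparse:

from apache_beam.options.pipeline_options import PipelineOptions

options = PipelineOptions(
    ['--project', 'my-project', '--region', 'us-central1',
     '--runner', 'DataflowRunner', '--temp_location', 'gs://my-bucket/tmp'],
    save_main_session=True, streaming=True)
run(bootstrap_servers='10.0.0.1:9092', topic='kafka_taxirides_realtime',
    with_metadata=False, bq_dataset='', bq_table_name='xlang_kafka_taxi',
    project='my-project', pipeline_options=options)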
----- CanciuCostin/android-spyware /tools/platform-tools/systrace/catapult/common/eslint/bin/run_eslint (BSD-3-Clause and others, Python) -----
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import sys
_CATAPULT_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__),
os.path.pardir, os.path.pardir, os.path.pardir))
_ESLINT_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.path.pardir))
DIRECTORIES_TO_LINT = [
os.path.join(_CATAPULT_PATH, 'dashboard', 'dashboard'),
os.path.join(_CATAPULT_PATH, 'tracing', 'tracing')
]
def _AddToPathIfNeeded(path):
if path not in sys.path:
sys.path.insert(0, path)
if __name__ == '__main__':
_AddToPathIfNeeded(_ESLINT_PATH)
import eslint
parser = argparse.ArgumentParser(
description='Wrapper script to run eslint on Catapult code')
parser.add_argument('--paths', '-p', default=None, nargs='+', metavar='PATH',
help='List of paths to lint')
parser.add_argument('--all', default=None, action='store_true',
help='Runs eslint on all applicable Catapult code')
parser.add_argument('--extra-args', default=None, type=str,
help='A string of extra arguments to pass to eslint')
args = parser.parse_args(sys.argv[1:])
if ((args.paths is not None and args.all is not None) or
(args.paths is None and args.all is None)):
    print('Either --paths or --all must be used, but not both.\n')
parser.print_help()
sys.exit(1)
paths = DIRECTORIES_TO_LINT if args.all else args.paths
success, output = eslint.RunEslint(paths, extra_args=args.extra_args)
  print(output)
sys.exit(not success)
----- ethan-sui/AI-algorithm-engineer-knowledge /一、工程基础/软件工程/算法与数据结构/力扣题目/leetcode_026.py (Apache-2.0, Python) -----
# 2. Remove duplicates from a sorted array.
# Approach 1: two pointers (slow/fast). While the elements under both pointers
# are equal, only the fast pointer advances; when they differ, both advance and
# the fast pointer's value is copied into the slow pointer's slot. The elements
# the slow pointer has passed over are then exactly the unique ones.
# Time complexity O(n), space complexity O(1).
def removeDuplicates0(arr):
if len(arr) == 0:
return 0
left = 0
right = 1
while right < len(arr):
if arr[right] != arr[left]:
left += 1
arr[left] = arr[right]
right += 1
else:
right += 1
return left + 1
nums = [1, 1, 2]
print(removeDuplicates0(nums))
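A few quick checks of the in-place compaction property (the first k elements, where k is the return value, are the unique ones):

assert removeDuplicates0([]) == 0
assert removeDuplicates0([7]) == 1
nums2 = [0, 0, 1, 1, 1, 2, 2, 3, 3, 4]
k = removeDuplicates0(nums2)
assert k == 5 and nums2[:k] == [0, 1, 2, 3, 4]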
----- keiffster/program-y /test/programytest/parser/template/graph_tests/test_that.py (MIT, Python) -----
import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.that import TemplateThatNode
from programytest.parser.template.graph_tests.graph_test_client import TemplateGraphTestClient
class TemplateGraphThatTests(TemplateGraphTestClient):
def test_that_index_default(self):
template = ET.fromstring("""
<template>
<that/>
</template>
""")
ast = self._graph.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertEqual(len(ast.children), 1)
self.assertEqual(ast.children[0].to_string(), "[THAT[WORD]1]")
date_node = ast.children[0]
self.assertIsNotNone(date_node)
self.assertIsInstance(date_node, TemplateThatNode)
self.assertIsNotNone(ast.resolve(self._client_context))
result = ast.resolve_to_string(self.create_client_context("testid"))
self.assertIsNotNone(result)
def test_that_index_as_attrib(self):
template = ET.fromstring("""
<template>
<that index="1"></that>
</template>
""")
ast = self._graph.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertEqual(len(ast.children), 1)
self.assertEqual(ast.children[0].to_string(), "[THAT[WORD]1]")
date_node = ast.children[0]
self.assertIsNotNone(date_node)
self.assertIsInstance(date_node, TemplateThatNode)
self.assertIsNotNone(ast.resolve(self._client_context))
result = ast.resolve_to_string(self.create_client_context("testid"))
self.assertIsNotNone(result)
    def test_that_question_sentence_as_attrib(self):
template = ET.fromstring("""
<template>
<that index="1, 1"></that>
</template>
""")
ast = self._graph.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertEqual(len(ast.children), 1)
self.assertEqual(ast.children[0].to_string(), "[THAT[WORD]1, 1]")
date_node = ast.children[0]
self.assertIsNotNone(date_node)
self.assertIsInstance(date_node, TemplateThatNode)
self.assertIsNotNone(ast.resolve(self._client_context))
result = ast.resolve_to_string(self.create_client_context("testid"))
self.assertIsNotNone(result)
def test_that_index_as_child(self):
template = ET.fromstring("""
<template>
<that>
<index>1</index>
</that>
</template>
""")
ast = self._graph.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertEqual(len(ast.children), 1)
self.assertEqual(ast.children[0].to_string(), "[THAT[NODE]]")
date_node = ast.children[0]
self.assertIsNotNone(date_node)
self.assertIsInstance(date_node, TemplateThatNode)
self.assertIsNotNone(ast.resolve(self._client_context))
result = ast.resolve_to_string(self.create_client_context("testid"))
self.assertIsNotNone(result)
def test_that_question_sentence_as_child(self):
template = ET.fromstring("""
<template>
<that>
<index>1, 1</index>
</that>
</template>
""")
ast = self._graph.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertEqual(len(ast.children), 1)
self.assertEqual(ast.children[0].to_string(), "[THAT[NODE]]")
date_node = ast.children[0]
self.assertIsNotNone(date_node)
self.assertIsInstance(date_node, TemplateThatNode)
self.assertIsNotNone(ast.resolve(self._client_context))
result = ast.resolve_to_string(self.create_client_context("testid"))
self.assertIsNotNone(result)
----- csev/dj4e-samples /fetch/views.py (CC-BY-3.0/MIT, Python) -----
from django.shortcuts import render, redirect, reverse
from django.views import View
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.humanize.templatetags.humanize import naturaltime
from django.http import JsonResponse, HttpResponse
from datetime import datetime, timedelta
from django.utils.html import escape
import time
class HomeView(View):
def get(self, request):
return render(request, 'fetch/main.html')
def jsonfun(request):
time.sleep(2)
stuff = {
'first': 'first thing',
'second': 'second thing'
}
return JsonResponse(stuff)
# References
# https://simpleisbetterthancomplex.com/tutorial/2016/07/27/how-to-return-json-encoded-response.html
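A hedged test sketch for the JSON view above, using Django's test client; the URL path is an assumption about the project's urls.py (the view itself is named jsonfun):

from django.test import Client

client = Client()
response = client.get('/fetch/jsonfun')  # hypothetical route mapped to jsonfun
assert response.status_code == 200
assert response.json() == {'first': 'first thing', 'second': 'second thing'}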
----- IntelAI/models /models/object_detection/pytorch/ssd-resnet34/inference/cpu/oob_utils.py (Apache-2.0, Python) -----
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import torch
import torch.fx as fx
from torch.nn.utils.fusion import fuse_conv_bn_eval
from typing import Type, Dict, Any, Tuple, Iterable
import copy
def _parent_name(target : str) -> Tuple[str, str]:
"""
Splits a qualname into parent path and last atom.
For example, `foo.bar.baz` -> (`foo.bar`, `baz`)
"""
*parent, name = target.rsplit('.', 1)
return parent[0] if parent else '', name
# Works for length 2 patterns with 2 modules
def matches_module_pattern(pattern: Iterable[Type], node: fx.Node, modules: Dict[str, Any]):
if len(node.args) == 0:
return False
nodes: Tuple[Any, fx.Node] = (node.args[0], node)
for expected_type, current_node in zip(pattern, nodes):
if not isinstance(current_node, fx.Node):
return False
if current_node.op != 'call_module':
return False
if not isinstance(current_node.target, str):
return False
if current_node.target not in modules:
return False
if type(modules[current_node.target]) is not expected_type:
return False
return True
def replace_node_module(node: fx.Node, modules: Dict[str, Any], new_module: torch.nn.Module):
    assert isinstance(node.target, str)
parent_name, name = _parent_name(node.target)
setattr(modules[parent_name], name, new_module)
def conv_bn_fuse(model: torch.nn.Module, inplace=False) -> torch.nn.Module:
"""
Fuses convolution/BN layers for inference purposes. Will deepcopy your
model by default, but can modify the model inplace as well.
"""
patterns = [(torch.nn.Conv1d, torch.nn.BatchNorm1d),
(torch.nn.Conv2d, torch.nn.BatchNorm2d),
(torch.nn.Conv3d, torch.nn.BatchNorm3d)]
if not inplace:
model = copy.deepcopy(model)
fx_model = fx.symbolic_trace(model)
modules = dict(fx_model.named_modules())
new_graph = copy.deepcopy(fx_model.graph)
for pattern in patterns:
for node in new_graph.nodes:
if matches_module_pattern(pattern, node, modules):
if len(node.args[0].users) > 1: # Output of conv is used by other nodes
continue
conv = modules[node.args[0].target]
bn = modules[node.target]
fused_conv = fuse_conv_bn_eval(conv, bn)
replace_node_module(node.args[0], modules, fused_conv)
node.replace_all_uses_with(node.args[0])
new_graph.erase_node(node)
return fx.GraphModule(fx_model, new_graph)
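A hedged sanity check for the fusion pass above, assuming torchvision is available; Conv+BN fusion is only valid in eval mode, which is why the model is switched before tracing:

import torch
import torchvision.models as models

model = models.resnet18().eval()          # any fx-traceable Conv+BN network
fused = conv_bn_fuse(model)
x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    # Fusion should not change the numerical output (up to small tolerance).
    torch.testing.assert_close(model(x), fused(x), rtol=1e-3, atol=1e-3)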
----- home-assistant/core /homeassistant/components/growatt_server/sensor_types/tlx.py (Apache-2.0, Python) -----
"""Growatt Sensor definitions for the TLX type.
TLX Type is also shown on the UI as: "MIN/MIC/MOD/NEO"
"""
from __future__ import annotations
from homeassistant.components.sensor import SensorDeviceClass, SensorStateClass
from homeassistant.const import (
PERCENTAGE,
UnitOfElectricCurrent,
UnitOfElectricPotential,
UnitOfEnergy,
UnitOfFrequency,
UnitOfPower,
UnitOfTemperature,
)
from .sensor_entity_description import GrowattSensorEntityDescription
TLX_SENSOR_TYPES: tuple[GrowattSensorEntityDescription, ...] = (
GrowattSensorEntityDescription(
key="tlx_energy_today",
translation_key="tlx_energy_today",
api_key="eacToday",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_energy_total",
translation_key="tlx_energy_total",
api_key="eacTotal",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
precision=1,
never_resets=True,
),
GrowattSensorEntityDescription(
key="tlx_energy_total_input_1",
translation_key="tlx_energy_total_input_1",
api_key="epv1Total",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
precision=1,
never_resets=True,
),
GrowattSensorEntityDescription(
key="tlx_energy_today_input_1",
translation_key="tlx_energy_today_input_1",
api_key="epv1Today",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_voltage_input_1",
translation_key="tlx_voltage_input_1",
api_key="vpv1",
native_unit_of_measurement=UnitOfElectricPotential.VOLT,
device_class=SensorDeviceClass.VOLTAGE,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_amperage_input_1",
translation_key="tlx_amperage_input_1",
api_key="ipv1",
native_unit_of_measurement=UnitOfElectricCurrent.AMPERE,
device_class=SensorDeviceClass.CURRENT,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_wattage_input_1",
translation_key="tlx_wattage_input_1",
api_key="ppv1",
native_unit_of_measurement=UnitOfPower.WATT,
device_class=SensorDeviceClass.POWER,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_energy_total_input_2",
translation_key="tlx_energy_total_input_2",
api_key="epv2Total",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
precision=1,
never_resets=True,
),
GrowattSensorEntityDescription(
key="tlx_energy_today_input_2",
translation_key="tlx_energy_today_input_2",
api_key="epv2Today",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_voltage_input_2",
translation_key="tlx_voltage_input_2",
api_key="vpv2",
native_unit_of_measurement=UnitOfElectricPotential.VOLT,
device_class=SensorDeviceClass.VOLTAGE,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_amperage_input_2",
translation_key="tlx_amperage_input_2",
api_key="ipv2",
native_unit_of_measurement=UnitOfElectricCurrent.AMPERE,
device_class=SensorDeviceClass.CURRENT,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_wattage_input_2",
translation_key="tlx_wattage_input_2",
api_key="ppv2",
native_unit_of_measurement=UnitOfPower.WATT,
device_class=SensorDeviceClass.POWER,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_energy_total_input_3",
translation_key="tlx_energy_total_input_3",
api_key="epv3Total",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
precision=1,
never_resets=True,
),
GrowattSensorEntityDescription(
key="tlx_energy_today_input_3",
translation_key="tlx_energy_today_input_3",
api_key="epv3Today",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_voltage_input_3",
translation_key="tlx_voltage_input_3",
api_key="vpv3",
native_unit_of_measurement=UnitOfElectricPotential.VOLT,
device_class=SensorDeviceClass.VOLTAGE,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_amperage_input_3",
translation_key="tlx_amperage_input_3",
api_key="ipv3",
native_unit_of_measurement=UnitOfElectricCurrent.AMPERE,
device_class=SensorDeviceClass.CURRENT,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_wattage_input_3",
translation_key="tlx_wattage_input_3",
api_key="ppv3",
native_unit_of_measurement=UnitOfPower.WATT,
device_class=SensorDeviceClass.POWER,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_energy_total_input_4",
translation_key="tlx_energy_total_input_4",
api_key="epv4Total",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
precision=1,
never_resets=True,
),
GrowattSensorEntityDescription(
key="tlx_energy_today_input_4",
translation_key="tlx_energy_today_input_4",
api_key="epv4Today",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_voltage_input_4",
translation_key="tlx_voltage_input_4",
api_key="vpv4",
native_unit_of_measurement=UnitOfElectricPotential.VOLT,
device_class=SensorDeviceClass.VOLTAGE,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_amperage_input_4",
translation_key="tlx_amperage_input_4",
api_key="ipv4",
native_unit_of_measurement=UnitOfElectricCurrent.AMPERE,
device_class=SensorDeviceClass.CURRENT,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_wattage_input_4",
translation_key="tlx_wattage_input_4",
api_key="ppv4",
native_unit_of_measurement=UnitOfPower.WATT,
device_class=SensorDeviceClass.POWER,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_solar_generation_total",
translation_key="tlx_solar_generation_total",
api_key="epvTotal",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
never_resets=True,
),
GrowattSensorEntityDescription(
key="tlx_internal_wattage",
translation_key="tlx_internal_wattage",
api_key="ppv",
native_unit_of_measurement=UnitOfPower.WATT,
device_class=SensorDeviceClass.POWER,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_reactive_voltage",
translation_key="tlx_reactive_voltage",
api_key="vacrs",
native_unit_of_measurement=UnitOfElectricPotential.VOLT,
device_class=SensorDeviceClass.VOLTAGE,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_frequency",
translation_key="tlx_frequency",
api_key="fac",
native_unit_of_measurement=UnitOfFrequency.HERTZ,
device_class=SensorDeviceClass.FREQUENCY,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_current_wattage",
translation_key="tlx_current_wattage",
api_key="pac",
native_unit_of_measurement=UnitOfPower.WATT,
device_class=SensorDeviceClass.POWER,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_temperature_1",
translation_key="tlx_temperature_1",
api_key="temp1",
native_unit_of_measurement=UnitOfTemperature.CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_temperature_2",
translation_key="tlx_temperature_2",
api_key="temp2",
native_unit_of_measurement=UnitOfTemperature.CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_temperature_3",
translation_key="tlx_temperature_3",
api_key="temp3",
native_unit_of_measurement=UnitOfTemperature.CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_temperature_4",
translation_key="tlx_temperature_4",
api_key="temp4",
native_unit_of_measurement=UnitOfTemperature.CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_temperature_5",
translation_key="tlx_temperature_5",
api_key="temp5",
native_unit_of_measurement=UnitOfTemperature.CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
precision=1,
),
GrowattSensorEntityDescription(
key="tlx_all_batteries_discharge_today",
translation_key="tlx_all_batteries_discharge_today",
api_key="edischargeToday",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
),
GrowattSensorEntityDescription(
key="tlx_all_batteries_discharge_total",
translation_key="tlx_all_batteries_discharge_total",
api_key="edischargeTotal",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
never_resets=True,
),
GrowattSensorEntityDescription(
key="tlx_battery_1_discharge_w",
translation_key="tlx_battery_1_discharge_w",
api_key="bdc1DischargePower",
native_unit_of_measurement=UnitOfPower.WATT,
device_class=SensorDeviceClass.POWER,
),
GrowattSensorEntityDescription(
key="tlx_battery_1_discharge_total",
translation_key="tlx_battery_1_discharge_total",
api_key="bdc1DischargeTotal",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
never_resets=True,
),
GrowattSensorEntityDescription(
key="tlx_battery_2_discharge_w",
translation_key="tlx_battery_2_discharge_w",
api_key="bdc1DischargePower",
native_unit_of_measurement=UnitOfPower.WATT,
device_class=SensorDeviceClass.POWER,
),
GrowattSensorEntityDescription(
key="tlx_battery_2_discharge_total",
translation_key="tlx_battery_2_discharge_total",
api_key="bdc1DischargeTotal",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
never_resets=True,
),
GrowattSensorEntityDescription(
key="tlx_all_batteries_charge_today",
translation_key="tlx_all_batteries_charge_today",
api_key="echargeToday",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
),
GrowattSensorEntityDescription(
key="tlx_all_batteries_charge_total",
translation_key="tlx_all_batteries_charge_total",
api_key="echargeTotal",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
never_resets=True,
),
GrowattSensorEntityDescription(
key="tlx_battery_1_charge_w",
translation_key="tlx_battery_1_charge_w",
api_key="bdc1ChargePower",
native_unit_of_measurement=UnitOfPower.WATT,
device_class=SensorDeviceClass.POWER,
),
GrowattSensorEntityDescription(
key="tlx_battery_1_charge_total",
translation_key="tlx_battery_1_charge_total",
api_key="bdc1ChargeTotal",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
never_resets=True,
),
GrowattSensorEntityDescription(
key="tlx_battery_2_charge_w",
translation_key="tlx_battery_2_charge_w",
api_key="bdc1ChargePower",
native_unit_of_measurement=UnitOfPower.WATT,
device_class=SensorDeviceClass.POWER,
),
GrowattSensorEntityDescription(
key="tlx_battery_2_charge_total",
translation_key="tlx_battery_2_charge_total",
api_key="bdc1ChargeTotal",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
never_resets=True,
),
GrowattSensorEntityDescription(
key="tlx_export_to_grid_today",
translation_key="tlx_export_to_grid_today",
api_key="etoGridToday",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
),
GrowattSensorEntityDescription(
key="tlx_export_to_grid_total",
translation_key="tlx_export_to_grid_total",
api_key="etoGridTotal",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
never_resets=True,
),
GrowattSensorEntityDescription(
key="tlx_load_consumption_today",
translation_key="tlx_load_consumption_today",
api_key="elocalLoadToday",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
),
GrowattSensorEntityDescription(
key="mix_load_consumption_total",
translation_key="mix_load_consumption_total",
api_key="elocalLoadTotal",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
never_resets=True,
),
GrowattSensorEntityDescription(
key="tlx_statement_of_charge",
translation_key="tlx_statement_of_charge",
api_key="bmsSoc",
native_unit_of_measurement=PERCENTAGE,
device_class=SensorDeviceClass.BATTERY,
),
)
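For orientation, a hedged sketch of how such descriptions are typically consumed: the integration fetches a dict from the Growatt API and looks each value up by api_key; the response below is illustrative only:

sample_api_response = {"eacToday": 12.3, "eacTotal": 4567.8}  # illustrative
for description in TLX_SENSOR_TYPES:
    if description.api_key in sample_api_response:
        value = sample_api_response[description.api_key]
        print(description.key, value, description.native_unit_of_measurement)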
----- Calamari-OCR/calamari /calamari_ocr/scripts/predict.py (Apache-2.0, Python) -----
import os
import zlib
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, List, Optional
import tfaip.util.logging
from bidi.algorithm import get_base_level
from paiargparse import PAIArgumentParser, pai_meta, pai_dataclass
from calamari_ocr import __version__
from calamari_ocr.ocr.dataset.datareader.base import CalamariDataGeneratorParams
from calamari_ocr.ocr.dataset.datareader.file import FileDataParams
from calamari_ocr.ocr.dataset.params import DATA_GENERATOR_CHOICES
from calamari_ocr.ocr.model.ctcdecoder.ctc_decoder import (
CTCDecoderParams,
CTCDecoderType,
)
from calamari_ocr.ocr.predict.params import Predictions, PredictorParams
from calamari_ocr.ocr.voting import VoterParams
from calamari_ocr.utils.glob import glob_all
if TYPE_CHECKING:
from calamari_ocr.ocr.dataset.pipeline import CalamariPipeline
logger = tfaip.util.logging.logger(__name__)
@pai_dataclass
@dataclass
class PredictArgs:
checkpoint: List[str] = field(
default_factory=list, metadata=pai_meta(mode="flat", help="Path to the checkpoint without file extension")
)
data: CalamariDataGeneratorParams = field(
default_factory=FileDataParams,
metadata=pai_meta(mode="flat", choices=DATA_GENERATOR_CHOICES),
)
verbose: bool = field(
default=True,
metadata=pai_meta(
mode="flat",
help="Print the prediction result to the log",
),
)
extended_prediction_data: bool = field(
default=False,
metadata=pai_meta(
mode="flat",
help="Write: Predicted string, labels; position, probabilities and alternatives of chars to a .pred file",
),
)
extended_prediction_data_format: str = field(
default="json",
metadata=pai_meta(
mode="flat",
help="Extension format: Either pred or json. Note that json will not print logits.",
),
)
ctc_decoder: CTCDecoderParams = field(default_factory=CTCDecoderParams, metadata=pai_meta(mode="ignore"))
voter: VoterParams = field(default_factory=VoterParams)
output_dir: Optional[str] = field(
default=None,
metadata=pai_meta(
mode="flat",
help="By default the prediction files will be written to the same directory as the given files. "
"You can use this argument to specify a specific output dir for the prediction files.",
),
)
predictor: PredictorParams = field(
default_factory=PredictorParams,
metadata=pai_meta(
fix_dc=True,
mode="flat",
),
)
def prepare_ctc_decoder_params(ctc_decoder: CTCDecoderParams):
if ctc_decoder.dictionary:
dictionary = set()
logger.info("Creating dictionary")
for path in glob_all(ctc_decoder.dictionary):
with open(path, "r") as f:
dictionary = dictionary.union({word for word in f.read().split()})
ctc_decoder.dictionary = dictionary
logger.info("Dictionary with {} unique words successfully created.".format(len(dictionary)))
if ctc_decoder.dictionary:
logger.warning("USING A LANGUAGE MODEL IS CURRENTLY EXPERIMENTAL ONLY. NOTE: THE PREDICTION IS VERY SLOW!")
ctc_decoder.type = CTCDecoderType.WordBeamSearch
def run(args: PredictArgs):
# check if loading a json file
# TODO: support running from JSON
# if len(args.files) == 1 and args.files[0].endswith("json"):
# import json
# with open(args.files[0], 'r') as f:
# json_args = json.load(f)
# for key, value in json_args.items():
# setattr(args, key, value)
# checks
if args.extended_prediction_data_format not in ["pred", "json"]:
raise Exception("Only 'pred' and 'json' are allowed extended prediction data formats")
# add json as extension, resolve wildcard, expand user, ... and remove .json again
args.checkpoint = [(cp if cp.endswith(".json") else cp + ".json") for cp in args.checkpoint]
args.checkpoint = glob_all(args.checkpoint)
args.checkpoint = [cp[:-5] for cp in args.checkpoint]
# create ctc decoder
prepare_ctc_decoder_params(args.ctc_decoder)
# predict for all models
from calamari_ocr.ocr.predict.predictor import MultiPredictor
predictor = MultiPredictor.from_paths(
checkpoints=args.checkpoint,
voter_params=args.voter,
predictor_params=args.predictor,
)
do_prediction = predictor.predict(args.data)
pipeline: CalamariPipeline = predictor.data.get_or_create_pipeline(predictor.params.pipeline, args.data)
reader = pipeline.reader()
if len(reader) == 0:
raise Exception("Empty dataset provided. Check your command line arguments or if the provided files are empty.")
avg_sentence_confidence = 0
n_predictions = 0
reader.prepare_store()
# output the voted results to the appropriate files
for s in do_prediction:
_, (result, prediction), meta = s.inputs, s.outputs, s.meta
sample = reader.sample_by_id(meta["id"])
n_predictions += 1
sentence = prediction.sentence
avg_sentence_confidence += prediction.avg_char_probability
if args.verbose:
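            # \u202A (LEFT-TO-RIGHT EMBEDDING) / \u202B (RIGHT-TO-LEFT EMBEDDING);
            # get_base_level picks one, and the trailing \u202C pops the embedding.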
lr = "\u202A\u202B"
logger.info("{}: '{}{}{}'".format(meta["id"], lr[get_base_level(sentence)], sentence, "\u202C"))
output_dir = args.output_dir if args.output_dir else os.path.dirname(prediction.line_path)
reader.store_text_prediction(prediction, meta["id"], output_dir=output_dir)
if args.extended_prediction_data:
ps = Predictions()
ps.line_path = sample["image_path"] if "image_path" in sample else sample["id"]
ps.predictions.extend([prediction] + [r.prediction for r in result])
output_dir = output_dir if output_dir else os.path.dirname(ps.line_path)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
if args.extended_prediction_data_format == "pred":
data = zlib.compress(ps.to_json(indent=2, ensure_ascii=False).encode("utf-8"))
elif args.extended_prediction_data_format == "json":
# remove logits
for p in ps.predictions:
p.logits = None
data = ps.to_json(indent=2)
else:
raise Exception("Unknown prediction format.")
reader.store_extended_prediction(
data,
sample,
output_dir=output_dir,
extension=args.extended_prediction_data_format,
)
avg_sentence_confidence = avg_sentence_confidence / n_predictions if n_predictions else 0
logger.info("Average sentence confidence: {:.2%}".format(avg_sentence_confidence))
reader.store()
logger.info("All prediction files written")
def main():
parser = PAIArgumentParser()
parser.add_argument("--version", action="version", version="%(prog)s v" + __version__)
parser.add_root_argument("root", PredictArgs, flat=True)
args = parser.parse_args()
run(args.root)
if __name__ == "__main__":
main()
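# --- Usage sketch (not part of the original file) ---
# Programmatic invocation, assuming a trained model at "model.ckpt" and that
# the line images are listed via FileDataParams.images (both are assumptions):
#
# args = PredictArgs(
#     checkpoint=["model.ckpt"],
#     data=FileDataParams(images=["lines/*.png"]),
# )
# run(args)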
|
ab7fb689f0be206e9fd01b4c9e8c6990c100043e
|
a7c0cc71e6da4615eca2c3d75117dad5b8dce8d3
|
/CTFd/utils/events/__init__.py
|
ad1c7e3db8470f3fa76f9cac65aecd25e8007a77
|
[
"Apache-2.0"
] |
permissive
|
CTFd/CTFd
|
4b75207aeea3ed8d761cc6269c27a070693ab3ec
|
d8f0b9e602fca109cabe1895e847d39a46ce7429
|
refs/heads/master
| 2023-09-01T19:19:19.767862
| 2023-08-29T18:46:53
| 2023-08-29T18:46:53
| 28,681,142
| 4,593
| 2,273
|
Apache-2.0
| 2023-09-13T18:24:37
| 2015-01-01T05:36:55
|
Python
|
UTF-8
|
Python
| false
| false
| 3,890
|
py
|
__init__.py
|
import json
from collections import defaultdict
from queue import Queue
from gevent import Timeout, spawn
from tenacity import retry, wait_exponential
from CTFd.cache import cache
from CTFd.utils import string_types
class ServerSentEvent(object):
def __init__(self, data, type=None, id=None):
self.data = data
self.type = type
self.id = id
def __str__(self):
if isinstance(self.data, string_types):
data = self.data
else:
data = json.dumps(self.data)
lines = ["data:{value}".format(value=line) for line in data.splitlines()]
if self.type:
lines.insert(0, "event:{value}".format(value=self.type))
if self.id:
lines.append("id:{value}".format(value=self.id))
return "\n".join(lines) + "\n\n"
def to_dict(self):
d = {"data": self.data}
if self.type:
d["type"] = self.type
if self.id:
d["id"] = self.id
return d
class EventManager(object):
def __init__(self):
self.clients = {}
def publish(self, data, type=None, id=None, channel="ctf"):
event = ServerSentEvent(data, type=type, id=id)
message = event.to_dict()
for client in list(self.clients.values()):
client[channel].put(message)
return len(self.clients)
def listen(self):
pass
def subscribe(self, channel="ctf"):
q = defaultdict(Queue)
self.clients[id(q)] = q
try:
# Immediately yield a ping event to force Response headers to be set
# or else some reverse proxies will incorrectly buffer SSE
yield ServerSentEvent(data="ping", type="ping")
while True:
with Timeout(5, False):
message = q[channel].get()
yield ServerSentEvent(**message)
yield ServerSentEvent(data="ping", type="ping")
finally:
del self.clients[id(q)]
del q
class RedisEventManager(EventManager):
def __init__(self):
        super(RedisEventManager, self).__init__()
self.client = cache.cache._write_client
self.clients = {}
def publish(self, data, type=None, id=None, channel="ctf"):
event = ServerSentEvent(data, type=type, id=id)
message = json.dumps(event.to_dict())
return self.client.publish(message=message, channel=channel)
def listen(self, channel="ctf"):
@retry(wait=wait_exponential(min=1, max=30))
def _listen():
while True:
pubsub = self.client.pubsub()
pubsub.subscribe(channel)
try:
while True:
message = pubsub.get_message(
ignore_subscribe_messages=True, timeout=5
)
if message:
if message["type"] == "message":
event = json.loads(message["data"])
for client in list(self.clients.values()):
client[channel].put(event)
finally:
pubsub.close()
spawn(_listen)
def subscribe(self, channel="ctf"):
q = defaultdict(Queue)
self.clients[id(q)] = q
try:
# Immediately yield a ping event to force Response headers to be set
# or else some reverse proxies will incorrectly buffer SSE
yield ServerSentEvent(data="ping", type="ping")
while True:
with Timeout(5, False):
message = q[channel].get()
yield ServerSentEvent(**message)
yield ServerSentEvent(data="ping", type="ping")
finally:
del self.clients[id(q)]
del q
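# --- Usage sketch (not part of the original module) ---
# The in-process EventManager: subscribe() registers a queue and yields SSE
# frames, publish() fans an event out to every registered client.
if __name__ == "__main__":
    manager = EventManager()
    stream = manager.subscribe()
    print(next(stream), end="")  # the initial "ping" frame
    manager.publish({"title": "hello"}, type="notification")
    print(next(stream), end="")  # the published notification frame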
|
4ade7b3e62ecadc4556e179776d9bcd985d5c8fb
|
49abdb1be64ad5ac3ca7114d7b82f1f4a7929c0f
|
/.github/nightly_build/darkify.py
|
8894988261b4f92bfc705bf613bd207a71cbdd9c
|
[
"Apache-2.0"
] |
permissive
|
tencent-quantum-lab/tensorcircuit
|
1581d47e6d33831223ccbe558fb4be968bfd8167
|
b96a1b1c11df8c767e85476efd6c336bb2b3a7b9
|
refs/heads/master
| 2023-09-05T16:54:20.170821
| 2023-09-05T09:24:23
| 2023-09-05T09:24:23
| 492,659,956
| 222
| 60
|
Apache-2.0
| 2023-09-05T04:38:31
| 2022-05-16T02:31:33
|
Python
|
UTF-8
|
Python
| false
| false
| 835
|
py
|
darkify.py
|
from datetime import datetime
import requests
def change_version(post=""):
datestr = datetime.now().strftime("%Y%m%d")
datestr += post
with open("tensorcircuit/__init__.py", "r") as f:
r = []
for l in f.readlines():
if l.startswith("__version__"):
l = l[:-2]
l += ".dev" + datestr + '"\n'
r.append(l)
# __version__ = "0.2.2.dev20220706"
with open("tensorcircuit/__init__.py", "w") as f:
f.writelines(r)
def update_setuppy(url=None):
if not url:
url = "https://raw.githubusercontent.com/refraction-ray/tensorcircuit-dev/beta/.github/nightly_build/setup.py"
r = requests.get(url)
with open("setup.py", "w") as f:
f.writelines(r.text)
if __name__ == "__main__":
change_version()
update_setuppy()
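# --- Usage sketch (not part of the original file) ---
# change_version("rc1") executed on 2022-07-06 rewrites a line such as
#     __version__ = "0.2.2"
# into
#     __version__ = "0.2.2.dev20220706rc1"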
|
c94865ad0e0eb841d6aca0db63155f0bce81b6d6
|
afbae26b958b5ef20548402a65002dcc8e55b66a
|
/release/stubs.min/Rhino/DocObjects/__init___parts/PointObject.py
|
b502e914b95c299f6b795b8b368dcbf7f64ebc92
|
[
"MIT"
] |
permissive
|
gtalarico/ironpython-stubs
|
d875cb8932c7644f807dc6fde9dd513d159e4f5c
|
c7f6a6cb197e3949e40a4880a0b2a44e72d0a940
|
refs/heads/master
| 2023-07-12T01:43:47.295560
| 2022-05-23T18:12:06
| 2022-05-23T18:12:06
| 95,340,553
| 235
| 88
|
NOASSERTION
| 2023-07-05T06:36:28
| 2017-06-25T05:30:46
|
Python
|
UTF-8
|
Python
| false
| false
| 303
|
py
|
PointObject.py
|
class PointObject(RhinoObject):
# no doc
def DuplicatePointGeometry(self):
""" DuplicatePointGeometry(self: PointObject) -> Point """
pass
PointGeometry=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: PointGeometry(self: PointObject) -> Point
"""
|
ef4a25f8b0238d3176cd74ac751100c5b06d3770
|
cbc4eec2c39bc954d05d28be812592a205106b8b
|
/python/app/amrev.py
|
54b7257cf4eeee3160f853846d63ab1f96544f70
|
[] |
no_license
|
pranab/avenir
|
756abaf6dc1d11bdafb61a5df77b0ed3ce56008c
|
4381166d61f7bc6088be34e6b35927a37ef6ba98
|
refs/heads/master
| 2022-10-07T20:58:12.992960
| 2022-09-06T04:28:47
| 2022-09-06T04:28:47
| 4,390,566
| 171
| 84
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,087
|
py
|
amrev.py
|
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# Package imports
import os
import sys
sys.path.append(os.path.abspath("../lib"))
from util import *
# parses amazon product review data and extracts various components from it
# data source http://jmcauley.ucsd.edu/data/amazon/
def parse(path):
"""
	parse json, one review record per line
"""
f = open(path, 'r')
for l in f:
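		# each line is a Python dict literal, hence eval() rather than json.loads()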
yield eval(l)
def prAll(revFile, comp):
"""
extracts review component
"""
for review in parse(revFile):
print(review["asin"] + "," + str(review[comp]))
def prSpecific(revFile, prod, comp):
"""
extracts product specific review component
"""
for review in parse(revFile):
asin = review['asin']
if asin == prod:
print(review[comp])
if __name__ == "__main__":
revFile = "data/reviews_Cell_Phones_and_Accessories_5.json"
op = sys.argv[1]
if op == "review":
# all review text
prAll(revFile, "reviewText")
elif op == "summary":
# all review summary
prAll(revFile, "summary")
elif op == "overall":
# all review overall
prAll(revFile, "overall")
elif op == "revCount":
# review count for all products
revCount = dict()
for review in parse(revFile):
asin = review['asin']
addToKeyedCounter(revCount, asin, 1)
		maxCnt = 0
		maxRev = None
		for item in revCount.items():
			print(item)
			cnt = int(item[1])
			if cnt > maxCnt:
				maxCnt = cnt
				maxRev = item
		print("max review")
		print(maxRev)
elif op == "prReview":
# all review for a product
prod = sys.argv[2]
prSpecific(revFile, prod, "reviewText")
elif op == "prSummary":
# all summary for a product
prod = sys.argv[2]
prSpecific(revFile, prod, "summary")
elif op == "prOverall":
# all overall for a product
prod = sys.argv[2]
prSpecific(revFile, prod, "overall")
else:
raise ValueError("invalid command")
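# --- Usage sketch (not part of the original script) ---
# The review file path is hardcoded above; typical invocations:
#   python amrev.py review                # asin,reviewText for every review
#   python amrev.py revCount              # per-product counts plus the maximum
#   python amrev.py prSummary B00XYZ1234  # summaries for one (hypothetical) asin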
|
46ca8eb64c0bb5987b9b74cf03978dc7046144b7
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/gift_system/hubs/base/stamper.py
|
2a07bb76d4109c37509ddd2b74d1c5b1bb92f07d
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,997
|
py
|
stamper.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/gift_system/hubs/base/stamper.py
import typing
from gui.ClientUpdateManager import g_clientUpdateManager
from gui.gift_system.hubs.subsystems import BaseHubSubsystem
from helpers import dependency
from skeletons.gui.shared import IItemsCache
if typing.TYPE_CHECKING:
from helpers.server_settings import GiftEventConfig
class IGiftEventStamper(BaseHubSubsystem):
def isBalanceAvailable(self):
raise NotImplementedError
def wasBalanceAvailable(self):
raise NotImplementedError
def getStampCount(self, stampName):
raise NotImplementedError
class GiftEventBaseStamper(IGiftEventStamper):
__slots__ = ('__updateCallback', '__isBalanceAvailable', '__wasBalanceAvailable')
_STAMPS = set()
__itemsCache = dependency.descriptor(IItemsCache)
def __init__(self, eventSettings, updateCallback):
super(GiftEventBaseStamper, self).__init__(eventSettings)
self.__updateCallback = updateCallback
self.__isBalanceAvailable = self.__wasBalanceAvailable = False
self.__initBalanceWatchers()
g_clientUpdateManager.addCallbacks({'cache.mayConsumeWalletResources': self.__updateBalanceAvailability,
'cache.entitlements': self.__updateBalanceContent})
def destroy(self):
self.__itemsCache.onSyncCompleted -= self.__onItemsSyncCompleted
g_clientUpdateManager.removeObjectCallbacks(self)
self.__updateCallback = None
return
def isBalanceAvailable(self):
return self.__isBalanceAvailable
def wasBalanceAvailable(self):
return self.__wasBalanceAvailable
def getStampCount(self, stampName):
return self.__itemsCache.items.stats.entitlements.get(stampName, 0)
def _isNotificationsEnabled(self):
return self._settings.isEnabled
def __initBalanceWatchers(self):
if not self.__itemsCache.isSynced():
self.__itemsCache.onSyncCompleted += self.__onItemsSyncCompleted
return
self.__onItemsSyncCompleted()
def __onItemsSyncCompleted(self, *_):
mayConsumeWalletResources = self.__itemsCache.items.stats.mayConsumeWalletResources
self.__isBalanceAvailable = self.__wasBalanceAvailable = mayConsumeWalletResources
self.__itemsCache.onSyncCompleted -= self.__onItemsSyncCompleted
self.__notifyGiftEventHub()
def __notifyGiftEventHub(self):
if self._isNotificationsEnabled():
self.__updateCallback()
def __updateBalanceAvailability(self, isAvailable):
if self.__isBalanceAvailable != isAvailable:
self.__isBalanceAvailable = isAvailable
self.__wasBalanceAvailable = self.__wasBalanceAvailable or isAvailable
self.__notifyGiftEventHub()
def __updateBalanceContent(self, entitlementsData):
if self._STAMPS & set(entitlementsData.keys()):
self.__notifyGiftEventHub()
|
35b45477c1a5cf20d9f360d2beef5ba7d85cb447
|
d1f15554df2d5c0f74ddbcba6e870359841f682b
|
/wagtail/test/utils/wagtail_factories/__init__.py
|
885fbe90507750e36274673077b2ade54482cd3c
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
wagtail/wagtail
|
bd405f89b86e0c625fef0685fd6bfba41cf5cbfc
|
06a7bc6124bf62675c09fbe0a4ed9bbac183e025
|
refs/heads/main
| 2023-09-04T06:22:51.601208
| 2023-09-01T15:22:00
| 2023-09-01T15:22:00
| 16,479,108
| 12,974
| 3,580
|
BSD-3-Clause
| 2023-09-14T10:45:04
| 2014-02-03T12:41:59
|
Python
|
UTF-8
|
Python
| false
| false
| 75
|
py
|
__init__.py
|
from .blocks import * # noqa: F403
from .factories import * # noqa: F403
|
d5aa2b2b9d116cfd2251a63c4775605473132a69
|
0c932b52ddb40867419dedd394934c059fc4ef7e
|
/allenact/utils/cache_utils.py
|
e54369536654a25aae4103badae4c011c053d090
|
[
"MIT"
] |
permissive
|
allenai/allenact
|
ed614f745cbd8ddb1404af6e8d5e855b5d66908a
|
9772eeeb7eacc1f9a83c90d1cf549a3f7e783c12
|
refs/heads/main
| 2023-09-05T01:33:37.424674
| 2023-06-13T18:13:57
| 2023-06-13T18:13:57
| 233,944,205
| 266
| 58
|
NOASSERTION
| 2023-07-25T17:05:01
| 2020-01-14T21:58:30
|
Python
|
UTF-8
|
Python
| false
| false
| 5,772
|
py
|
cache_utils.py
|
import math
from typing import Dict, Any, Union, Callable, Optional
from allenact.utils.system import get_logger
def pos_to_str_for_cache(pos: Dict[str, float]) -> str:
return "_".join([str(pos["x"]), str(pos["y"]), str(pos["z"])])
def str_to_pos_for_cache(s: str) -> Dict[str, float]:
split = s.split("_")
return {"x": float(split[0]), "y": float(split[1]), "z": float(split[2])}
def get_distance(
cache: Dict[str, Any], pos: Dict[str, float], target: Dict[str, float]
) -> float:
pos = {
"x": 0.25 * math.ceil(pos["x"] / 0.25),
"y": pos["y"],
"z": 0.25 * math.ceil(pos["z"] / 0.25),
}
sp = _get_shortest_path_distance_from_cache(cache, pos, target)
if sp == -1.0:
pos = {
"x": 0.25 * math.floor(pos["x"] / 0.25),
"y": pos["y"],
"z": 0.25 * math.ceil(pos["z"] / 0.25),
}
sp = _get_shortest_path_distance_from_cache(cache, pos, target)
if sp == -1.0:
pos = {
"x": 0.25 * math.ceil(pos["x"] / 0.25),
"y": pos["y"],
"z": 0.25 * math.floor(pos["z"] / 0.25),
}
sp = _get_shortest_path_distance_from_cache(cache, pos, target)
if sp == -1.0:
pos = {
"x": 0.25 * math.floor(pos["x"] / 0.25),
"y": pos["y"],
"z": 0.25 * math.floor(pos["z"] / 0.25),
}
sp = _get_shortest_path_distance_from_cache(cache, pos, target)
if sp == -1.0:
pos = find_nearest_point_in_cache(cache, pos)
sp = _get_shortest_path_distance_from_cache(cache, pos, target)
if sp == -1.0:
target = find_nearest_point_in_cache(cache, target)
sp = _get_shortest_path_distance_from_cache(cache, pos, target)
if sp == -1.0:
print("Your cache is incomplete!")
exit()
return sp
def get_distance_to_object(
cache: Dict[str, Any], pos: Dict[str, float], target_class: str
) -> float:
dists = []
weights = []
for rounder_func_0 in [math.ceil, math.floor]:
for rounder_func_1 in [math.ceil, math.floor]:
rounded_pos = {
"x": 0.25 * rounder_func_0(pos["x"] / 0.25),
"y": pos["y"],
"z": 0.25 * rounder_func_1(pos["z"] / 0.25),
}
dist = _get_shortest_path_distance_to_object_from_cache(
cache, rounded_pos, target_class
)
if dist >= 0:
dists.append(dist)
weights.append(
1.0
/ (
math.sqrt(
(pos["x"] - rounded_pos["x"]) ** 2
+ (pos["z"] - rounded_pos["z"]) ** 2
)
                    + 1e-6  # small epsilon so an exact grid hit does not divide by zero
)
)
if len(dists) == 0:
raise RuntimeError("Your cache is incomplete!")
total_weight = sum(weights)
weights = [w / total_weight for w in weights]
return sum(d * w for d, w in zip(dists, weights))
def _get_shortest_path_distance_from_cache(
cache: Dict[str, Any], position: Dict[str, float], target: Dict[str, float]
) -> float:
try:
return cache[pos_to_str_for_cache(position)][pos_to_str_for_cache(target)][
"distance"
]
except KeyError:
return -1.0
def _get_shortest_path_distance_to_object_from_cache(
cache: Dict[str, Any], position: Dict[str, float], target_class: str
) -> float:
try:
return cache[pos_to_str_for_cache(position)][target_class]["distance"]
except KeyError:
return -1.0
def find_nearest_point_in_cache(
cache: Dict[str, Any], point: Dict[str, float]
) -> Dict[str, float]:
best_delta = float("inf")
closest_point: Dict[str, float] = {}
for p in cache:
pos = str_to_pos_for_cache(p)
delta = (
abs(point["x"] - pos["x"])
+ abs(point["y"] - pos["y"])
+ abs(point["z"] - pos["z"])
)
if delta < best_delta:
best_delta = delta
closest_point = pos
return closest_point
class DynamicDistanceCache(object):
def __init__(self, rounding: Optional[int] = None):
self.cache: Dict[str, Any] = {}
self.rounding = rounding
self.hits = 0
self.misses = 0
self.num_accesses = 0
def find_distance(
self,
scene_name: str,
position: Dict[str, Any],
target: Union[Dict[str, Any], str],
native_distance_function: Callable[
[Dict[str, Any], Union[Dict[str, Any], str]], float
],
) -> float:
# Convert the position to its rounded string representation
position_str = scene_name + self._pos_to_str(position)
# If the target is also a position, convert it to its rounded string representation
if isinstance(target, str):
target_str = target
else:
target_str = self._pos_to_str(target)
if position_str not in self.cache:
self.cache[position_str] = {}
if target_str not in self.cache[position_str]:
self.cache[position_str][target_str] = native_distance_function(
position, target
)
self.misses += 1
else:
self.hits += 1
self.num_accesses += 1
        if self.num_accesses % 1000 == 0:
            get_logger().debug(
                "Cache Miss-Hit Ratio: %.4f" % (self.misses / max(self.hits, 1))
            )
return self.cache[position_str][target_str]
    def invalidate(self):
        self.cache = {}
def _pos_to_str(self, pos: Dict[str, Any]) -> str:
if self.rounding:
pos = {k: round(v, self.rounding) for k, v in pos.items()}
return str(pos)
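# --- Usage sketch (not part of the original module) ---
# DynamicDistanceCache memoizes an arbitrary (possibly expensive) distance
# function per scene; the positions and the euclidean metric are illustrative.
if __name__ == "__main__":
    def euclidean(p, t):
        return math.sqrt(sum((p[k] - t[k]) ** 2 for k in ("x", "y", "z")))
    cache = DynamicDistanceCache(rounding=2)
    a = {"x": 1.0, "y": 0.0, "z": 2.0}
    b = {"x": 4.0, "y": 0.0, "z": 6.0}
    print(cache.find_distance("scene1", a, b, euclidean))  # computed: 5.0
    print(cache.find_distance("scene1", a, b, euclidean))  # served from the cache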
|
19d01e7297571a92a96c8c5d9e096a2757b94ee9
|
f88a5ad8af044f7956a9d03d57f6ed6610dc9246
|
/tests/unit/weatherapi25/test_weathercoderegistry.py
|
ef91523f2bfb51f484066432eefaf70c7a57576b
|
[
"MIT"
] |
permissive
|
csparpa/pyowm
|
9664089d8dd792489882696f194d9659146d95a1
|
3be796cc60fd2cac1fe1fba005dc4c7f650debcf
|
refs/heads/master
| 2023-08-15T17:47:31.064245
| 2023-06-24T11:50:42
| 2023-06-24T11:50:42
| 12,535,703
| 855
| 240
|
MIT
| 2023-08-30T14:24:56
| 2013-09-02T08:43:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,055
|
py
|
test_weathercoderegistry.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from pyowm.weatherapi25.weathercoderegistry import WeatherCodeRegistry
class TestWeatherCodeRegistry(unittest.TestCase):
__test_instance = WeatherCodeRegistry({
"abc": [{
"start": 1,
"end": 100
},
{
"start": 120,
"end": 160
}],
"xyz": [{
"start": 345,
"end": 345
}]
})
def test_wrong_instantiation_parameters(self):
self.assertRaises(AssertionError, WeatherCodeRegistry, 'this-is-not-a-dict')
def test_status_for(self):
        self.assertIsNone(self.__test_instance.status_for(999))
self.assertEqual("abc", self.__test_instance.status_for(150))
self.assertEqual("xyz", self.__test_instance.status_for(345))
def test_get_instance(self):
result = WeatherCodeRegistry.get_instance()
self.assertTrue(isinstance(result, WeatherCodeRegistry))
def test_repr(self):
print(self.__test_instance)
|
dd7525afcb89c4e1eaf0f1616d6f99e52e239596
|
dacdebab897f9287f37a2e85c5705a926ddd36aa
|
/snakemake/linting/__init__.py
|
3a1b0fc94553241077faff399085edb3c6014b90
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
snakemake/snakemake
|
5d4528193d87786d7b372ca7653ece302ff46965
|
27b224ed12448df8aebc7d1ff8f25e3bf7622232
|
refs/heads/main
| 2023-09-02T08:37:04.323976
| 2023-08-11T10:02:34
| 2023-08-11T10:02:34
| 212,840,200
| 1,941
| 536
|
MIT
| 2023-09-11T09:51:44
| 2019-10-04T14:58:11
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,054
|
py
|
__init__.py
|
import textwrap
import shutil
import inspect
from abc import ABC, abstractmethod
from snakemake.logging import logger
NAME_PATTERN = "[a-zA-Z_][a-zA-Z_0-9]*"
class Linter(ABC):
def __init__(self, workflow, items):
self.items = items
self.workflow = workflow
def read_item(self, item):
return item
def lint(self, json=False):
json_lints = [] if json else None
linted = False
for item in self.items:
item_lints = [
lint
for lint_item in self.lints()
for lint in lint_item(self.read_item(item))
]
if not item_lints:
continue
linted = True
if json:
json_lints.append(
{
"for": self.item_desc_json(item),
"lints": [lint.__dict__ for lint in item_lints],
}
)
else:
logger.warning(
"Lints for {}:\n{}\n".format(
self.item_desc_plain(item),
"\n".join(map(" * {}".format, item_lints)),
)
)
return json_lints, linted
@abstractmethod
def item_desc_json(self, item):
pass
@abstractmethod
def item_desc_plain(self, item):
pass
def lints(self):
return (
method
for name, method in inspect.getmembers(self)
if name.startswith("lint_")
)
class Lint:
def __init__(self, title, body, links=None):
self.title = title
self.body = body
self.links = links or []
def __str__(self):
width, _ = shutil.get_terminal_size()
return "{}:\n{}\n Also see:\n{}".format(
self.title,
"\n".join(
map(" {}".format, textwrap.wrap(self.body, max(width - 6, 20)))
),
"\n".join(map(" {}".format, self.links)),
)
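# --- Usage sketch (not part of the original module) ---
# A minimal concrete Linter; `workflow` is only stored by the base class, so
# None suffices here. Methods named lint_* are discovered by Linter.lints()
# and must yield Lint objects.
class RuleNameLinter(Linter):
    def item_desc_json(self, item):
        return {"rule": item}
    def item_desc_plain(self, item):
        return "rule {}".format(item)
    def lint_snakecase(self, item):
        if item != item.lower():
            yield Lint(
                title="Rule name is not snake_case",
                body="Consider renaming '{}' to '{}'.".format(item, item.lower()),
            )
# json_lints, linted = RuleNameLinter(None, ["MyRule", "ok_rule"]).lint(json=True)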
|
1686d9bd41015dfd47cbacef378e6f399d6f8847
|
c9502eb1420a32a7bf36a32391d6b66b7018f9a7
|
/dragonn/callbacks.py
|
bb836632ff88a58cdf170622eb9769bac46345ee
|
[
"MIT"
] |
permissive
|
kundajelab/dragonn
|
ae81f00885494e61b81a6ce359130e2283038364
|
d2442a27a3991a18717ef199e3197d6692f14c33
|
refs/heads/master
| 2023-08-13T21:52:20.555859
| 2022-07-08T03:29:04
| 2022-07-08T03:29:04
| 60,218,942
| 262
| 95
|
MIT
| 2023-03-24T22:57:13
| 2016-06-02T00:03:18
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,206
|
py
|
callbacks.py
|
from keras.callbacks import Callback
from dragonn.metrics import *
import warnings
warnings.filterwarnings('ignore')
class MetricsCallback(Callback):
def __init__(self, train_data, validation_data):
super().__init__()
self.validation_data = validation_data
self.train_data = train_data
    def on_epoch_end(self, epoch, logs=None):
X_train = self.train_data[0]
y_train = self.train_data[1]
X_val = self.validation_data[0]
y_val = self.validation_data[1]
y_train_pred=self.model.predict(X_train)
y_val_pred=self.model.predict(X_val)
train_classification_result=ClassificationResult(y_train,y_train_pred)
val_classification_result=ClassificationResult(y_val,y_val_pred)
print("Training Data:")
print(train_classification_result)
print("Validation Data:")
print(val_classification_result)
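# --- Usage sketch (not part of the original module) ---
# Hook the callback into Keras training; X_*/y_* stand for numpy arrays and
# `model` for a compiled Keras model (all placeholders):
#
# metrics_cb = MetricsCallback(train_data=(X_train, y_train),
#                              validation_data=(X_val, y_val))
# model.fit(X_train, y_train, epochs=10, callbacks=[metrics_cb])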
|
bc0c0c776bc05aa3dac826608b2e9463587cffca
|
42926e8621fc64211e385901f737c925ede26c33
|
/scrapGoogleWithPros.py
|
ea235ebe8089cb25a7b5d6803591e215ad96e96f
|
[] |
no_license
|
enghamzasalem/SouqScraper
|
21cd827a3adbfb79f6b99746a91e2d35b8d8c4f7
|
0ef5d396a9c8cf07aadd3b5a3551e8b823c4843e
|
refs/heads/master
| 2023-09-03T07:25:14.068321
| 2023-08-28T14:54:21
| 2023-08-28T14:54:21
| 180,890,693
| 244
| 186
| null | 2023-09-04T12:56:21
| 2019-04-11T22:59:30
|
Python
|
UTF-8
|
Python
| false
| false
| 629
|
py
|
scrapGoogleWithPros.py
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
driver.maximize_window()
driver.get("http://www.google.com/")
search = driver.find_element(By.NAME, "q")
search.send_keys("2*6+8-4")
search.send_keys(Keys.RETURN)
# wait for the calculator result span instead of reading it immediately
result = WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.XPATH, "//span[@class='qv3Wpe']"))
)
print(result.get_attribute("innerHTML"))
driver.quit()
|
dc4675e5bda126a7580b4ed04990286ccf4f9cd7
|
572afc77a246acb9483b47fc9e1839f47005d736
|
/python/fate_arch/abc/_federation.py
|
b3e8fd21a92b614601c0f6edcf740edebd7d3b70
|
[
"Apache-2.0"
] |
permissive
|
FederatedAI/FATE
|
7c787c308cca9ff46f287d24569c68de0a1cac07
|
8767db5ec0cb93784f64b290bc39b7b545c530fb
|
refs/heads/master
| 2023-08-17T10:13:00.302529
| 2023-06-14T07:01:38
| 2023-06-14T07:01:38
| 167,349,656
| 4,942
| 1,571
|
Apache-2.0
| 2023-09-14T07:02:29
| 2019-01-24T10:32:43
|
Python
|
UTF-8
|
Python
| false
| false
| 2,039
|
py
|
_federation.py
|
import abc
import typing
from abc import ABCMeta
from fate_arch.abc._gc import GarbageCollectionABC
from fate_arch.common import Party
__all__ = ["FederationABC"]
class FederationABC(metaclass=ABCMeta):
"""
federation, get or remote objects and tables
"""
@property
@abc.abstractmethod
def session_id(self) -> str:
...
@abc.abstractmethod
def get(self, name: str,
tag: str,
parties: typing.List[Party],
gc: GarbageCollectionABC) -> typing.List:
"""
get objects/tables from ``parties``
Parameters
----------
name: str
name of transfer variable
tag: str
tag to distinguish each transfer
parties: typing.List[Party]
parties to get objects/tables from
gc: GarbageCollectionABC
            used to register clean-up jobs
Returns
-------
list
a list of object or a list of table get from parties with same order of `parties`
"""
...
@abc.abstractmethod
def remote(self, v,
name: str,
tag: str,
parties: typing.List[Party],
gc: GarbageCollectionABC):
"""
remote object/table to ``parties``
Parameters
----------
v: object or table
object/table to remote
name: str
name of transfer variable
tag: str
tag to distinguish each transfer
parties: typing.List[Party]
parties to remote object/table to
gc: GarbageCollectionABC
            used to register clean-up jobs
        Returns
        -------
        None
"""
...
@abc.abstractmethod
def destroy(self, parties):
"""
destroy federation from ``parties``
Parameters
----------
parties: typing.List[Party]
parties to get objects/tables from
Returns
-------
None
"""
...
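# --- Usage sketch (not part of the original module) ---
# A toy in-memory realisation of the ABC, for illustration only; real
# backends transfer objects/tables between parties over a message queue.
class InMemoryFederation(FederationABC):
    def __init__(self, session_id: str):
        self._session_id = session_id
        self._store = {}
    @property
    def session_id(self) -> str:
        return self._session_id
    def get(self, name, tag, parties, gc):
        return [self._store[(name, tag, p)] for p in parties]
    def remote(self, v, name, tag, parties, gc):
        for p in parties:
            self._store[(name, tag, p)] = v
    def destroy(self, parties):
        self._store.clear()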
|
3515a659658e891c7c27a7a3b3d555964664fa20
|
eb9253b8c2a281bebddbf77fa02b249fa18f11b7
|
/knet/kernel_updator.py
|
9021b53637e2d9b7e77efc93699b379b45b1018c
|
[
"Apache-2.0"
] |
permissive
|
ZwwWayne/K-Net
|
781ea91ebe17fb9887409ee030c0bfae8dc60b99
|
5e50ee58957dce972f51096804ff69171c2f072e
|
refs/heads/main
| 2023-05-23T18:43:16.806151
| 2021-12-16T12:43:08
| 2021-12-16T12:43:08
| 417,726,092
| 452
| 55
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,996
|
py
|
kernel_updator.py
|
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import build_activation_layer, build_norm_layer
from mmcv.cnn.bricks.transformer import TRANSFORMER_LAYER
@TRANSFORMER_LAYER.register_module()
class KernelUpdator(nn.Module):
def __init__(self,
in_channels=256,
feat_channels=64,
out_channels=None,
input_feat_shape=3,
gate_sigmoid=True,
gate_norm_act=False,
activate_out=False,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN')):
super(KernelUpdator, self).__init__()
self.in_channels = in_channels
self.feat_channels = feat_channels
self.out_channels_raw = out_channels
self.gate_sigmoid = gate_sigmoid
self.gate_norm_act = gate_norm_act
self.activate_out = activate_out
if isinstance(input_feat_shape, int):
input_feat_shape = [input_feat_shape] * 2
self.input_feat_shape = input_feat_shape
self.act_cfg = act_cfg
self.norm_cfg = norm_cfg
self.out_channels = out_channels if out_channels else in_channels
self.num_params_in = self.feat_channels
self.num_params_out = self.feat_channels
self.dynamic_layer = nn.Linear(
self.in_channels, self.num_params_in + self.num_params_out)
self.input_layer = nn.Linear(self.in_channels,
self.num_params_in + self.num_params_out,
1)
self.input_gate = nn.Linear(self.in_channels, self.feat_channels, 1)
self.update_gate = nn.Linear(self.in_channels, self.feat_channels, 1)
if self.gate_norm_act:
self.gate_norm = build_norm_layer(norm_cfg, self.feat_channels)[1]
self.norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1]
self.norm_out = build_norm_layer(norm_cfg, self.feat_channels)[1]
self.input_norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1]
self.input_norm_out = build_norm_layer(norm_cfg, self.feat_channels)[1]
self.activation = build_activation_layer(act_cfg)
self.fc_layer = nn.Linear(self.feat_channels, self.out_channels, 1)
self.fc_norm = build_norm_layer(norm_cfg, self.out_channels)[1]
def forward(self, update_feature, input_feature):
update_feature = update_feature.reshape(-1, self.in_channels)
num_proposals = update_feature.size(0)
parameters = self.dynamic_layer(update_feature)
param_in = parameters[:, :self.num_params_in].view(
-1, self.feat_channels)
param_out = parameters[:, -self.num_params_out:].view(
-1, self.feat_channels)
input_feats = self.input_layer(
input_feature.reshape(num_proposals, -1, self.feat_channels))
input_in = input_feats[..., :self.num_params_in]
input_out = input_feats[..., -self.num_params_out:]
gate_feats = input_in * param_in.unsqueeze(-2)
if self.gate_norm_act:
gate_feats = self.activation(self.gate_norm(gate_feats))
input_gate = self.input_norm_in(self.input_gate(gate_feats))
update_gate = self.norm_in(self.update_gate(gate_feats))
if self.gate_sigmoid:
input_gate = input_gate.sigmoid()
update_gate = update_gate.sigmoid()
param_out = self.norm_out(param_out)
input_out = self.input_norm_out(input_out)
if self.activate_out:
param_out = self.activation(param_out)
input_out = self.activation(input_out)
# param_out has shape (batch_size, feat_channels, out_channels)
features = update_gate * param_out.unsqueeze(
-2) + input_gate * input_out
features = self.fc_layer(features)
features = self.fc_norm(features)
features = self.activation(features)
return features
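# --- Usage sketch (not part of the original file) ---
# Shapes assume in_channels == feat_channels == 256, as in typical K-Net
# configs (the defaults above are overridden by the config in practice).
if __name__ == "__main__":
    import torch
    updator = KernelUpdator(in_channels=256, feat_channels=256)
    update_feature = torch.randn(5, 256)    # one updated feature per proposal
    input_feature = torch.randn(5, 9, 256)  # e.g. 3x3 kernel positions per proposal
    out = updator(update_feature, input_feature)
    print(out.shape)  # torch.Size([5, 9, 256])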
|
30c89d386f747bd3cbfe49f6f2dd43a7d4bd5f66
|
acf7457d3a799cb9bff12686d2d616688bcd4b5b
|
/packages/python/plotly/plotly/validators/layout/template/data/_violin.py
|
91f6842f5292a81c0d9f1ccb8025fd9bda35c247
|
[
"MIT"
] |
permissive
|
plotly/plotly.py
|
f4f61639f08160f16195efc95b5901dc5a937346
|
975a704074f01c078e0fdfa32bdf17130bf89e69
|
refs/heads/master
| 2023-09-06T06:15:08.340035
| 2023-08-24T12:28:14
| 2023-08-24T12:28:14
| 14,579,099
| 14,751
| 2,989
|
MIT
| 2023-09-08T19:55:32
| 2013-11-21T05:53:08
|
Python
|
UTF-8
|
Python
| false
| false
| 543
|
py
|
_violin.py
|
import _plotly_utils.basevalidators
class ViolinValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(
self, plotly_name="violin", parent_name="layout.template.data", **kwargs
):
super(ViolinValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Violin"),
data_docs=kwargs.pop(
"data_docs",
"""
""",
),
**kwargs,
)
|
67fe189b089c0aa5e89e012b320e691481700aa4
|
47f94329d813e02a4a2b88dd43d70c2cb8fa0b5a
|
/test/test_api_decorator.py
|
2fe497f6435fab9e1f3c441486cb12e895ffad2b
|
[
"MIT"
] |
permissive
|
laike9m/Cyberbrain
|
b3b798b6a0924d2af3a0c36ed002dee8b9746a3f
|
ef92fee10e3fd4ff4bcee38bf8356b0d645519e3
|
refs/heads/master
| 2023-01-07T12:31:15.636701
| 2022-04-19T08:05:24
| 2022-04-19T08:05:24
| 249,303,697
| 2,512
| 176
|
MIT
| 2022-12-30T22:40:02
| 2020-03-23T00:59:43
|
Python
|
UTF-8
|
Python
| false
| false
| 209
|
py
|
test_api_decorator.py
|
def test_decorator_api(trace, check_golden_file):
def f(foo):
return foo
@trace
def decorated_func():
a = 1
b = f(a)
return a + b
assert decorated_func() == 2
|
834017331d81dee90f76ec0b8c3d7fb89cdc2c3a
|
98c396dbb1e5e03403de8d8860ee3cc0e48b5366
|
/image/randaugment/wrn.py
|
f054dbbfe4b6e1288166fe91cf6bd2563de752e5
|
[
"Apache-2.0"
] |
permissive
|
google-research/uda
|
94ca1c52547c12081fab52897e51da3bd19d92df
|
960684e363251772a5938451d4d2bc0f1da9e24b
|
refs/heads/master
| 2023-08-25T12:17:16.716778
| 2020-02-08T23:32:13
| 2020-02-08T23:32:13
| 192,635,589
| 2,239
| 351
|
Apache-2.0
| 2021-08-28T07:16:56
| 2019-06-19T01:21:45
|
Python
|
UTF-8
|
Python
| false
| false
| 3,880
|
py
|
wrn.py
|
# coding=utf-8
# Copyright 2019 The Google UDA Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builds the WideResNet Model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from randaugment import custom_ops as ops
import numpy as np
import tensorflow as tf
def residual_block(
x, in_filter, out_filter, stride, update_bn=True):
"""Adds residual connection to `x` in addition to applying BN->ReLU->3x3 Conv.
Args:
x: Tensor that is the output of the previous layer in the model.
in_filter: Number of filters `x` has.
out_filter: Number of filters that the output of this layer will have.
    stride: Integer that specifies what stride should be applied to `x`.
    update_bn: Boolean for whether the batch norm statistics should be updated.
Returns:
A Tensor that is the result of applying two sequences of BN->ReLU->3x3 Conv
and then adding that Tensor to `x`.
"""
orig_x = x
block_x = x
with tf.variable_scope('residual_only_activation'):
block_x = ops.batch_norm(block_x, update_stats=update_bn,
scope='init_bn')
block_x = tf.nn.relu(block_x)
with tf.variable_scope('sub1'):
block_x = ops.conv2d(
block_x, out_filter, 3, stride=stride, scope='conv1')
with tf.variable_scope('sub2'):
block_x = ops.batch_norm(block_x, update_stats=update_bn, scope='bn2')
block_x = tf.nn.relu(block_x)
block_x = ops.conv2d(
block_x, out_filter, 3, stride=1, scope='conv2')
if stride != 1 or out_filter != in_filter:
orig_x = ops.conv2d(
orig_x, out_filter, 1, stride=stride, scope='conv3')
x = orig_x + block_x
return x
def build_wrn_model(images, num_classes, wrn_size, update_bn=True):
"""Builds the WRN model.
Build the Wide ResNet model from https://arxiv.org/abs/1605.07146.
Args:
images: Tensor of images that will be fed into the Wide ResNet Model.
num_classes: Number of classed that the model needs to predict.
    wrn_size: Parameter that scales the number of filters in the Wide ResNet
      model.
    update_bn: Boolean for whether the batch norm statistics should be updated.
Returns:
The logits of the Wide ResNet model.
"""
# wrn_size = 16 * widening factor k
kernel_size = wrn_size
filter_size = 3
# depth = num_blocks_per_resnet * 6 + 4 = 28
num_blocks_per_resnet = 4
filters = [
min(kernel_size, 16), kernel_size, kernel_size * 2, kernel_size * 4
]
strides = [1, 2, 2] # stride for each resblock
# Run the first conv
with tf.variable_scope('init'):
x = images
output_filters = filters[0]
x = ops.conv2d(x, output_filters, filter_size, scope='init_conv')
first_x = x # Res from the beginning
orig_x = x # Res from previous block
for block_num in range(1, 4):
with tf.variable_scope('unit_{}_0'.format(block_num)):
x = residual_block(
x,
filters[block_num - 1],
filters[block_num],
strides[block_num - 1],
update_bn=update_bn)
for i in range(1, num_blocks_per_resnet):
with tf.variable_scope('unit_{}_{}'.format(block_num, i)):
x = residual_block(
x,
filters[block_num],
filters[block_num],
1,
update_bn=update_bn)
with tf.variable_scope('unit_last'):
x = ops.batch_norm(x, scope='final_bn')
x = tf.nn.relu(x)
x = ops.global_avg_pool(x)
logits = ops.fc(x, num_classes)
return logits
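# --- Usage sketch (not part of the original file) ---
# TF1.x graph-mode construction; wrn_size = 16 * widening factor, so k=2
# (WRN-28-2) gives wrn_size=32 for CIFAR-sized inputs.
if __name__ == '__main__':
  images = tf.placeholder(tf.float32, [None, 32, 32, 3])
  logits = build_wrn_model(images, num_classes=10, wrn_size=32)
  print(logits)  # a (?, 10) logits tensor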
|
6d5c4de01e7ef53388bce437fe73a29b038378fc
|
11cd362cdd78c2fc48042ed203614b201ac94aa6
|
/desktop/core/ext-py3/boto-2.49.0/boto/https_connection.py
|
ddc31a152292e69897fcfc01b9b44f354bd4051d
|
[
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"MIT",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] |
permissive
|
cloudera/hue
|
b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908
|
dccb9467675c67b9c3399fc76c5de6d31bfb8255
|
refs/heads/master
| 2023-08-31T06:49:25.724501
| 2023-08-28T20:45:00
| 2023-08-28T20:45:00
| 732,593
| 5,655
| 2,244
|
Apache-2.0
| 2023-09-14T03:05:41
| 2010-06-21T19:46:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 5,135
|
py
|
https_connection.py
|
# Copyright 2007,2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is derived from
# http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py
"""Extensions to allow HTTPS requests with SSL certificate validation."""
import re
import socket
import ssl
import boto
from boto.compat import six, http_client
class InvalidCertificateException(http_client.HTTPException):
"""Raised when a certificate is provided with an invalid hostname."""
def __init__(self, host, cert, reason):
"""Constructor.
Args:
host: The hostname the connection was made to.
      cert: The SSL certificate (as a dictionary) the host returned.
      reason: A string describing why the certificate was rejected.
    """
http_client.HTTPException.__init__(self)
self.host = host
self.cert = cert
self.reason = reason
def __str__(self):
return ('Host %s returned an invalid certificate (%s): %s' %
(self.host, self.reason, self.cert))
def GetValidHostsForCert(cert):
"""Returns a list of valid host globs for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
Returns:
list: A list of valid host globs.
"""
if 'subjectAltName' in cert:
return [x[1] for x in cert['subjectAltName'] if x[0].lower() == 'dns']
else:
return [x[0][1] for x in cert['subject']
if x[0][0].lower() == 'commonname']
def ValidateCertificateHostname(cert, hostname):
"""Validates that a given hostname is valid for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
hostname: The hostname to test.
Returns:
bool: Whether or not the hostname is valid for this certificate.
"""
hosts = GetValidHostsForCert(cert)
boto.log.debug(
"validating server certificate: hostname=%s, certificate hosts=%s",
hostname, hosts)
for host in hosts:
        host_re = host.replace('.', r'\.').replace('*', '[^.]*')
if re.search('^%s$' % (host_re,), hostname, re.I):
return True
return False
class CertValidatingHTTPSConnection(http_client.HTTPConnection):
"""An HTTPConnection that connects over SSL and validates certificates."""
default_port = http_client.HTTPS_PORT
def __init__(self, host, port=default_port, key_file=None, cert_file=None,
ca_certs=None, strict=None, **kwargs):
"""Constructor.
Args:
host: The hostname. Can be in 'host:port' form.
port: The port. Defaults to 443.
key_file: A file containing the client's private key
cert_file: A file containing the client's certificates
      ca_certs: A file containing a set of concatenated certificate authority
certs for validating the server against.
strict: When true, causes BadStatusLine to be raised if the status line
can't be parsed as a valid HTTP/1.0 or 1.1 status line.
"""
if six.PY2:
# Python 3.2 and newer have deprecated and removed the strict
# parameter. Since the params are supported as keyword arguments
# we conditionally add it here.
kwargs['strict'] = strict
http_client.HTTPConnection.__init__(self, host=host, port=port, **kwargs)
self.key_file = key_file
self.cert_file = cert_file
self.ca_certs = ca_certs
def connect(self):
"Connect to a host on a given (SSL) port."
if hasattr(self, "timeout"):
sock = socket.create_connection((self.host, self.port), self.timeout)
else:
sock = socket.create_connection((self.host, self.port))
msg = "wrapping ssl socket; "
if self.ca_certs:
msg += "CA certificate file=%s" % self.ca_certs
else:
msg += "using system provided SSL certs"
boto.log.debug(msg)
self.sock = ssl.wrap_socket(sock, keyfile=self.key_file,
certfile=self.cert_file,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=self.ca_certs)
cert = self.sock.getpeercert()
        hostname = self.host.split(':', 1)[0]
if not ValidateCertificateHostname(cert, hostname):
raise InvalidCertificateException(hostname,
cert,
'remote hostname "%s" does not match '
'certificate' % hostname)
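# --- Usage sketch (not part of the original module) ---
# Requires a Python/ssl build that still provides the legacy ssl.wrap_socket
# API; the CA bundle path below is an assumption for Debian-style systems.
if __name__ == '__main__':
    conn = CertValidatingHTTPSConnection(
        'www.example.com', ca_certs='/etc/ssl/certs/ca-certificates.crt')
    conn.request('GET', '/')
    print(conn.getresponse().status)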
|
ead5eafee2dfb167ba73c9ea0a8f534cb3b9164d
|
607dc8df19fc5248f6289cdda97857b5d58ca16f
|
/smac/model/multi_objective_model.py
|
23a765a8fffe88dcc1cb8d3957e7797c4c47a138
|
[
"BSD-3-Clause",
"LicenseRef-scancode-philippe-de-muyter",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
automl/SMAC3
|
7dce243a33023c52d6819deff966f7b502e90ed0
|
541ee7e0383b491b86d1a23dcff669f2efad616d
|
refs/heads/main
| 2023-08-31T17:36:06.067579
| 2023-08-01T13:02:51
| 2023-08-01T13:02:51
| 65,900,469
| 943
| 259
|
NOASSERTION
| 2023-09-11T02:36:57
| 2016-08-17T10:58:05
|
Python
|
UTF-8
|
Python
| false
| false
| 3,002
|
py
|
multi_objective_model.py
|
from __future__ import annotations
from typing import TypeVar
import numpy as np
from smac.model.abstract_model import AbstractModel
__copyright__ = "Copyright 2022, automl.org"
__license__ = "3-clause BSD"
Self = TypeVar("Self", bound="MultiObjectiveModel")
class MultiObjectiveModel(AbstractModel):
"""Wrapper for the surrogate model to predict multiple objectives.
Parameters
----------
models : AbstractModel | list[AbstractModel]
Which model should be used. If it is a list, then it must provide as many models as objectives.
If it is a single model only, the model is used for all objectives.
objectives : list[str]
Which objectives should be used.
    seed : int
        The seed passed on to the underlying abstract model.
    """
def __init__(
self,
models: AbstractModel | list[AbstractModel],
objectives: list[str],
seed: int = 0,
) -> None:
self._n_objectives = len(objectives)
if isinstance(models, list):
assert len(models) == len(objectives)
# Make sure the configspace is the same
configspace = models[0]._configspace
for m in models:
assert configspace == m._configspace
self._models = models
else:
configspace = models._configspace
self._models = [models for _ in range(self._n_objectives)]
super().__init__(
configspace=configspace,
instance_features=None,
pca_components=None,
seed=seed,
)
@property
def models(self) -> list[AbstractModel]:
"""The internally used surrogate models."""
return self._models
def predict_marginalized(self, X: np.ndarray) -> tuple[np.ndarray, np.ndarray]: # noqa: D102
mean = np.zeros((X.shape[0], self._n_objectives))
var = np.zeros((X.shape[0], self._n_objectives))
for i, estimator in enumerate(self._models):
m, v = estimator.predict_marginalized(X)
mean[:, i] = m.flatten()
var[:, i] = v.flatten()
return mean, var
def _train(self: Self, X: np.ndarray, Y: np.ndarray) -> Self:
if len(self._models) == 0:
raise ValueError("The list of surrogate models is empty.")
for i, model in enumerate(self._models):
model.train(X, Y[:, i])
return self
def _predict(
self,
X: np.ndarray,
covariance_type: str | None = "diagonal",
) -> tuple[np.ndarray, np.ndarray | None]:
if covariance_type != "diagonal":
raise ValueError("`covariance_type` can only take `diagonal` for this model.")
mean = np.zeros((X.shape[0], self._n_objectives))
var = np.zeros((X.shape[0], self._n_objectives))
for i, estimator in enumerate(self._models):
m, v = estimator.predict(X)
assert v is not None
mean[:, i] = m.flatten()
var[:, i] = v.flatten()
return mean, var
|
f3b03a42bb4bd88fadd7a5502532f749e4d3b887
|
0f2b08b31fab269c77d4b14240b8746a3ba17d5e
|
/onnxruntime/python/tools/microbench/attention.py
|
285b42b7cbb62484ade272633e158c12ad64e28a
|
[
"MIT"
] |
permissive
|
microsoft/onnxruntime
|
f75aa499496f4d0a07ab68ffa589d06f83b7db1d
|
5e747071be882efd6b54d7a7421042e68dcd6aff
|
refs/heads/main
| 2023-09-04T03:14:50.888927
| 2023-09-02T07:16:28
| 2023-09-02T07:16:28
| 156,939,672
| 9,912
| 2,451
|
MIT
| 2023-09-14T21:22:46
| 2018-11-10T02:22:53
|
C++
|
UTF-8
|
Python
| false
| false
| 2,236
|
py
|
attention.py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
import argparse
from dataclasses import dataclass
import numpy as np
from benchmark import BenchmarkOp, add_arguments
@dataclass
class OpParam:
batch_size: int
seq_len: int
hidden_size: int
length: int
data_type: type
class BenchmarkAttention(BenchmarkOp):
def __init__(self, args):
BenchmarkOp.__init__(self, args)
@classmethod
def create_inputs_outputs(cls, op_param):
np.random.seed(0)
input_data = np.random.rand(op_param.batch_size, op_param.seq_len, op_param.hidden_size).astype(
op_param.data_type
)
weight = np.random.rand(op_param.hidden_size, op_param.length).astype(op_param.data_type)
bias = np.random.rand(op_param.length).astype(op_param.data_type)
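        # note: np.random.rand yields floats in [0, 1), so astype(np.int32) truncates them all to 0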
mask_index = np.random.rand(op_param.batch_size, op_param.seq_len).astype(np.int32)
output_data = np.random.rand(op_param.batch_size, op_param.seq_len, op_param.hidden_size).astype(
op_param.data_type
)
inputs = {
"INPUT": input_data,
"WEIGHT": weight,
"BIAS": bias,
"MASK_INDEX": mask_index,
}
outputs = {"return_val": output_data}
return inputs, outputs
def create_cases(self):
model = "models/attention_fp16.onnx" if self.args.precision == "fp16" else "models/attention_fp32.onnx"
data_type = np.float16 if self.args.precision == "fp16" else np.float32
# bert-base
op_param = OpParam(1, 384, 768, 768 * 3, data_type)
self.add_case(op_param, model)
@classmethod
def case_profile(cls, op_param, time):
profile = f"(batch_size seq_len length) = ({op_param.batch_size} {op_param.seq_len} {op_param.length}), {time:7.4f} ms"
return profile
def main():
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
bm = BenchmarkAttention(args)
bm.benchmark()
if __name__ == "__main__":
main()
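# --- Usage sketch (not part of the original file) ---
# Assuming add_arguments (from the local benchmark module) registers a
# --precision flag, a typical run would be:
#   python attention.py --precision fp16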
|
e5004ab64beb3294e93a77c60682e9f569f1c0f7
|
c50e7eb190802d7849c0d0cea02fb4d2f0021777
|
/src/spring/azext_spring/tests/latest/app_managed_identity/test_app_managed_identity_force_set_scenario.py
|
a2c311efacbf4b7455c3dbfce70514132334f395
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azure-cli-extensions
|
c1615b19930bba7166c282918f166cd40ff6609c
|
b8c2cf97e991adf0c0a207d810316b8f4686dc29
|
refs/heads/main
| 2023-08-24T12:40:15.528432
| 2023-08-24T09:17:25
| 2023-08-24T09:17:25
| 106,580,024
| 336
| 1,226
|
MIT
| 2023-09-14T10:48:57
| 2017-10-11T16:27:31
|
Python
|
UTF-8
|
Python
| false
| false
| 6,543
|
py
|
test_app_managed_identity_force_set_scenario.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import (ScenarioTest, record_only)
from ....vendored_sdks.appplatform.v2023_07_01_preview.models import ManagedIdentityType
"""
In order to re-run this scenario test,
1. Choose a subscription ID in which you'll create user-assigned managed identities, and fill in ${USER_IDENTITY_SUB_ID}
2. Create a resource group ${USER_IDENTITY_RESOURCE_GROUP} in ${USER_IDENTITY_SUB_ID}
3. Manually create 2 user-assigned managed identities for USER_IDENTITY_NAME_1 and USER_IDENTITY_NAME_2 in \
group ${USER_IDENTITY_RESOURCE_GROUP} under subscription ${USER_IDENTITY_SUB_ID}.
4. After successfully re-run, Set ${USER_IDENTITY_SUB_ID} back to "00000000-0000-0000-0000-000000000000"
"""
USER_IDENTITY_SUB_ID = "00000000-0000-0000-0000-000000000000"
MASKED_SUB = "00000000-0000-0000-0000-000000000000"
USER_IDENTITY_RESOURCE_GROUP = "cli"
USER_IDENTITY_NAME_1 = "managed-identity-1"
USER_IDENTITY_NAME_2 = "managed-identity-2"
USER_IDENTITY_RESOURCE_ID_TEMPLATE = "/subscriptions/{}/resourcegroups/{}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{}"
MASKED_USER_IDENTITY_RESOURCE_ID_1 = USER_IDENTITY_RESOURCE_ID_TEMPLATE.format(MASKED_SUB, USER_IDENTITY_RESOURCE_GROUP, USER_IDENTITY_NAME_1)
MASKED_USER_IDENTITY_RESOURCE_ID_2 = USER_IDENTITY_RESOURCE_ID_TEMPLATE.format(MASKED_SUB, USER_IDENTITY_RESOURCE_GROUP, USER_IDENTITY_NAME_2)
USER_IDENTITY_RESOURCE_ID_1 = USER_IDENTITY_RESOURCE_ID_TEMPLATE.format(USER_IDENTITY_SUB_ID, USER_IDENTITY_RESOURCE_GROUP, USER_IDENTITY_NAME_1)
USER_IDENTITY_RESOURCE_ID_2 = USER_IDENTITY_RESOURCE_ID_TEMPLATE.format(USER_IDENTITY_SUB_ID, USER_IDENTITY_RESOURCE_GROUP, USER_IDENTITY_NAME_2)
@record_only()
class AppIdentityForceSet(ScenarioTest):
def test_app_identity_force_set(self):
self.kwargs.update({
'app': 'test-msi-force-set',
'serviceName': 'cli-unittest',
'rg': 'cli',
'ua1': USER_IDENTITY_RESOURCE_ID_1,
'ua2': USER_IDENTITY_RESOURCE_ID_2
})
self.cmd(
'spring app identity force-set -n {app} -g {rg} -s {serviceName} --system-assigned disable --user-assigned disable',
checks=[
self.check('identity', None)
])
self.cmd(
'spring app identity force-set -n {app} -g {rg} -s {serviceName} --system-assigned enable --user-assigned disable',
checks=[
self.check('identity.type', ManagedIdentityType.SYSTEM_ASSIGNED, case_sensitive=False),
self.exists('identity.tenantId'),
self.exists('identity.principalId'),
self.check('identity.userAssignedIdentities', None)
])
app = self.cmd(
'spring app identity force-set -n {app} -g {rg} -s {serviceName} --system-assigned disable --user-assigned {ua1}',
checks=[
self.check('identity.type', ManagedIdentityType.USER_ASSIGNED, case_sensitive=False),
self.exists('identity.tenantId'),
self.check('identity.principalId', None),
self.exists('identity.userAssignedIdentities')
]).json_value
user_identity_dict = self._to_lower(app['identity']['userAssignedIdentities'])
        self.assertIsInstance(user_identity_dict, dict)
        self.assertEqual(1, len(user_identity_dict))
self.assertTrue(self._contains_user_id_1(user_identity_dict.keys()))
app = self.cmd(
'spring app identity force-set -n {app} -g {rg} -s {serviceName} --system-assigned disable --user-assigned {ua2}',
checks=[
self.check('identity.type', ManagedIdentityType.USER_ASSIGNED, case_sensitive=False),
self.exists('identity.tenantId'),
self.check('identity.principalId', None),
self.exists('identity.userAssignedIdentities')
]).json_value
user_identity_dict = self._to_lower(app['identity']['userAssignedIdentities'])
        self.assertIsInstance(user_identity_dict, dict)
        self.assertEqual(1, len(user_identity_dict))
self.assertTrue(self._contains_user_id_2(user_identity_dict.keys()))
app = self.cmd(
'spring app identity force-set -n {app} -g {rg} -s {serviceName} --system-assigned enable --user-assigned {ua1} {ua2}',
checks=[
self.check('identity.type', ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED, case_sensitive=False),
self.exists('identity.tenantId'),
self.exists('identity.principalId'),
self.exists('identity.userAssignedIdentities')
]).json_value
user_identity_dict = self._to_lower(app['identity']['userAssignedIdentities'])
        self.assertIsInstance(user_identity_dict, dict)
        self.assertEqual(2, len(user_identity_dict))
self.assertTrue(self._contains_user_id_1(user_identity_dict.keys()))
self.assertTrue(self._contains_user_id_2(user_identity_dict.keys()))
self.cmd(
'spring app identity force-set -n {app} -g {rg} -s {serviceName} --system-assigned enable --user-assigned disable',
checks=[
self.check('identity.type', ManagedIdentityType.SYSTEM_ASSIGNED, case_sensitive=False),
self.exists('identity.tenantId'),
self.exists('identity.principalId'),
self.check('identity.userAssignedIdentities', None)
])
self.cmd(
'spring app identity force-set -n {app} -g {rg} -s {serviceName} --system-assigned disable --user-assigned disable',
checks=[
self.check('identity', None)
])
def _contains_user_id_1(self, keys):
return MASKED_USER_IDENTITY_RESOURCE_ID_1.lower() in keys or USER_IDENTITY_RESOURCE_ID_1.lower() in keys
def _contains_user_id_2(self, keys):
return MASKED_USER_IDENTITY_RESOURCE_ID_2.lower() in keys or USER_IDENTITY_RESOURCE_ID_2.lower() in keys
def _to_lower(self, str_dict):
new_dict = {}
for key in str_dict.keys():
new_dict[key.lower()] = str_dict[key]
return new_dict
|
cc1c114b0db7d640fc90a999502511eac4dddf85
|
7edc26a54f4b71085db5758ee15e87dfc822c372
|
/openelex/tests/test_fetch.py
|
32147f021a02a3c889135db7d30ff7ecf8dd4b01
|
[
"MIT"
] |
permissive
|
openelections/openelections-core
|
55f1b440644588502a6a1a67f8924024a2f1dffb
|
3c516d8c4cf1166b1868b738a248d48f3378c525
|
refs/heads/master
| 2022-02-06T01:24:38.557078
| 2021-04-22T17:53:34
| 2021-04-22T17:53:34
| 11,376,829
| 161
| 99
|
MIT
| 2022-01-21T18:56:12
| 2013-07-12T19:52:57
|
Python
|
UTF-8
|
Python
| false
| false
| 343
|
py
|
test_fetch.py
|
from unittest import TestCase
from openelex.base.fetch import ErrorHandlingURLopener, HTTPError
class TestErrorHandlingURLopener(TestCase):
def setUp(self):
self.opener = ErrorHandlingURLopener()
def test_404(self):
url = "http://example.com/test.csv"
self.assertRaises(HTTPError, self.opener.retrieve, url)
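# Note (assumption): urllib's stock URLopener silently saves HTTP error pages;
# ErrorHandlingURLopener presumably overrides that so an error status such as
# 404 raises HTTPError instead, which is exactly what this test asserts.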
|
13ff27ba48eae8bd9a129fae3a255de92a597b7d
|
3f1e0d8ac4800db05ce37d698a026ec085e3304c
|
/doric-Qt/example/doric/scripts/package-windows.py
|
40e5ae212fcd374a5efefd6cf448a3ba8bee6da4
|
[
"Apache-2.0"
] |
permissive
|
doric-pub/Doric
|
e8080e6e8091018e49e39fa35c7d737fc5323aed
|
ff92ac5b16de12b139f59156e8005dfddb645b41
|
refs/heads/master
| 2023-08-16T18:11:51.849633
| 2023-08-11T12:19:03
| 2023-08-11T12:36:52
| 216,160,126
| 170
| 25
|
Apache-2.0
| 2023-09-14T06:14:23
| 2019-10-19T06:28:35
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,246
|
py
|
package-windows.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import sys
import time
import platform
import shutil
import glob
import requests
import json
import zipfile
def system(command):
retcode = os.system(command)
if retcode != 0:
raise Exception("Error while executing:\n\t %s" % command)
def main():
version = "0.0.0"
isOnline = "0"
paramlen = len(sys.argv)
if paramlen == 3:
version = sys.argv[1]
isOnline = sys.argv[2]
print("version:"+str(version)+" isOnline:"+str(isOnline))
elif paramlen == 2:
version = sys.argv[1]
print("version:"+str(version))
elif paramlen == 1:
print("version:"+str(version)+" isOnline:"+str(isOnline))
else:
print("params error.");
return;
system('conan export-pkg ./conanfile-windows-debug.py DoricCore/%s@bixin/stable -s arch=x86 -s build_type=Debug -s compiler.version=15 -s os=Windows' % version)
system('conan export-pkg ./conanfile-windows-release.py DoricCore/%s@bixin/stable -s arch=x86 -s build_type=Release -s compiler.version=15 -s os=Windows' % version)
if isOnline == "1":
system('conan upload DoricCore/%s@bixin/stable --all -r=pc' % version)
if __name__ == "__main__":
main()
|
0aa45b3ff0fecbc66c43b7061dcdab70168e0960
|
acf7457d3a799cb9bff12686d2d616688bcd4b5b
|
/packages/python/plotly/plotly/validators/isosurface/_valuehoverformat.py
|
40fa68323eaffcae97724ed9e33aa03c52b0b8b2
|
[
"MIT"
] |
permissive
|
plotly/plotly.py
|
f4f61639f08160f16195efc95b5901dc5a937346
|
975a704074f01c078e0fdfa32bdf17130bf89e69
|
refs/heads/master
| 2023-09-06T06:15:08.340035
| 2023-08-24T12:28:14
| 2023-08-24T12:28:14
| 14,579,099
| 14,751
| 2,989
|
MIT
| 2023-09-08T19:55:32
| 2013-11-21T05:53:08
|
Python
|
UTF-8
|
Python
| false
| false
| 441
|
py
|
_valuehoverformat.py
|
import _plotly_utils.basevalidators
class ValuehoverformatValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="valuehoverformat", parent_name="isosurface", **kwargs
):
super(ValuehoverformatValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs,
)
|
2e2f6240cddafc7d4847a027600aa4b253d28635
|
982a904a83e2caa7acd8b2ac19cfc5a4fb75bde1
|
/examples/ch11/snippets_ipynb/ch11utilities.py
|
e20cb4f4a36a92b153c42c03325bac0e5747c33d
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
pdeitel/IntroToPython
|
73bc349fe40701b51f49d17d7fbc5b9985885e48
|
978093febf2ed849a2049e0b0860d2c4998306f7
|
refs/heads/master
| 2023-02-09T08:04:15.313698
| 2023-02-03T23:23:42
| 2023-02-03T23:23:42
| 173,331,130
| 249
| 371
| null | 2022-12-04T06:52:26
| 2019-03-01T16:08:37
| null |
UTF-8
|
Python
| false
| false
| 1,734
|
py
|
ch11utilities.py
|
# ch11utilities.py
"""Utility function for printing a pass of the
insertion_sort and selection_sort algorithms"""
def print_pass(data, pass_number, index):
"""Print a pass of the algorithm."""
label = f'after pass {pass_number}: '
print(label, end='')
# output elements up to selected item
print(' '.join(str(d) for d in data[:index]),
end=' ' if index != 0 else '')
print(f'{data[index]}* ', end='') # indicate swap with *
# output rest of elements
print(' '.join(str(d) for d in data[index + 1:len(data)]))
# underline elements that are sorted after this pass_number
print(f'{" " * len(label)}{"-- " * pass_number}')
##########################################################################
# (C) Copyright 2019 by Deitel & Associates, Inc. and #
# Pearson Education, Inc. All Rights Reserved. #
# #
# DISCLAIMER: The authors and publisher of this book have used their #
# best efforts in preparing the book. These efforts include the #
# development, research, and testing of the theories and programs #
# to determine their effectiveness. The authors and publisher make #
# no warranty of any kind, expressed or implied, with regard to these #
# programs or to the documentation contained in these books. The authors #
# and publisher shall not be liable in any event for incidental or #
# consequential damages in connection with, or arising out of, the #
# furnishing, performance, or use of these programs. #
##########################################################################
|
67ef1cfa42f1e21015da5ee170630fc06b3c0d1e
|
48ee50316a950d9bc789ae843477b58b2913bf0d
|
/src/app/test/api/http/unit/handlers/v1/event_test.py
|
d08a8033a17f67f502db222ef8374b6547b30444
|
[
"MIT"
] |
permissive
|
beer-garden/beer-garden
|
f6d1c305a261b59d3cb3389513fc3138004a8d07
|
a5fd2dcc2444409e243d3fdaa43d86695e5cb142
|
refs/heads/develop
| 2023-08-15T11:50:29.833953
| 2023-07-20T03:20:45
| 2023-07-20T03:20:45
| 120,045,001
| 254
| 38
|
MIT
| 2023-07-20T03:20:47
| 2018-02-03T00:13:29
|
Python
|
UTF-8
|
Python
| false
| false
| 6,193
|
py
|
event_test.py
|
# -*- coding: utf-8 -*-
import json
import pytest
from brewtils.models import Event, System
from mock import Mock
from tornado import gen
from tornado.testing import AsyncHTTPTestCase, gen_test
from tornado.web import Application
from tornado.websocket import websocket_connect
from beer_garden.api.http.handlers.v1.event import (
WEBSOCKET_EVENT_TYPE_BLOCKLIST,
EventSocket,
)
@pytest.fixture
def get_current_user_mock(monkeypatch):
def get_current_user(self):
return "someuser"
monkeypatch.setattr(EventSocket, "get_current_user", get_current_user)
@pytest.fixture
def eventsocket_mock(get_current_user_mock, monkeypatch):
from beer_garden.api.http.handlers.v1 import event
def _user_can_receive_messages_for_event(user, event):
return True
monkeypatch.setattr(
event,
"_user_can_receive_messages_for_event",
_user_can_receive_messages_for_event,
)
@pytest.fixture
def user_from_token_mocks(monkeypatch):
from beer_garden.api.http.handlers.v1 import event
def decode_token(encoded_token, expected_type):
return {"access": "mytoken"}
def get_user_from_token(access_token):
return "someuser"
monkeypatch.setattr(event, "decode_token", decode_token)
monkeypatch.setattr(event, "get_user_from_token", get_user_from_token)
def token_update_message(token):
return json.dumps({"name": "UPDATE_TOKEN", "payload": token})
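# For example, token_update_message("abc") produces
# '{"name": "UPDATE_TOKEN", "payload": "abc"}', the message shape the socket
# handlers in these tests expect.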
class TestEventSocket(AsyncHTTPTestCase):
path = "/api/v1/socket/events"
event = Event(name="EVENT", payload_type="System", payload=System(name="mysystem"))
def get_app(self):
return Application([(self.path, EventSocket)])
@gen.coroutine
def ws_connect(self):
url = f"ws://localhost:{self.get_http_port()}{self.path}"
ws = yield websocket_connect(url)
return ws
@gen_test
@pytest.mark.usefixtures("app_config_auth_enabled")
def test_event_socket_requests_authorization_on_connect(self):
ws_client = yield self.ws_connect()
response = yield ws_client.read_message()
ws_client.close()
response_dict = json.loads(response)
assert response_dict["name"] == "AUTHORIZATION_REQUIRED"
@gen_test
@pytest.mark.usefixtures("app_config_auth_enabled", "user_from_token_mocks")
def test_event_socket_accepts_valid_token_update(self):
ws_client = yield self.ws_connect()
yield ws_client.read_message() # Read the AUTHORIZATION_REQUIRED message
ws_client.write_message(token_update_message("totallyvalidtoken"))
response = yield ws_client.read_message()
ws_client.close()
response_dict = json.loads(response)
assert response_dict["name"] == "TOKEN_UPDATED"
@gen_test
@pytest.mark.usefixtures("app_config_auth_enabled")
def test_event_socket_rejects_invalid_token_update(self):
ws_client = yield self.ws_connect()
yield ws_client.read_message() # Read the AUTHORIZATION_REQUIRED message
access_token = "invalidtoken"
ws_client.write_message(token_update_message(access_token))
response = yield ws_client.read_message()
ws_client.close()
response_dict = json.loads(response)
assert response_dict["name"] == "AUTHORIZATION_REQUIRED"
@gen_test
@pytest.mark.usefixtures("app_config_auth_enabled")
def test_event_socket_rejects_bad_messages(self):
ws_client = yield self.ws_connect()
yield ws_client.read_message() # Read the AUTHORIZATION_REQUIRED message
ws_client.write_message("improperly formatted message")
response = yield ws_client.read_message()
ws_client.close()
response_dict = json.loads(response)
assert response_dict["name"] == "BAD_MESSAGE"
@gen_test
@pytest.mark.usefixtures("app_config_auth_disabled")
def test_publish_auth_disabled(self):
ws_client = yield self.ws_connect()
EventSocket.publish(self.event)
response = yield ws_client.read_message()
ws_client.close()
response_dict = json.loads(response)
assert response_dict["payload"]["name"] == self.event.payload.name
@gen_test
@pytest.mark.usefixtures("app_config_auth_enabled")
def test_publish_auth_enabled_requests_authorization(self):
ws_client = yield self.ws_connect()
yield ws_client.read_message() # Read the AUTHORIZATION_REQUIRED message
EventSocket.publish(self.event)
response = yield ws_client.read_message()
ws_client.close()
response_dict = json.loads(response)
assert response_dict["name"] == "AUTHORIZATION_REQUIRED"
@gen_test
@pytest.mark.usefixtures("app_config_auth_enabled", "eventsocket_mock")
def test_publish_auth_enabled_publishes_event_for_authorized_user(self):
ws_client = yield self.ws_connect()
yield ws_client.read_message() # Read the AUTHORIZATION_REQUIRED message
EventSocket.publish(self.event)
response = yield ws_client.read_message()
ws_client.close()
response_dict = json.loads(response)
assert response_dict["payload"]["name"] == self.event.payload.name
@gen_test
@pytest.mark.usefixtures("app_config_auth_enabled", "get_current_user_mock")
def test_publish_auth_enabled_publishes_event_without_payload_type(self):
ws_client = yield self.ws_connect()
yield ws_client.read_message() # Read the AUTHORIZATION_REQUIRED message
event = Event(name="ENTRY_STARTED")
EventSocket.publish(event)
response = yield ws_client.read_message()
ws_client.close()
response_dict = json.loads(response)
assert response_dict["payload"] is None
assert response_dict["name"] == event.name
@gen_test
@pytest.mark.usefixtures("app_config_auth_disabled")
def test_publish_skips_events_on_blocklist(self):
yield self.ws_connect()
EventSocket.write_message = Mock()
event = Event(name=WEBSOCKET_EVENT_TYPE_BLOCKLIST[0])
EventSocket.publish(event)
assert EventSocket.write_message.called is False
|
930b391f348c83c4b4cdd114e9112d3b88df7a72
|
0c8ac66ae050e1a98dd8afd7525c9ed74ec5d300
|
/django_school_management/result/migrations/0008_subjectgroup_subjects.py
|
011335ba1b97fcc0d0bf2b62d74be8e307fbced2
|
[] |
no_license
|
TareqMonwer/Django-School-Management
|
5b1c8145d04082063bc14fc9db1ce38b4db97a9d
|
3d425d300a77ad505089a3a4c0a9dc71cacbe89a
|
refs/heads/master
| 2023-08-19T23:36:34.359488
| 2023-08-13T05:53:42
| 2023-08-13T05:53:42
| 221,053,244
| 409
| 163
| null | 2023-08-13T05:53:44
| 2019-11-11T19:22:28
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 451
|
py
|
0008_subjectgroup_subjects.py
|
# Generated by Django 2.2.13 on 2020-12-21 06:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('academics', '0010_auto_20201219_0025'),
('result', '0007_subjectgroup'),
]
operations = [
migrations.AddField(
model_name='subjectgroup',
name='subjects',
field=models.ManyToManyField(to='academics.Subject'),
),
]
|
3dfc2bc0486d8c24e9e29a9454d582f71b499901
|
1adebf72de7aa7147b1148ba35280645fbe5bbd3
|
/docs/source/conf.py
|
f0bd5d473b2c38ef98aa26317b69e3f62995634e
|
[
"MIT"
] |
permissive
|
josiah-wolf-oberholtzer/supriya
|
d0c4f921a06e3f9df40f91a226a1c038d3ef84d5
|
2ebf835ce9bbfca19e4220628a32c30fa66e04f7
|
refs/heads/main
| 2023-07-20T00:06:23.955530
| 2023-07-18T03:02:14
| 2023-07-18T03:02:14
| 17,463,359
| 227
| 28
|
MIT
| 2023-07-18T03:02:15
| 2014-03-06T02:27:25
|
Python
|
UTF-8
|
Python
| false
| false
| 3,942
|
py
|
conf.py
|
import datetime
import os
import supriya
### SPHINX ###
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.graphviz",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
# "sphinx.ext.viewcode",
"sphinxext.opengraph",
"uqbar.sphinx.api",
"uqbar.sphinx.book",
"uqbar.sphinx.inheritance",
"sphinx_immaterial",
]
add_module_names = False
copyright = f"2014-{datetime.date.today().year}, Josiah Wolf Oberholtzer"
exclude_patterns = []
htmlhelp_basename = "Supriyadoc"
language = "en"
master_doc = "index"
project = "Supriya"
pygments_style = "sphinx"
source_suffix = ".rst"
templates_path = ["_templates"]
version = release = supriya.__version__
### GRAPHVIZ ###
graphviz_dot_args = ["-s32"]
graphviz_output_format = "svg"
### INTERSPHINX ###
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"uqbar": ("http://josiahwolfoberholtzer.com/uqbar", None),
}
### OPENGRAPH ###
ogp_site_url = "https://josiahwolfoberholtzer.com/supriya/"
### TODO ###
todo_include_todos = True
### UQBAR API ###
uqbar_api_member_documenter_classes = [
"uqbar.apis.FunctionDocumenter",
"uqbar.apis.ImmaterialClassDocumenter",
]
uqbar_api_module_documenter_class = "uqbar.apis.ImmaterialModuleDocumenter"
uqbar_api_omit_root = True
uqbar_api_root_documenter_class = "uqbar.apis.SummarizingRootDocumenter"
uqbar_api_source_paths = supriya.__path__
uqbar_api_title = "Supriya API Reference"
### UQBAR BOOK ###
uqbar_book_console_setup = ["import supriya"]
uqbar_book_console_teardown = """\
import asyncio
async def shutdown():
for server in tuple(supriya.Server._contexts):
if asyncio.iscoroutine(result := server._shutdown()):
await result
asyncio.run(shutdown())
""".splitlines()
uqbar_book_extensions = [
"uqbar.book.extensions.GraphExtension",
"supriya.ext.book.PlayExtension",
"supriya.ext.book.PlotExtension",
]
uqbar_book_strict = os.environ.get("CI") == "true"
uqbar_book_use_black = True
uqbar_book_use_cache = False
### THEME ###
html_css_files = [
"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.15.4/css/all.min.css"
]
html_domain_indices = True
html_favicon = "favicon.ico"
html_js_files = []
html_logo = "icon.svg"
html_static_path = ["_static"]
html_theme = "sphinx_immaterial"
html_theme_options = {
"icon": {"repo": "fontawesome/brands/github"},
"site_url": "https://josiahwolfoberholtzer.com/supriya/",
"repo_url": "https://github.com/josiah-wolf-oberholtzer/supriya/",
"repo_name": "supriya",
"repo_type": "github",
"edit_uri": "blob/main/docs",
"globaltoc_collapse": False,
"features": [
# "header.autohide",
# "navigation.expand",
# "navigation.instant",
# "navigation.sections",
"navigation.tabs",
"navigation.top",
# "search.highlight",
# "search.share",
# "toc.integrate",
],
"palette": [
{
"media": "(prefers-color-scheme: dark)",
"scheme": "slate",
"primary": "blue-grey",
"accent": "lime",
"toggle": {
"icon": "material/toggle-switch",
"name": "Switch to light mode",
},
},
{
"media": "(prefers-color-scheme: light)",
"scheme": "default",
"primary": "indigo",
"accent": "teal",
"toggle": {
"icon": "material/toggle-switch-off-outline",
"name": "Switch to dark mode",
},
},
],
"version_dropdown": False,
}
html_title = "Supriya"
html_use_index = True
object_description_options = [
("py:.*", dict(include_fields_in_toc=False)), # Hide "Parameters" in TOC
("py:parameter", dict(include_in_toc=False)), # Hide "p" parameter entries in TOC
("py:exception", {"toc_icon_class": "data", "toc_icon_text": "X"}),
]
|
be1f208de4063bd6877cbcec7beb3880138903fd
|
2427bd92fae91f0f9168c7de7f822306faa36fe0
|
/tests/test_sampler.py
|
214be9afa9f2902abe4261a84aa96c73818bf6b5
|
[
"Apache-2.0"
] |
permissive
|
aio-libs/aiozipkin
|
52e4a5dace1e94b4ec1b7391fb670f86446ec4f4
|
0f15660507ffd8a0229fb5053ef55383a7580d7b
|
refs/heads/master
| 2023-08-31T12:33:49.399845
| 2022-07-01T16:32:45
| 2022-07-01T16:32:45
| 105,470,666
| 184
| 34
|
Apache-2.0
| 2023-04-03T16:29:54
| 2017-10-01T20:05:13
|
Python
|
UTF-8
|
Python
| false
| false
| 801
|
py
|
test_sampler.py
|
from aiozipkin.sampler import Sampler
def test_sample_always() -> None:
sampler = Sampler(sample_rate=1.0)
trace_id = "bde15168450e7097008c7aab41c27ade"
assert sampler.is_sampled(trace_id)
assert sampler.is_sampled(trace_id)
assert sampler.is_sampled(trace_id)
def test_sample_never() -> None:
sampler = Sampler(sample_rate=0.0)
trace_id = "bde15168450e7097008c7aab41c27ade"
assert not sampler.is_sampled(trace_id)
assert not sampler.is_sampled(trace_id)
assert not sampler.is_sampled(trace_id)
def test_sample_with_rate() -> None:
sampler = Sampler(sample_rate=0.3, seed=123)
trace_id = "bde15168450e7097008c7aab41c27ade"
assert sampler.is_sampled(trace_id)
assert sampler.is_sampled(trace_id)
assert not sampler.is_sampled(trace_id)
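# Note: as the test above demonstrates, the rate-based decision is drawn per
# is_sampled() call from the seeded RNG rather than derived from the trace_id,
# so the same id yields the deterministic True/True/False pattern for seed=123.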
|
b6fdbd02e21dcc93e8aefc4da6fb3253fff214f1
|
518bf342bc4138982af3e2724e75f1d9ca3ba56c
|
/solutions/1932. Merge BSTs to Create Single BST/1932.py
|
2416f6ae9128d107a835c0e6e2cc9ec834722899
|
[
"MIT"
] |
permissive
|
walkccc/LeetCode
|
dae85af7cc689882a84ee5011f0a13a19ad97f18
|
a27be41c174565d365cbfe785f0633f634a01b2a
|
refs/heads/main
| 2023-08-28T01:32:43.384999
| 2023-08-20T19:00:45
| 2023-08-20T19:00:45
| 172,231,974
| 692
| 302
|
MIT
| 2023-08-13T14:48:42
| 2019-02-23T15:46:23
|
C++
|
UTF-8
|
Python
| false
| false
| 1,119
|
py
|
1932.py
|
import collections
from typing import List, Optional
# Note: TreeNode is LeetCode's predefined binary-tree node (val, left, right).
class Solution:
def canMerge(self, trees: List[TreeNode]) -> Optional[TreeNode]:
valToNode = {} # {val: node}
count = collections.Counter() # {val: freq}
for tree in trees:
valToNode[tree.val] = tree
count[tree.val] += 1
if tree.left:
count[tree.left.val] += 1
if tree.right:
count[tree.right.val] += 1
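        # Comments added for clarity: the merge root is the tree whose value is
        # counted exactly once (it never appears as another tree's child). During
        # the BST validation below, any leaf whose value matches a stored root is
        # spliced in and consumed from valToNode; a successful merge leaves at
        # most the root's own entry behind.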
def isValidBST(tree: Optional[TreeNode], minNode: Optional[TreeNode], maxNode: Optional[TreeNode]) -> bool:
if not tree:
return True
if minNode and tree.val <= minNode.val:
return False
if maxNode and tree.val >= maxNode.val:
return False
if not tree.left and not tree.right and tree.val in valToNode:
val = tree.val
tree.left = valToNode[val].left
tree.right = valToNode[val].right
del valToNode[val]
return isValidBST(tree.left, minNode, tree) and isValidBST(tree.right, tree, maxNode)
for tree in trees:
if count[tree.val] == 1:
if isValidBST(tree, None, None) and len(valToNode) <= 1:
return tree
return None
return None
|
7039d88d8c5cd4aec4310dc346556366a8389531
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/tests/pytests/unit/client/ssh/test_shell.py
|
37065c4c187601b2b2a59b93fd8737fe353c888f
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,765
|
py
|
test_shell.py
|
import subprocess
import types
import pytest
import salt.client.ssh.shell as shell
from tests.support.mock import patch
@pytest.fixture
def keys(tmp_path):
pub_key = tmp_path / "ssh" / "testkey.pub"
priv_key = tmp_path / "ssh" / "testkey"
return types.SimpleNamespace(pub_key=pub_key, priv_key=priv_key)
@pytest.mark.skip_on_windows(reason="Windows does not support salt-ssh")
@pytest.mark.skip_if_binaries_missing("ssh", "ssh-keygen", check_all=True)
def test_ssh_shell_key_gen(keys):
"""
Test ssh key_gen
"""
shell.gen_key(str(keys.priv_key))
assert keys.priv_key.exists()
assert keys.pub_key.exists()
    # verify there is no passphrase set on the key
ret = subprocess.check_output(
["ssh-keygen", "-f", str(keys.priv_key), "-y"],
timeout=30,
)
assert ret.decode().startswith("ssh-rsa")
@pytest.mark.skip_on_windows(reason="Windows does not support salt-ssh")
@pytest.mark.skip_if_binaries_missing("ssh", "ssh-keygen", check_all=True)
def test_ssh_shell_exec_cmd(caplog):
"""
Test executing a command and ensuring the password
is not in the stdout/stderr logs.
"""
passwd = "12345"
opts = {"_ssh_version": (4, 9)}
host = ""
_shell = shell.Shell(opts=opts, host=host)
_shell.passwd = passwd
with patch.object(_shell, "_split_cmd", return_value=["echo", passwd]):
ret = _shell.exec_cmd("echo {}".format(passwd))
assert not any([x for x in ret if passwd in str(x)])
assert passwd not in caplog.text
with patch.object(_shell, "_split_cmd", return_value=["ls", passwd]):
ret = _shell.exec_cmd("ls {}".format(passwd))
assert not any([x for x in ret if passwd in str(x)])
assert passwd not in caplog.text
|
9f33c2a0f0d3b73103a84c3702c8d8ad79a48927
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/EventFilter/HcalRawToDigi/python/HcalLaserEventFilterProducer_cfi.py
|
02c6d9bc8c2ccccd9c57bb40bce6840ee748fd97
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 1,904
|
py
|
HcalLaserEventFilterProducer_cfi.py
|
import FWCore.ParameterSet.Config as cms
import os
HcalLaserEventFilterResult=cms.EDProducer("HcalLaserEventFiltProducer2012",
                                          # Specify laser events to remove in gzipped file
eventFileName = cms.string("EventFilter/HcalRawToDigi/data/HCALLaser2012AllDatasets.txt.gz"),
# if verbose==true, run:ls:event for any event failing filter will be printed to cout
verbose = cms.untracked.bool(False),
# Select a prefix to appear before run:ls:event when run info dumped to cout. This makes searching for listed events a bit easier
prefix = cms.untracked.string(""),
                                          # If minrun or maxrun is > -1, then only the subsection of the EventList corresponding to the given [minrun, maxrun] range is searched when looking to reject bad events. This can speed up the code a bit when looking over a small section of data, since the bad EventList can be shortened considerably.
minrun = cms.untracked.int32(-1),
maxrun = cms.untracked.int32(-1),
WriteBadToFile = cms.untracked.bool(False), # if set to 'True', then the list of events failing the filter cut will be written to a text file 'badHcalLaserList_eventfilter.txt'. Events in the file will not have any prefix added, but will be a simple list of run:ls:event.
forceFilterTrue=cms.untracked.bool(False) # if specified, filter will always return 'True'. You could use this along with the 'verbose' or 'WriteBadToFile' booleans in order to dump out bad event numbers without actually filtering them
)
|
eed70bc0e6b3d826aa025cc69a7e33b8618e3f46
|
a18320b408bf4f17ff8dd20c90b139c801b9f7a1
|
/docker-build/9.0.5.15/scripts/applyConfig.py
|
6ad64d41fbb56d290acb469ad637769d17bf3227
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
WASdev/ci.docker.websphere-traditional
|
6412a0e1e9abe17946100174ffe87242049fbcfe
|
38bd272341e42b572ac2af41d5b14aeb03c5d5c2
|
refs/heads/main
| 2023-08-31T03:23:09.158252
| 2023-08-03T13:49:12
| 2023-08-03T13:49:12
| 49,430,367
| 173
| 217
|
Apache-2.0
| 2023-08-03T13:49:14
| 2016-01-11T14:09:10
|
HTML
|
UTF-8
|
Python
| false
| false
| 152
|
py
|
applyConfig.py
|
import os
import sys
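# Under wsadmin's Jython, sys.argv contains only the user-supplied arguments
# (no script name), so argv[0] below is the properties file path passed in.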
filename = sys.argv[0]
AdminTask.applyConfigProperties('[-propertiesFileName ' + filename + ' -validate true]')
AdminConfig.save()
|
6f991a6ea0985b45d7c4433b0b442e0ea4510f75
|
7a4caf0cc235bc1b3211efbbd9375e9ca2e2b37b
|
/adaptive/tests/unit/test_triangulation.py
|
4aa48a9f06e26c55258eac84c5e7dcd11636e869
|
[
"BSD-3-Clause"
] |
permissive
|
python-adaptive/adaptive
|
7f001c8bd456321a5b16d7b9d411f2266b0d73dc
|
cfe628a24dc9bde795654c7df7a2af9fe098df39
|
refs/heads/main
| 2023-08-31T00:50:47.996931
| 2023-08-14T18:43:36
| 2023-08-14T18:43:36
| 113,714,660
| 1,081
| 69
|
BSD-3-Clause
| 2023-08-20T19:56:48
| 2017-12-10T01:47:05
|
Python
|
UTF-8
|
Python
| false
| false
| 3,338
|
py
|
test_triangulation.py
|
import numpy as np
import pytest
from adaptive.learner.triangulation import Triangulation
###################################
# Points are shaped like this: #
# #
# ..(5) #
# ..--** / #
# (1)** / #
# / | \ / #
# / | \ / #
# / | \ / #
# (2)----(0)----(4) #
# \ | / #
# \ | / #
# \ | / #
# (3) #
# #
###################################
points = np.array([(2, 2), (2, 4), (0, 3), (2, 0), (4, 2), (5, 5)])
def test_triangulation_can_find_the_simplices():
tri = Triangulation(points)
assert tri.simplices == {(0, 1, 4), (0, 1, 2), (0, 2, 3), (0, 3, 4), (1, 4, 5)}
def test_triangulation_can_find_neighbors():
tri = Triangulation(points)
assert tri.get_simplices_attached_to_points((0, 1, 4)) == {
(0, 1, 2),
(0, 3, 4),
(1, 4, 5),
}
assert tri.get_simplices_attached_to_points((1, 4, 5)) == {(0, 1, 4)}
assert tri.get_simplices_attached_to_points((0, 3, 4)) == {(0, 1, 4), (0, 2, 3)}
def test_triangulation_can_find_opposing_points():
tri = Triangulation(points)
assert tri.get_opposing_vertices((0, 1, 4)) == (5, 3, 2)
assert tri.get_opposing_vertices((1, 4, 5)) == (None, None, 0)
assert tri.get_opposing_vertices((0, 1, 2)) == (None, 3, 4)
assert tri.get_opposing_vertices((0, 2, 3)) == (None, 4, 1)
assert tri.get_opposing_vertices((0, 3, 4)) == (None, 1, 2)
def test_triangulation_can_get_opposing_points_if_only_one_simplex_exists():
tri = Triangulation(points[:3])
assert tri.get_opposing_vertices((0, 1, 2)) == (None, None, None)
def test_triangulation_find_opposing_vertices_raises_if_simplex_is_invalid():
tri = Triangulation(points)
with pytest.raises(ValueError):
tri.get_opposing_vertices((0, 2, 1))
with pytest.raises(ValueError):
tri.get_opposing_vertices((2, 3, 5))
def test_circumsphere():
from numpy import allclose
from numpy.random import normal, uniform
from adaptive.learner.triangulation import circumsphere, fast_norm
def generate_random_sphere_points(dim, radius=0):
"""https://math.stackexchange.com/a/1585996"""
vec = [None] * (dim + 1)
center = uniform(-100, 100, dim)
radius = uniform(1.0, 100.0) if radius == 0 else radius
for i in range(dim + 1):
points = normal(0, size=dim)
x = fast_norm(points)
points = points / x * radius
vec[i] = tuple(points + center)
return radius, center, vec
for dim in range(2, 10):
radius, center, points = generate_random_sphere_points(dim)
circ_center, circ_radius = circumsphere(points)
err_msg = ""
if not allclose(circ_center, center):
err_msg += f"Calculated center ({circ_center}) differs from true center ({center})\n"
if not allclose(radius, circ_radius):
err_msg += (
f"Calculated radius {circ_radius} differs from true radius {radius}\n"
)
if err_msg:
raise AssertionError(err_msg)
|
5a022aa6555ce0001cba21df88932eef0eceef5f
|
b74320ad439e37dfa48cd8db38dab3b7a20a36ff
|
/tests/pipelines/text_to_video/test_text_to_video.py
|
801af7f6b4e6253965ae36eccfd5caa9ef9e5ed8
|
[
"Apache-2.0"
] |
permissive
|
huggingface/diffusers
|
c82beba1ec5f0aba01b6744040a5accc41ec2493
|
5eeedd9e3336882d598091e191559f67433b6427
|
refs/heads/main
| 2023-08-29T01:22:52.237910
| 2023-08-28T18:16:27
| 2023-08-28T18:16:27
| 498,011,141
| 17,308
| 3,158
|
Apache-2.0
| 2023-09-14T20:57:44
| 2022-05-30T16:04:02
|
Python
|
UTF-8
|
Python
| false
| false
| 6,668
|
py
|
test_text_to_video.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
TextToVideoSDPipeline,
UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = TextToVideoSDPipeline
params = TEXT_TO_IMAGE_PARAMS
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
]
)
def get_dummy_components(self):
torch.manual_seed(0)
unet = UNet3DConditionModel(
block_out_channels=(32, 64, 64, 64),
layers_per_block=2,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
cross_attention_dim=32,
attention_head_dim=4,
)
scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
clip_sample=False,
set_alpha_to_one=False,
)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
sample_size=128,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
hidden_act="gelu",
projection_dim=512,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def test_text_to_video_default_case(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = TextToVideoSDPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
inputs["output_type"] = "np"
frames = sd_pipe(**inputs).frames
image_slice = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_attention_slicing_forward_pass(self):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),
reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)
# (todo): sayakpaul
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
def test_inference_batch_consistent(self):
pass
# (todo): sayakpaul
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
def test_inference_batch_single_identical(self):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
def test_num_images_per_prompt(self):
pass
def test_progress_bar(self):
return super().test_progress_bar()
@slow
@skip_mps
@require_torch_gpu
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
def test_two_step_model(self):
expected_video = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
)
pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
pipe = pipe.to(torch_device)
prompt = "Spiderman is surfing"
generator = torch.Generator(device="cpu").manual_seed(0)
video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
video = video_frames.cpu().numpy()
assert np.abs(expected_video - video).mean() < 5e-2
|
9158199276a43e9616315d35f1eba3cd5f3d0f82
|
8da41ffa2ccb09e04f95db0f211e0ed69a42a352
|
/courses/data-engineering/demos/composer_ml_pipeline/chicago_taxifare/trainer/task.py
|
d40ef3506066cb9aad8fcf2179add22cb1c1ccd8
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/training-data-analyst
|
808af9b09a0e5f5657c4ca76cdd205f808d76d89
|
975a95032ce5b7012d1772c7f1f5cfe606eae839
|
refs/heads/master
| 2023-09-05T19:50:59.722334
| 2023-09-04T14:25:33
| 2023-09-04T14:25:33
| 56,459,948
| 7,311
| 5,917
|
Apache-2.0
| 2023-09-13T21:45:54
| 2016-04-17T21:39:27
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,015
|
py
|
task.py
|
import argparse
from . import model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--train_data_path",
help="GCS or local path to training data",
required=True
)
parser.add_argument(
"--train_epochs",
help="Steps to run the training job for (default: 5)",
type=int,
default=10
)
parser.add_argument(
"--eval_data_path",
help="GCS or local path to evaluation data",
required=True
)
parser.add_argument(
"--output_dir",
help="GCS location to write checkpoints and export models",
required=True
)
parser.add_argument(
"--log_dir",
help="GCS location to write Tensorboard logs",
required=True
)
parser.add_argument(
"--job-dir",
help="This is not used by our model, but it is required by gcloud",
)
hparams = parser.parse_args().__dict__
model.train_and_evaluate(hparams)
|
c10b6161bb584c8ea671b43066b165127cdcb19f
|
4a0e17a639b8d657ff263863d1df2cb1e60b68c5
|
/setup.py
|
7273b897719cbc5f71dc566c57b59e9080efade4
|
[
"BSD-2-Clause"
] |
permissive
|
ianozsvald/ipython_memory_usage
|
af785a13e43bebb3adc3476118c6186606b83de6
|
a2c28197f67e249f12aa1f603b9a4bf712f70add
|
refs/heads/master
| 2023-08-27T08:34:17.948989
| 2021-05-14T12:09:08
| 2021-05-14T12:09:08
| 21,815,824
| 333
| 58
|
BSD-2-Clause
| 2019-11-02T14:44:46
| 2014-07-14T10:26:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,714
|
py
|
setup.py
|
#!/usr/bin/env python
"""ipython_memory_usage: display memory usage during IPython execution
ipython_memory_usage is an IPython tool to report memory usage deltas for every command you type.
"""
doclines = __doc__.split("\n")
# Chosen from http://www.python.org/pypi?:action=list_classifiers
classifiers = """\
Development Status :: 5 - Production/Stable
Environment :: Console
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: Free To Use But Restricted
Natural Language :: English
Operating System :: OS Independent
Programming Language :: Python
Topic :: Software Development :: Libraries :: Python Modules
Topic :: Software Development :: Testing
"""
from setuptools import setup, find_packages
setup(
name="ipython_memory_usage",
version="1.1",
url="https://github.com/ianozsvald/ipython_memory_usage",
author="Ian Ozsvald",
author_email="ian@ianozsvald.com",
maintainer="Ian Ozsvald",
maintainer_email="ian@ianozsvald.com",
description=doclines[0],
long_description = """IPython tool to report memory usage deltas for every command you type. If you are running out of RAM then use this tool to understand what's happening. It also records the time spent running each command. \n
In [3]: arr=np.random.uniform(size=int(1e7))\n
'arr=np.random.uniform(size=int(1e7))' used 76.2578 MiB RAM in 0.33s, peaked 0.00 MiB above current, total RAM usage 107.37 MiB
""",
long_description_content_type='text/markdown',
classifiers=filter(None, classifiers.split("\n")),
platforms=["Any."],
packages=['ipython_memory_usage'],
package_dir={'': 'src'},
install_requires=['IPython>=2.1', 'memory_profiler']
)
|
6c24535e2f5aac8cf0d7b57039633ab69c4dc2a7
|
6416b746ee71d897789eab1e450000831674dbd0
|
/src/otx/cli/utils/multi_gpu.py
|
f1ffe7774bc32d248ea3872e434931601afe9144
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/training_extensions
|
c921f83ad52311af96ff45ae0b88d0aecddd855b
|
80454808b38727e358e8b880043eeac0f18152fb
|
refs/heads/develop
| 2023-08-31T06:29:07.229339
| 2023-08-31T01:57:26
| 2023-08-31T01:57:26
| 154,843,614
| 397
| 230
|
Apache-2.0
| 2023-09-14T06:17:01
| 2018-10-26T14:02:29
|
Python
|
UTF-8
|
Python
| false
| false
| 12,862
|
py
|
multi_gpu.py
|
"""Multi GPU training utility."""
# Copyright (C) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import datetime
import logging
import os
import signal
import socket
import sys
import threading
import time
from contextlib import closing
from typing import Callable, List, Optional, Union
import psutil
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from otx.api.configuration import ConfigurableParameters
logger = logging.getLogger(__name__)
def _get_free_port():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.bind(("", 0))
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return sock.getsockname()[1]
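# Note (added): binding to port 0 asks the OS for any free ephemeral port; the
# chosen port number is read back via getsockname() before the socket closes.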
def get_gpu_ids(gpus: str) -> List[int]:
"""Get proper GPU indices form `--gpu` arguments.
Given `--gpus` argument, exclude inappropriate indices and transform to list of int format.
Args:
gpus (str): GPU indices to use. Format should be Comma-separated indices.
Returns:
List[int]:
list including proper GPU indices.
"""
num_available_gpu = torch.cuda.device_count()
gpu_ids = []
for gpu_id in gpus.split(","):
if not gpu_id.isnumeric():
raise ValueError("--gpus argument should be numbers separated by ','.")
gpu_ids.append(int(gpu_id))
wrong_gpus = []
for gpu_idx in gpu_ids:
if gpu_idx >= num_available_gpu:
wrong_gpus.append(gpu_idx)
for wrong_gpu in wrong_gpus:
gpu_ids.remove(wrong_gpu)
if wrong_gpus:
logger.warning(f"Wrong gpu indices are excluded. {','.join([str(val) for val in gpu_ids])} GPU will be used.")
return gpu_ids
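# Illustrative example (not in the original): on a 2-GPU machine,
# get_gpu_ids("0,1,5") warns about the out-of-range index 5 and returns [0, 1].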
def set_arguments_to_argv(keys: Union[str, List[str]], value: Optional[str] = None, after_params: bool = False):
"""Add arguments at proper position in `sys.argv`.
Args:
        keys (str or List[str]): argument keys.
value (str or None): argument value.
after_params (bool): whether argument should be after `param` or not.
"""
if not isinstance(keys, list):
keys = [keys]
for key in keys:
if key in sys.argv:
if value is not None:
sys.argv[sys.argv.index(key) + 1] = value
return
key = keys[0]
if not after_params and "params" in sys.argv:
sys.argv.insert(sys.argv.index("params"), key)
if value is not None:
sys.argv.insert(sys.argv.index("params"), value)
else:
if after_params and "params" not in sys.argv:
sys.argv.append("params")
if value is not None:
sys.argv.extend([key, value])
else:
sys.argv.append(key)
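# Illustrative example (not in the original): with
#   sys.argv == ["otx", "train", "params"]
# set_arguments_to_argv("--output", "/tmp/out") inserts the pair before
# "params", giving ["otx", "train", "--output", "/tmp/out", "params"].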
def is_multigpu_child_process():
"""Check current process is a child process for multi GPU training."""
return (dist.is_initialized() or "TORCHELASTIC_RUN_ID" in os.environ) and os.environ["LOCAL_RANK"] != "0"
class MultiGPUManager:
"""Class to manage multi GPU training.
Args:
train_func (Callable): model training function.
gpu_ids (str): GPU indices to use. Format should be Comma-separated indices.
rdzv_endpoint (str): Rendezvous endpoint for multi-node training.
base_rank (int): Base rank of the worker.
world_size (int): Total number of workers in a worker group.
start_time (Optional[datetime.datetime]): Time when process starts.
This value is used to decide timeout argument of distributed training.
"""
# pylint: disable=too-many-instance-attributes
def __init__(
self,
train_func: Callable,
gpu_ids: str,
rdzv_endpoint: str = "localhost:0",
base_rank: int = 0,
world_size: int = 0,
start_time: Optional[datetime.datetime] = None,
):
if ":" not in rdzv_endpoint:
raise ValueError("rdzv_endpoint must be in form <host>:<port>.")
host, port = rdzv_endpoint.split(":")
if port == "0":
assert host in ["localhost", "127.0.0.1"]
port = _get_free_port()
rdzv_endpoint = f"{host}:{port}"
self._train_func = train_func
self._gpu_ids = get_gpu_ids(gpu_ids)
self._rdzv_endpoint = rdzv_endpoint
self._base_rank = base_rank
if world_size == 0:
world_size = len(self._gpu_ids)
self._world_size = world_size
self._main_pid = os.getpid()
self._processes: List[mp.Process] = []
if start_time is not None:
elapsed_time = datetime.datetime.now() - start_time
if elapsed_time > datetime.timedelta(seconds=40):
os.environ["TORCH_DIST_TIMEOUT"] = str(int(elapsed_time.total_seconds() * 1.5))
def is_available(self) -> bool:
"""Check multi GPU training is available.
Returns:
bool:
whether multi GPU training is available.
"""
return (
len(self._gpu_ids) > 1
and "TORCHELASTIC_RUN_ID"
not in os.environ # If otx is executed by torchrun, then otx multi gpu interface is disabled.
)
def setup_multi_gpu_train(
self,
output_path: str,
optimized_hyper_parameters: Optional[ConfigurableParameters] = None,
):
"""Carry out what should be done to run multi GPU training.
Args:
output_path (str): output path where task output are saved.
optimized_hyper_parameters (ConfigurableParameters or None): hyper parameters reflecting HPO result.
Returns:
str:
If output_path is None, make a temporary directory and return it.
"""
if optimized_hyper_parameters is not None: # if HPO is executed, optimized HPs are applied to child processes
self._set_optimized_hp_for_child_process(optimized_hyper_parameters)
self._processes = self._spawn_multi_gpu_processes(output_path)
signal.signal(signal.SIGINT, self._terminate_signal_handler)
signal.signal(signal.SIGTERM, self._terminate_signal_handler)
self.initialize_multigpu_train(self._rdzv_endpoint, self._base_rank, 0, self._gpu_ids, self._world_size)
threading.Thread(target=self._check_child_processes_alive, daemon=True).start()
def finalize(self):
"""Join all child processes."""
for p in self._processes:
            p.join(30)  # Process.join() always returns None; only exitcode shows whether it finished
            if p.exitcode is None:
                p.kill()
@staticmethod
def initialize_multigpu_train(
rdzv_endpoint: str,
rank: int,
local_rank: int,
gpu_ids: List[int],
world_size: int,
):
"""Initilization for multi GPU training.
Args:
rdzv_endpoint (str): Rendezvous endpoint for multi-node training.
rank (int): The rank of worker within a worker group.
local_rank (int): The rank of worker within a local worker group.
            gpu_ids (List[int]): list including which GPU indices will be used.
world_size (int): Total number of workers in a worker group.
"""
host, port = rdzv_endpoint.split(":")
os.environ["MASTER_ADDR"] = host
os.environ["MASTER_PORT"] = port
os.environ["LOCAL_WORLD_SIZE"] = str(len(gpu_ids))
os.environ["WORLD_SIZE"] = str(world_size)
os.environ["LOCAL_RANK"] = str(local_rank)
os.environ["RANK"] = str(rank)
@staticmethod
def run_child_process(
train_func: Callable,
output_path: str,
rdzv_endpoint: str,
rank: int,
local_rank: int,
gpu_ids: List[int],
world_size: int,
):
"""Function for multi GPU child process to execute.
Args:
train_func (Callable): model training function.
output_path (str): output path where task output are saved.
rdzv_endpoint (str): Rendezvous endpoint for multi-node training.
rank (int): The rank of worker within a worker group.
local_rank (int): The rank of worker within a local worker group.
            gpu_ids (List[int]): list including which GPU indices will be used.
world_size (int): Total number of workers in a worker group.
"""
# initialize start method
mp.set_start_method(method=None, force=True)
gpus_arg_idx = sys.argv.index("--gpus")
for _ in range(2):
sys.argv.pop(gpus_arg_idx)
if "--enable-hpo" in sys.argv:
sys.argv.remove("--enable-hpo")
set_arguments_to_argv(["-o", "--output"], output_path)
set_arguments_to_argv("--rdzv-endpoint", rdzv_endpoint)
MultiGPUManager.initialize_multigpu_train(rdzv_endpoint, rank, local_rank, gpu_ids, world_size)
threading.Thread(target=MultiGPUManager.check_parent_processes_alive, daemon=True).start()
train_func()
@staticmethod
def check_parent_processes_alive():
"""Check parent process is alive and if not, exit by itself."""
cur_process = psutil.Process()
parent = cur_process.parent()
while True:
time.sleep(1)
if not parent.is_running():
break
logger.warning("Parent process is terminated abnormally. Process exits.")
cur_process.kill()
def _spawn_multi_gpu_processes(self, output_path: str) -> List[mp.Process]:
processes = []
ctx = mp.get_context("spawn")
# set CUDA_VISIBLE_DEVICES to make child process use proper GPU
origin_cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES")
if origin_cuda_visible_devices is not None:
cuda_visible_devices = origin_cuda_visible_devices.split(",")
else:
cuda_visible_devices = [str(i) for i in range(torch.cuda.device_count())]
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([cuda_visible_devices[gpu_idx] for gpu_idx in self._gpu_ids])
for rank in range(1, len(self._gpu_ids)):
task_p = ctx.Process(
target=MultiGPUManager.run_child_process,
args=(
self._train_func,
output_path,
self._rdzv_endpoint,
self._base_rank + rank,
rank,
self._gpu_ids,
self._world_size,
),
)
task_p.start()
processes.append(task_p)
if origin_cuda_visible_devices is None:
del os.environ["CUDA_VISIBLE_DEVICES"]
else:
os.environ["CUDA_VISIBLE_DEVICES"] = origin_cuda_visible_devices
return processes
def _terminate_signal_handler(self, signum, _frame):
        # This code prevents child processes from being killed unintentionally by processes forked from the main process
if self._main_pid != os.getpid():
sys.exit()
self._kill_child_process()
        signal_name = {2: "SIGINT", 15: "SIGTERM"}
        logger.warning(f"{signal_name[signum]} was sent. Process exits.")
sys.exit(1)
def _kill_child_process(self):
for process in self._processes:
if process.is_alive():
logger.warning(f"Kill child process {process.pid}")
process.kill()
def _set_optimized_hp_for_child_process(self, hyper_parameters: ConfigurableParameters):
set_arguments_to_argv(
"--learning_parameters.learning_rate",
str(hyper_parameters.learning_parameters.learning_rate), # type: ignore[attr-defined]
True,
)
set_arguments_to_argv(
"--learning_parameters.batch_size",
str(hyper_parameters.learning_parameters.batch_size), # type: ignore[attr-defined]
True,
)
def _check_child_processes_alive(self):
child_is_running = True
while child_is_running:
time.sleep(1)
for p in self._processes:
if not p.is_alive() and p.exitcode != 0:
child_is_running = False
break
logger.warning("Some of child processes are terminated abnormally. process exits.")
self._kill_child_process()
os.kill(self._main_pid, signal.SIGKILL)
|
39554c327f65c150832ede5b2468172d605282a4
|
72378321136063ebf5002b6103f2fe2f8007f961
|
/Chapter07/onboarding.py
|
3c9b0892c912359155f1164fc95c0234d103a4ca
|
[
"MIT"
] |
permissive
|
PacktPublishing/Neural-Network-Projects-with-Python
|
cf749542e032b352fe183b0962940face73ad6e9
|
e1decc34b0f52f946eee5d421e376c511959e772
|
refs/heads/master
| 2023-01-30T03:48:33.637619
| 2023-01-18T09:26:49
| 2023-01-18T09:26:49
| 148,056,623
| 307
| 178
|
MIT
| 2023-09-12T00:11:42
| 2018-09-09T19:02:32
|
Python
|
UTF-8
|
Python
| false
| false
| 689
|
py
|
onboarding.py
|
import cv2
import math
import utils
import face_detection
video_capture = cv2.VideoCapture(0)
counter = 5
while True:
_, frame = video_capture.read()
frame, face_box, face_coords = face_detection.detect_faces(frame)
text = 'Image will be taken in {}..'.format(math.ceil(counter))
if face_box is not None:
frame = utils.write_on_frame(frame, text, face_coords[0], face_coords[1]-10)
cv2.imshow('Video', frame)
cv2.waitKey(1)
counter -= 0.1
if counter <= 0:
cv2.imwrite('true_img.png', face_box)
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
print("Onboarding Image Captured")
|
70781e2c7618ccfee93f760d3c8a00f7b0f54326
|
c1ff870879152fba2b54eddfb7591ec322eb3061
|
/plugins/render/ogreRender/3rdParty/ogre/Tools/Blender2.6Export/ogre_mesh_exporter/mesh_panel.py
|
099a68cdbffd762b6d476a89271cefff7e0a3345
|
[
"MIT",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
MTASZTAKI/ApertusVR
|
1a9809fb7af81c3cd7fb732ed481ebe4ce66fefa
|
424ec5515ae08780542f33cc4841a8f9a96337b3
|
refs/heads/0.9
| 2022-12-11T20:03:42.926813
| 2019-10-11T09:29:45
| 2019-10-11T09:29:45
| 73,708,854
| 188
| 55
|
MIT
| 2022-12-11T08:53:21
| 2016-11-14T13:48:00
|
C++
|
UTF-8
|
Python
| false
| false
| 11,693
|
py
|
mesh_panel.py
|
# ##### BEGIN MIT LICENSE BLOCK #####
# Copyright (C) 2011 by Lih-Hern Pang
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ##### END MIT LICENSE BLOCK #####
import bpy
# Helper class to get linked skeleton settings from mesh object
def getSkeletonSettings(context):
# get linked armature
meshObject = context.object
parentObject = meshObject.parent
armatureObject = None
if (parentObject and meshObject.parent_type == 'ARMATURE'):
armatureObject = parentObject
else:
# check modifier stack, use first valid armature modifier.
for modifier in meshObject.modifiers:
if (modifier.type == 'ARMATURE' and
(modifier.use_vertex_groups or
modifier.use_bone_envelopes)):
armatureObject = modifier.object
return armatureObject.data.ogre_mesh_exporter \
if (armatureObject is not None) else None
class MeshExporterPanel(bpy.types.Panel):
bl_idname = "ogre3d_mesh_panel"
bl_label = "Ogre Mesh Exporter"
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_context = "data"
@classmethod
def poll(cls, context):
return context.mesh
def draw(self, context):
layout = self.layout
globalSettings = bpy.context.scene.ogre_mesh_exporter
meshSettings = context.mesh.ogre_mesh_exporter
skeletonSettings = getSkeletonSettings(context)
row = layout.row(True)
row.prop(meshSettings, "exportEnabled", icon = 'EXPORT', toggle = True)
row = row.row()
row.alignment = 'RIGHT'
row.scale_x = 0.3
row.enabled = meshSettings.exportEnabled
row.prop_enum(meshSettings, "exportTab", 'mesh', text = "", icon = 'MESH_MONKEY')
row.prop_enum(meshSettings, "exportTab", 'animation', text = "", icon = 'ANIM')
row.prop_enum(meshSettings, "exportTab", 'settings', text = "", icon = 'SETTINGS')
if (not meshSettings.exportEnabled): return
# prepare material slot shared vertex info.
subMeshProperties = meshSettings.subMeshProperties
materialList = context.mesh.materials
materialCount = len(materialList)
while (len(subMeshProperties) < materialCount): subMeshProperties.add() # add more items if needed.
while (len(subMeshProperties) > materialCount): subMeshProperties.remove(0) # remove items if needed.
if (meshSettings.exportTab == 'mesh'):
layout.label("Submesh Properties:")
submeshNames = list()
box = layout.box()
if (len(subMeshProperties) == 0): box.label("No Materials Defined", icon = 'INFO')
for index, subMeshProperty in enumerate(subMeshProperties):
row = box.row(True)
# Material index & name.
material = materialList[index]
row.label("[%d]%s" % (index, "NONE" if (material == None) else materialList[index].name), icon = 'ERROR' if (material == None) else 'MATERIAL')
# Submesh name.
subrow = row.row()
if (subMeshProperty.name in submeshNames): subrow.alert = True
else: submeshNames.append(subMeshProperty.name)
subrow.prop(subMeshProperty, "name", "")
# Use shared vertices.
row.prop(subMeshProperty, "useSharedVertices", "", icon = 'GROUP_VERTEX')
# Select vertices under this submesh.
if (context.mode == 'EDIT_MESH'):
prop = row.operator("ogre3d.select_submesh_vertices", "", icon='MESH_DATA')
prop.index = index
elif (meshSettings.exportTab == 'animation'):
# Animations.
layout.separator()
row = layout.row(True)
row.label("Animations:")
row.prop_enum(meshSettings, "animationTab", 'skel', icon = 'POSE_DATA')
row.prop_enum(meshSettings, "animationTab", 'pose', icon = 'OUTLINER_DATA_MESH')
row.prop_enum(meshSettings, "animationTab", 'morph', icon = 'OUTLINER_DATA_MESH')
box = layout.box()
table = box.column(True)
row = table.row()
col = row.column()
delCol = row.column()
# draw grid header.
row = col.row()
row.prop(globalSettings, "dummyTrue", toggle = True, text = "Action")
row.prop(globalSettings, "dummyTrue", toggle = True, text = "Name")
frameRow = row.row()
frameRow.scale_x = 0.5
frameRow.prop(globalSettings, "dummyTrue", toggle = True, text = "Start")
frameRow.prop(globalSettings, "dummyTrue", toggle = True, text = "End")
delCol.prop(globalSettings, "dummyTrue", text = "", icon = 'SCRIPTWIN')
# Populate skeletal animation action list.
if (meshSettings.animationTab == 'skel'):
if (skeletonSettings is not None):
if (len(skeletonSettings.exportSkeletonActions) == 0):
table.prop(globalSettings, "dummyFalse", toggle = True, text = "No Animations")
for index, item in enumerate(skeletonSettings.exportSkeletonActions):
row = col.row()
row.prop(item, "action", text = "", icon = 'ACTION')
row.prop(item, "name", text = "")
frameRow = row.row()
frameRow.scale_x = 0.5
frameRow.prop(item, "startFrame", text = "")
frameRow.prop(item, "endFrame", text = "")
prop = delCol.operator("ogre3d.skeleton_delete_animation", text = "", icon = 'ZOOMOUT')
prop.index = index
box.operator("ogre3d.skeleton_add_animation", icon = 'ZOOMIN')
else:
table.prop(globalSettings, "dummyFalse", toggle = True, text = "No Armature Link")
elif (meshSettings.animationTab == 'pose'):
table.prop(globalSettings, "dummyFalse", toggle = True, text = "Not Implemented Yet")
else:
table.prop(globalSettings, "dummyFalse", toggle = True, text = "Not Implemented Yet")
else:
# Mesh override settings:
layout.label("Mesh Override Settings:")
col = layout.column(True)
row = col.row()
overrideSetting = meshSettings.requireMaterials_override
row.prop(meshSettings, "requireMaterials_override", "",
icon = 'PINNED' if overrideSetting else 'UNPINNED', toggle = True)
if (not overrideSetting):
row = row.row()
row.enabled = False
row.prop(meshSettings, "requireMaterials", toggle = True)
row = col.row()
overrideSetting = meshSettings.skeletonNameFollowMesh_override
row.prop(meshSettings, "skeletonNameFollowMesh_override", "",
icon = 'PINNED' if overrideSetting else 'UNPINNED', toggle = True)
if (not overrideSetting):
row = row.row()
row.enabled = False
row.prop(meshSettings, "skeletonNameFollowMesh", toggle = True)
row = col.row()
overrideSetting = meshSettings.applyModifiers_override
row.prop(meshSettings, "applyModifiers_override", "",
icon = 'PINNED' if overrideSetting else 'UNPINNED', toggle = True)
if (not overrideSetting):
row = row.row()
row.enabled = False
row.prop(meshSettings, "applyModifiers", toggle = True)
# XML Converter override settings:
layout.label("XML Converter Override Settings:")
col = layout.column(True)
row = col.row()
overrideSetting = meshSettings.extremityPoints_override
row.prop(meshSettings, "extremityPoints_override", "",
icon = 'PINNED' if overrideSetting else 'UNPINNED', toggle = True)
if (not overrideSetting):
row = row.row()
row.enabled = False
row.prop(meshSettings, "extremityPoints")
row = col.row()
overrideSetting = meshSettings.edgeLists_override
row.prop(meshSettings, "edgeLists_override", "",
icon = 'PINNED' if overrideSetting else 'UNPINNED', toggle = True)
if (not overrideSetting):
row = row.row()
row.enabled = False
row.prop(meshSettings, "edgeLists", toggle = True)
row = col.row()
overrideSetting = meshSettings.tangent_override
row.prop(meshSettings, "tangent_override", "",
icon = 'PINNED' if overrideSetting else 'UNPINNED', toggle = True)
if (not overrideSetting):
row = row.row()
row.enabled = False
row.prop(meshSettings, "tangent", toggle = True)
row = col.row()
overrideSetting = meshSettings.tangentSemantic_override
row.prop(meshSettings, "tangentSemantic_override", "",
icon = 'PINNED' if overrideSetting else 'UNPINNED', toggle = True)
if (not overrideSetting):
row = row.row()
row.enabled = False
row.prop(meshSettings, "tangentSemantic", "")
row = col.row()
overrideSetting = meshSettings.tangentSize_override
row.prop(meshSettings, "tangentSize_override", "",
icon = 'PINNED' if overrideSetting else 'UNPINNED', toggle = True)
if (not overrideSetting):
row = row.row()
row.enabled = False
row.prop(meshSettings, "tangentSize", "")
row = col.row()
overrideSetting = meshSettings.splitMirrored_override
row.prop(meshSettings, "splitMirrored_override", "",
icon = 'PINNED' if overrideSetting else 'UNPINNED', toggle = True)
if (not overrideSetting):
row = row.row()
row.enabled = False
row.prop(meshSettings, "splitMirrored", toggle = True)
row = col.row()
overrideSetting = meshSettings.splitRotated_override
row.prop(meshSettings, "splitRotated_override", "",
icon = 'PINNED' if overrideSetting else 'UNPINNED', toggle = True)
if (not overrideSetting):
row = row.row()
row.enabled = False
row.prop(meshSettings, "splitRotated", toggle = True)
row = col.row()
overrideSetting = meshSettings.reorganiseVertBuff_override
row.prop(meshSettings, "reorganiseVertBuff_override", "",
icon = 'PINNED' if overrideSetting else 'UNPINNED', toggle = True)
if (not overrideSetting):
row = row.row()
row.enabled = False
row.prop(meshSettings, "reorganiseVertBuff", toggle = True)
row = col.row()
overrideSetting = meshSettings.optimiseAnimation_override
row.prop(meshSettings, "optimiseAnimation_override", "",
icon = 'PINNED' if overrideSetting else 'UNPINNED', toggle = True)
if (not overrideSetting):
row = row.row()
row.enabled = False
row.prop(meshSettings, "optimiseAnimation", toggle = True)
class OperatorSelectSubmeshVertices(bpy.types.Operator):
bl_idname = "ogre3d.select_submesh_vertices"
bl_label = "Select"
bl_description = "Select submesh vertices."
index = bpy.props.IntProperty()
def invoke(self, context, event):
context.object.active_material_index = self.index
bpy.ops.mesh.select_all(action = 'DESELECT')
bpy.ops.object.material_slot_select()
return {'FINISHED'}
class OperatorSkeletonAddAnimation(bpy.types.Operator):
bl_idname = "ogre3d.skeleton_add_animation"
bl_label = "Add"
bl_description = "Add new skeleton animation."
def invoke(self, context, event):
skeletonSettings = getSkeletonSettings(context)
item = skeletonSettings.exportSkeletonActions.add()
item.onActionChanged(context)
return {'FINISHED'}
class OperatorSkeletonDeleteAnimation(bpy.types.Operator):
bl_idname = "ogre3d.skeleton_delete_animation"
bl_label = "Delete"
bl_description = "Delete skeleton animation."
index = bpy.props.IntProperty()
def invoke(self, context, event):
skeletonSettings = getSkeletonSettings(context)
skeletonSettings.exportSkeletonActions.remove(self.index)
return {'FINISHED'}
|
b20b0ab340b3d5d1eb61f3c6fe2f2f9fc86bcf06
|
59546d74cac6a55f5dc94990c7aec236feb062c7
|
/lungmask/__init__.py
|
bbebd0bdcf73cc46a0d5e4fa4bc693f963edf6f7
|
[
"Apache-2.0"
] |
permissive
|
JoHof/lungmask
|
2bba6f4ce5b59b3cad1185dc56342776f1de1d03
|
096bed3fc51437902265615060062258becc736c
|
refs/heads/master
| 2023-09-01T08:02:29.833551
| 2023-08-16T18:19:18
| 2023-08-16T18:19:18
| 224,678,136
| 562
| 144
|
Apache-2.0
| 2023-07-25T21:33:39
| 2019-11-28T15:08:11
|
Python
|
UTF-8
|
Python
| false
| false
| 28
|
py
|
__init__.py
|
from .mask import LMInferer
|
454b0dc5d676f931244be395453515e7228974b1
|
54606d99cd0c8bc2c34cded2bc433be1cc5a4968
|
/05-pseudo-whitening
|
c11ad627e12107fd7abf0a87ce248e734d0b3c7b
|
[
"MIT"
] |
permissive
|
jbornschein/mpi4py-examples
|
dfe71831149a2917f46a13252f169757efbecf9f
|
c0f5fa18bab612fb5afbd9a5bdbc47521d6c6dd6
|
refs/heads/master
| 2022-07-09T12:12:34.557268
| 2020-06-14T13:21:22
| 2020-06-14T13:21:22
| 809,089
| 333
| 129
|
MIT
| 2018-08-01T10:08:31
| 2010-07-31T09:51:02
|
Python
|
UTF-8
|
Python
| false
| false
| 2,942
|
05-pseudo-whitening
|
#!/usr/bin/env python
"""
How to run:
mpirun -np <NUM> ./pseudo-whitening <INPUT-IMAGES.h5> <OUTPUT-IMAGES.h5>
"""
from __future__ import division
import sys
import tables
import numpy as np
from numpy.fft import fft2, ifft2
from mpi4py import MPI
from parutils import pprint
#=============================================================================
# Main
comm = MPI.COMM_WORLD
in_fname = sys.argv[-2]
out_fname = sys.argv[-1]
try:
h5in = tables.openFile(in_fname, 'r')
except Exception:
    pprint("Error: Could not open file %s" % in_fname)
    sys.exit(1)
#
images = h5in.root.images
image_count, height, width = images.shape
image_count = min(image_count, 200)
pprint("============================================================================")
pprint(" Running %d parallel MPI processes" % comm.size)
pprint(" Reading images from '%s'" % in_fname)
pprint(" Processing %d images of size %d x %d" % (image_count, width, height))
pprint(" Writing whitened images into '%s'" % out_fname)
# Prepare convolution kernel in frequency space. A radial ramp |f| is a
# common choice for (pseudo-)whitening natural images; it is assumed here,
# since an all-zero kernel would blank every image.
freq_y = np.fft.fftfreq(height)[:, np.newaxis]
freq_x = np.fft.fftfreq(width)[np.newaxis, :]
kernel_ = np.sqrt(freq_x ** 2 + freq_y ** 2)
# rank 0 needs buffer space to gather data
if comm.rank == 0:
gbuf = np.empty( (comm.size, height, width) )
else:
gbuf = None
# Distribute workload so that each MPI process processes image number i, where
# i % comm.size == comm.rank.
#
# For example if comm.size == 4:
# rank 0: 0, 4, 8, ...
# rank 1: 1, 5, 9, ...
# rank 2: 2, 6, 10, ...
# rank 3: 3, 7, 11, ...
#
# Each process reads the image from the HDF file by itself. Sadly, python-tables
# does not support parallel writes from multiple processes into the same HDF
# file. So we have to serialize the write operation: Process 0 gathers all
# whitened images and writes them.
comm.Barrier() ### Start stopwatch ###
t_start = MPI.Wtime()
whi = np.zeros((height, width)) # so ranks with no image still gather valid data
for i_base in range(0, image_count, comm.size):
    i = i_base + comm.rank
    #
    if i < image_count:
        img = images[i] # load image from HDF file
        img_ = fft2(img) # 2D FFT
        whi_ = img_ * kernel_ # multiply with kernel in freq.-space
        whi = np.abs(ifft2(whi_)) # inverse FFT back into image space
# rank 0 gathers whitened images
comm.Gather(
[whi, MPI.DOUBLE], # send buffer
[gbuf, MPI.DOUBLE], # receive buffer
        root=0 # rank 0 is the root process
)
# rank 0 has to write into the HDF file
if comm.rank == 0:
        # Sequentially append each of the images (write disabled here:
        # no output file is ever opened)
for r in range(comm.size):
pass
#h5out.append( {'image': gbuf[r]} )
comm.Barrier()
t_diff = MPI.Wtime()-t_start ### Stop stopwatch ###
h5in.close()
#h5out.close()
pprint(
" Whitened %d images in %5.2f seconds: %4.2f images per second" %
(image_count, t_diff, image_count/t_diff)
)
pprint("============================================================================")
|
|
6193edb482426fc9ccb71388ee5673655f8fbef8
|
b2fef77e77f77b6cfd83da4ec2f89cbe73330844
|
/tests/test_torchscript_utils.py
|
ec24f388f1a0995dc884d4858ea267e541a60fa5
|
[
"Apache-2.0"
] |
permissive
|
Project-MONAI/MONAI
|
8ef2593cc5fd1cd16e13464f927fe563fe3f5bac
|
e48c3e2c741fa3fc705c4425d17ac4a5afac6c47
|
refs/heads/dev
| 2023-09-02T00:21:04.532596
| 2023-09-01T06:46:45
| 2023-09-01T06:46:45
| 214,485,001
| 4,805
| 996
|
Apache-2.0
| 2023-09-14T15:19:30
| 2019-10-11T16:41:38
|
Python
|
UTF-8
|
Python
| false
| false
| 4,089
|
py
|
test_torchscript_utils.py
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import os
import tempfile
import unittest
import torch
from monai.config import get_config_values
from monai.data import load_net_with_metadata, save_net_with_metadata
from monai.utils import JITMetadataKeys
class TestModule(torch.nn.Module):
def forward(self, x):
return x + 10
class TestTorchscript(unittest.TestCase):
def test_save_net_with_metadata(self):
"""Save a network without metadata to a file."""
m = torch.jit.script(TestModule())
with tempfile.TemporaryDirectory() as tempdir:
save_net_with_metadata(m, f"{tempdir}/test")
self.assertTrue(os.path.isfile(f"{tempdir}/test.ts"))
def test_save_net_with_metadata_ext(self):
"""Save a network without metadata to a file."""
m = torch.jit.script(TestModule())
with tempfile.TemporaryDirectory() as tempdir:
save_net_with_metadata(m, f"{tempdir}/test.zip")
self.assertTrue(os.path.isfile(f"{tempdir}/test.zip"))
def test_save_net_with_metadata_with_extra(self):
"""Save a network with simple metadata to a file."""
m = torch.jit.script(TestModule())
test_metadata = {"foo": [1, 2], "bar": "string"}
with tempfile.TemporaryDirectory() as tempdir:
save_net_with_metadata(m, f"{tempdir}/test", meta_values=test_metadata)
self.assertTrue(os.path.isfile(f"{tempdir}/test.ts"))
def test_load_net_with_metadata(self):
"""Save then load a network with no metadata or other extra files."""
m = torch.jit.script(TestModule())
with tempfile.TemporaryDirectory() as tempdir:
save_net_with_metadata(m, f"{tempdir}/test")
_, meta, extra_files = load_net_with_metadata(f"{tempdir}/test.ts")
del meta[JITMetadataKeys.TIMESTAMP.value] # no way of knowing precisely what this value would be
self.assertEqual(meta, get_config_values())
self.assertEqual(extra_files, {})
def test_load_net_with_metadata_with_extra(self):
"""Save then load a network with basic metadata."""
m = torch.jit.script(TestModule())
test_metadata = {"foo": [1, 2], "bar": "string"}
with tempfile.TemporaryDirectory() as tempdir:
save_net_with_metadata(m, f"{tempdir}/test", meta_values=test_metadata)
_, meta, extra_files = load_net_with_metadata(f"{tempdir}/test.ts")
del meta[JITMetadataKeys.TIMESTAMP.value] # no way of knowing precisely what this value would be
test_compare = get_config_values()
test_compare.update(test_metadata)
self.assertEqual(meta, test_compare)
self.assertEqual(extra_files, {})
def test_save_load_more_extra_files(self):
"""Save then load extra file data from a torchscript file."""
m = torch.jit.script(TestModule())
test_metadata = {"foo": [1, 2], "bar": "string"}
more_extra_files = {"test.txt": b"This is test data"}
with tempfile.TemporaryDirectory() as tempdir:
save_net_with_metadata(m, f"{tempdir}/test", meta_values=test_metadata, more_extra_files=more_extra_files)
self.assertTrue(os.path.isfile(f"{tempdir}/test.ts"))
_, _, loaded_extra_files = load_net_with_metadata(f"{tempdir}/test.ts", more_extra_files=("test.txt",))
self.assertEqual(more_extra_files["test.txt"], loaded_extra_files["test.txt"])
if __name__ == "__main__":
unittest.main()
|
1b4ef1edc1950c55e1fe3aa0eeee520907341a47
|
091e97bcfe5acc0635bd601aa8497e377b74d41a
|
/ansible/roles/lib_openshift_3.2/build/src/oc_scale.py
|
1318608492f8d7e85a3867daf4a89209587ccae1
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
openshift/openshift-tools
|
d59b63778f25cb8fb3c7a0253afe22a173e72f9d
|
e342f6659a4ef1a188ff403e2fc6b06ac6d119c7
|
refs/heads/prod
| 2023-08-30T01:52:04.108978
| 2022-03-23T21:07:28
| 2022-03-23T21:07:28
| 36,827,699
| 170
| 254
|
Apache-2.0
| 2022-06-16T12:11:51
| 2015-06-03T20:09:22
|
Python
|
UTF-8
|
Python
| false
| false
| 2,075
|
py
|
oc_scale.py
|
# vim: expandtab:tabstop=4:shiftwidth=4
# pylint: skip-file
# pylint: disable=too-many-instance-attributes
class OCScale(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
resource_name,
namespace,
replicas,
kind,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OCScale '''
super(OCScale, self).__init__(namespace, kubeconfig)
self.kind = kind
self.replicas = replicas
self.name = resource_name
self.namespace = namespace
self.kubeconfig = kubeconfig
self.verbose = verbose
self._resource = None
@property
def resource(self):
''' property function for resource var '''
if not self._resource:
self.get()
return self._resource
@resource.setter
def resource(self, data):
''' setter function for resource var '''
self._resource = data
def get(self):
'''return replicas information '''
vol = self._get(self.kind, self.name)
if vol['returncode'] == 0:
if self.kind == 'dc':
self.resource = DeploymentConfig(content=vol['results'][0])
vol['results'] = [self.resource.get_replicas()]
if self.kind == 'rc':
self.resource = ReplicationController(content=vol['results'][0])
vol['results'] = [self.resource.get_replicas()]
return vol
def put(self):
        '''update the replica count on the resource '''
self.resource.update_replicas(self.replicas)
#self.resource.get_volumes()
#self.resource.update_volume_mount(self.volume_mount)
return self._replace_content(self.kind, self.name, self.resource.yaml_dict)
def needs_update(self):
''' verify whether an update is needed '''
return self.resource.needs_update_replicas(self.replicas)
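
# Illustrative flow (hypothetical resource and values; OpenShiftCLI and the
# DeploymentConfig/ReplicationController helpers come from the surrounding
# generated module):
#
#   scaler = OCScale('router', 'default', 3, 'dc')
#   if scaler.needs_update():
#       scaler.put()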
|
3579a46f04dd766b68699d9be1dedc873add01c6
|
0009c76a25c89a0d61d3bc9e10071da58bdfaa5a
|
/py/ztools/Fs/pyNPDM.py
|
328874bf59d46175b5df1759344090b07127058e
|
[
"MIT"
] |
permissive
|
julesontheroad/NSC_BUILDER
|
84054e70a80b572088b0806a47ceb398302451b5
|
e9083e83383281bdd9e167d3141163dcc56b6710
|
refs/heads/master
| 2023-07-05T05:23:17.114363
| 2021-11-15T19:34:47
| 2021-11-15T19:34:47
| 149,040,416
| 1,249
| 143
|
MIT
| 2022-12-15T03:19:33
| 2018-09-16T22:18:01
|
Python
|
UTF-8
|
Python
| false
| false
| 6,206
|
py
|
pyNPDM.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import io
import re
from Utils import read_at, read_u8, read_u32, read_u64, memdump
class FsAccessControl:
def __init__(self, fp):
self.f = fp
self._parse()
def __str__(self):
string = ' FS Access Control:\n'
string += ' Version: %d\n' % self.version
string += ' Permission bitmask: %016x\n' % self.permissions
return string
def _parse(self):
self.version = read_u8(self.f, 0x0)
self.permissions = read_u64(self.f, 0x4)
class ServiceAccessControl:
def __init__(self, fp):
self.f = fp
self._parse()
def __str__(self):
string = ' Service Access Control:\n'
string += ' ' + '\n '.join('%02x - %s' % (int.from_bytes(b, byteorder='little'), s)
for s, b in self.services.items()) + '\n'
return string
def _parse(self):
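        # Each SAC entry is a flags/length byte followed by the service name;
        # the pattern below heuristically matches that byte (0x02-0x07) plus
        # the printable name, so self.services maps name -> raw prefix byte.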
serv = re.compile(b'([\x02-\x07])(([a-z0-9-]+:?)+)', re.I)
self.services = {r[2].decode(): r[1] for r in re.finditer(serv, self.f.read())}
class KernelAccessControl:
def __init__(self, fp):
self.f = fp
self._parse()
def __str__(self):
string = ' Kernel Access Control:\n'
string += ' Placeholder\n'
return string
def _parse(self):
# TODO
pass
class ACID:
def __init__(self, fp):
self.f = fp
self._parse()
def __str__(self):
string = ' ACID:\n'
string += ' ACID flags: %d\n' % self.flags
string += ' TitleID range: %016x-%016x\n' % (self.tid_min, self.tid_max)
string += memdump(self.rsa_sig, message=' RSA signature: \n') + '\n'
string += memdump(self.rsa_pubk, message=' RSA public key: \n') + '\n'
string += '\n' + str(self.fs_access_control)
string += '\n' + str(self.service_access_control)
string += '\n' + str(self.kernel_access_control)
return string
def _parse(self):
self.rsa_sig = read_at(self.f, 0x0, 0x100)
self.rsa_pubk = read_at(self.f, 0x100, 0x100)
if read_at(self.f, 0x200, 0x4) != b'ACID':
raise ValueError('Invalid ACID magic')
self.flags = read_u32(self.f, 0x20C)
self.tid_min = read_u64(self.f, 0x210)
self.tid_max = read_u64(self.f, 0x218)
fs_access_control_offset = read_u32(self.f, 0x220)
fs_access_control_size = read_u32(self.f, 0x224)
service_access_control_offset = read_u32(self.f, 0x228)
service_access_control_size = read_u32(self.f, 0x22C)
kernel_access_control_offset = read_u32(self.f, 0x230)
kernel_access_control_size = read_u32(self.f, 0x234)
self.fs_access_control = FsAccessControl(io.BytesIO(read_at(self.f,
fs_access_control_offset, fs_access_control_size)))
self.service_access_control = ServiceAccessControl(io.BytesIO(read_at(self.f,
service_access_control_offset, service_access_control_size)))
self.kernel_access_control = KernelAccessControl(io.BytesIO(read_at(self.f,
kernel_access_control_offset, kernel_access_control_size)))
class ACI0:
def __init__(self, fp):
self.f = fp
self._parse()
def __str__(self):
string = ' ACI0:\n'
string += ' TitleID: %016x\n' % self.tid
string += '\n' + str(self.fs_access_control)
string += '\n' + str(self.service_access_control)
string += '\n' + str(self.kernel_access_control)
return string
def _parse(self):
if read_at(self.f, 0x0, 0x4) != b'ACI0':
raise ValueError('Invalid ACI0 magic')
self.tid = read_u64(self.f, 0x10)
fs_access_control_offset = read_u32(self.f, 0x20)
fs_access_control_size = read_u32(self.f, 0x24)
service_access_control_offset = read_u32(self.f, 0x28)
service_access_control_size = read_u32(self.f, 0x2C)
kernel_access_control_offset = read_u32(self.f, 0x30)
kernel_access_control_size = read_u32(self.f, 0x34)
self.fs_access_control = FsAccessControl(io.BytesIO(read_at(self.f,
fs_access_control_offset, fs_access_control_size)))
self.service_access_control = ServiceAccessControl(io.BytesIO(read_at(self.f,
service_access_control_offset, service_access_control_size)))
self.kernel_access_control = KernelAccessControl(io.BytesIO(read_at(self.f,
kernel_access_control_offset, kernel_access_control_size)))
class NPDM:
process_categories = {
0: 'Regular title',
1: 'Kernel built-in'
}
def __init__(self, fp):
self.f = fp
self._parse()
def __str__(self):
string = ''
#string += 'NPDM:\n'
string += ' Title name: %s\n' % self.title_name
string += ' Process category: %s\n' % self.process_category
string += ' Product code: %s\n' % self.product_code
string += ' MMU flags: %d\n' % self.mmu_flags
        string += '    Main thread priority: %d\n' % self.main_thread_priority
string += ' Main thread stack size: 0x%x\n' % self.main_thread_stack_size
string += ' Default CPU ID: %d\n' % self.default_cpu_id
string += ' System resource size: %d\n' % self.resource_size
string += '\n'
string += str(self.acid)
string += '\n'
string += str(self.aci0)
return string
def _parse(self):
if read_at(self.f, 0x0, 0x4) != b'META':
raise ValueError('Invalid META magic')
self.mmu_flags = read_u8(self.f, 0xC)
self.main_thread_priority = read_u8(self.f, 0xE)
self.default_cpu_id = read_u8(self.f, 0xF)
self.resource_size = read_u32(self.f, 0x14)
self.process_category = self.process_categories[read_u32(self.f, 0x18)]
self.main_thread_stack_size = read_u32(self.f, 0x1C)
        # Title name and product code are 0x10-byte NUL-padded fields
        self.title_name = read_at(self.f, 0x20, 0x10).strip(b'\0').decode()
        self.product_code = read_at(self.f, 0x30, 0x10).strip(b'\0').decode()
        if self.product_code == '':
            self.product_code = 0
aci0_offset = read_u32(self.f, 0x70)
aci0_size = read_u32(self.f, 0x74)
acid_offset = read_u32(self.f, 0x78)
acid_size = read_u32(self.f, 0x7C)
self.acid = ACID(io.BytesIO(read_at(self.f, acid_offset, acid_size)))
self.aci0 = ACI0(io.BytesIO(read_at(self.f, aci0_offset, aci0_size)))
def ret(self):
aci0_offset = read_u32(self.f, 0x70)
aci0_size = read_u32(self.f, 0x74)
acid_offset = read_u32(self.f, 0x78)
acid_size = read_u32(self.f, 0x7C)
return read_at(self.f, 0x0, acid_offset+acid_size)
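
# Illustrative usage (assuming an extracted 'main.npdm' file is at hand):
#
#   with open('main.npdm', 'rb') as f:
#       print(NPDM(f))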
|
4837c21e95fe4e05f7328e0cbedf90b4db29f033
|
559f3dec0964d2e0f86c6c871371fe779cf3726c
|
/contrib/QualityInspector/qinspector/uad/models/stfpm.py
|
3c1d5e50c98dcf6e8e2bf58a00b8a9be932f0fb8
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleSeg
|
319ab26665ea492527a1949671650135123ffc39
|
2c8c35a8949fef74599f5ec557d340a14415f20d
|
refs/heads/release/2.8
| 2023-08-31T09:08:06.724717
| 2023-08-18T01:59:56
| 2023-08-18T01:59:56
| 204,380,779
| 8,531
| 1,866
|
Apache-2.0
| 2023-09-12T02:30:42
| 2019-08-26T02:32:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,830
|
py
|
stfpm.py
|
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
from paddle.vision.models.resnet import resnet18, resnet34, resnet50, resnet101
from qinspector.cvlib.workspace import register
models = {
"resnet18": resnet18,
"resnet34": resnet34,
"resnet50": resnet50,
"resnet101": resnet101,
}
@register
class ResNet_MS3(nn.Layer):
def __init__(self, arch='resnet18', pretrained=True):
super(ResNet_MS3, self).__init__()
assert arch in models.keys(), 'arch {} not supported'.format(arch)
net = models[arch](pretrained=pretrained)
# ignore the last block and fc
self.model = paddle.nn.Sequential(*(list(net.children())[:-2]))
def forward(self, x):
res = []
for name, module in self.model._sub_layers.items():
x = module(x)
if name in ['4', '5', '6']:
res.append(x)
return res
class ResNet_MS3_EXPORT(nn.Layer):
def __init__(self, student, teacher):
super(ResNet_MS3_EXPORT, self).__init__()
self.student = student
self.teacher = teacher
def forward(self, x):
result = []
result.append(self.student(x))
result.append(self.teacher(x))
return result
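
# Illustrative usage (a sketch with an assumed input shape; in STFPM the
# student copies the teacher's architecture but starts from random weights):
#
#   teacher = ResNet_MS3(arch='resnet18', pretrained=True)
#   student = ResNet_MS3(arch='resnet18', pretrained=False)
#   export_net = ResNet_MS3_EXPORT(student, teacher)
#   student_feats, teacher_feats = export_net(paddle.rand([1, 3, 256, 256]))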
|
870881a8aeb9c1c8ba3e939e888e583ad56b2a27
|
4724ff27971f185099a27ec04f329d43c962133c
|
/tests/cli/27_test_info.py
|
b16047df3f49a8640b41c482dc4dc886919641d7
|
[
"MIT"
] |
permissive
|
WordOps/WordOps
|
2efef63cbfb9741b7f8b6554bb26d53fdb88bea8
|
64ad38600f98e021e699ee4216d7186197f01b3f
|
refs/heads/master
| 2023-08-19T09:05:03.322182
| 2023-08-15T18:08:15
| 2023-08-15T18:08:15
| 121,243,203
| 1,214
| 242
|
MIT
| 2023-09-12T12:24:49
| 2018-02-12T12:19:24
|
Python
|
UTF-8
|
Python
| false
| false
| 452
|
py
|
27_test_info.py
|
from wo.utils import test
from wo.cli.main import WOTestApp
class CliTestCaseInfo(test.WOTestCase):
def test_wo_cli_info_mysql(self):
with WOTestApp(argv=['info', '--mysql']) as app:
app.run()
def test_wo_cli_info_php(self):
with WOTestApp(argv=['info', '--php']) as app:
app.run()
def test_wo_cli_info_nginx(self):
with WOTestApp(argv=['info', '--nginx']) as app:
app.run()
|
14590521a96abeab9a57953c7b9b797b781efbce
|
7a7e2201642a730460dd4d3b0441df3710898787
|
/PythonWidget/algorithms/search_all_files.py
|
351d6f8eb84c4958deff4f05f4dab540d153bd1c
|
[
"BSD-3-Clause"
] |
permissive
|
xiaodongxiexie/python-widget
|
87118cbd75927f2f181fc5c9ff1a0fbd1c12af27
|
58fd929ee57884a73a1d586c7b891c82b9727f93
|
refs/heads/master
| 2023-04-02T03:13:51.929149
| 2023-03-23T02:17:21
| 2023-03-23T02:17:21
| 89,505,063
| 188
| 55
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,471
|
py
|
search_all_files.py
|
#coding: utf-8
# Recursively list all files under the given directory, pretty-printing
# nested entries with indentation.
from __future__ import print_function
import os
import glob
def searchFile(root_path, flag=0):
    if glob.glob(os.path.join(root_path, '*')):
        for x in glob.glob(os.path.join(root_path, '*')):
            if flag == 1:
                print('\t\t', x)
            else:
                print(x)
            searchFile(x, flag=1)
# Print and return the total size of all files under the given path,
# in MB when style == 'M', otherwise in GB.
def check_memory(path, style='M'):
    i = 0
    for dirpath, dirname, filename in os.walk(path):
        for ii in filename:
            i += os.path.getsize(os.path.join(dirpath, ii))
    if style == 'M':
        memory = i / 1024. / 1024.
    else:
        memory = i / 1024. / 1024. / 1024.
    print(memory)
    return memory
import ctypes
import os
import platform
import sys
# Get the free space of a folder/drive, in MB
def get_free_space_mb(folder):
    """ Return folder/drive free space (in MB)
    """
    if platform.system() == 'Windows':
        free_bytes = ctypes.c_ulonglong(0)
        ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(folder), None, None, ctypes.pointer(free_bytes))
        return free_bytes.value / 1024 / 1024
    else:
        st = os.statvfs(folder)
        return st.f_bavail * st.f_frsize / 1024 / 1024
if __name__ == '__main__':
searchFile('.')
check_memory('.', '')
get_free_space_mb('.')
|
13edc81d2fcb936b86378291dccdc18e73458dfc
|
80d505489f5354d4b29156d6eea7e3516162bcc7
|
/exercises/practice/say/.meta/example.py
|
95d6d44770fd6035b42f5196213ac2aa958e2f1a
|
[
"Python-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
exercism/python
|
419e89690070eef42fc4c932faa0df0706d5c222
|
1e71b8a00c8b34c251d785f0a10843efc5234994
|
refs/heads/main
| 2023-08-29T03:18:02.845245
| 2023-08-25T12:50:16
| 2023-08-25T12:50:16
| 17,274,389
| 1,588
| 1,513
|
MIT
| 2023-09-14T20:33:13
| 2014-02-28T03:48:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,478
|
py
|
example.py
|
def say(number):
small = dict(enumerate((
'zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight',
'nine', 'ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen',
'sixteen', 'seventeen', 'eighteen', 'nineteen')))
tens = {20: 'twenty', 30: 'thirty', 40: 'forty', 50: 'fifty',
60: 'sixty', 70: 'seventy', 80: 'eighty', 90: 'ninety'}
    kilo = 10 ** 3
    mega = 10 ** 6
    giga = 10 ** 9
    tera = 10 ** 12
if number < 0:
raise ValueError('input out of range')
if number >= tera:
raise ValueError('input out of range')
if number < 20:
return small[number]
if number < 100:
if number % 10 == 0:
return tens[number]
return tens[number // 10 * 10] + '-' + small[number % 10]
if number < kilo:
if number % 100 == 0:
return small[number // 100] + ' hundred'
return small[number // 100] + ' hundred ' + say(number % 100)
if number < mega:
if number % kilo == 0:
return say(number // kilo) + ' thousand'
return say(number // kilo) + ' thousand ' + say(number % kilo)
if number < giga:
if number % mega == 0:
return say(number // mega) + ' million'
return say(number // mega) + ' million ' + say(number % mega)
if number % giga == 0:
return say(number // giga) + ' billion'
return say(number // giga) + ' billion ' + say(number % giga)
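
# Illustrative values, following the recursion above:
#
#   say(14) -> 'fourteen'
#   say(22) -> 'twenty-two'
#   say(1234) -> 'one thousand two hundred thirty-four'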
|
0107eb09026a318b5a64191fa21d1bfe2db1e398
|
6bbadf1d6c8f23cd08d643324003d9c46e99bae2
|
/filterpy/examples/__init__.py
|
e8bd52ef77c623dc8d2ce76e0366df83bae11367
|
[
"MIT"
] |
permissive
|
rlabbe/filterpy
|
83293ffc05e10a9ec6dc06ff22006dadb3285ff3
|
3b51149ebcff0401ff1e10bf08ffca7b6bbc4a33
|
refs/heads/master
| 2023-08-23T02:15:53.525750
| 2022-08-22T18:21:12
| 2022-08-22T18:21:12
| 21,843,083
| 2,957
| 661
|
MIT
| 2022-09-22T16:35:35
| 2014-07-15T02:15:19
|
Python
|
UTF-8
|
Python
| false
| false
| 280
|
py
|
__init__.py
|
# -*- coding: utf-8 -*-
#pylint: disable=wildcard-import
""" Contains various example, mostly very outdated now."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__all__ = ["radar_sim"]
from .radar_sim import *
|
3547b40a30d474b6a6faeaa6bac7ed964bc6ad5a
|
b40d1a26ea04a19ec0da7bf55db84b7ee36cc898
|
/leetcode.com/python/681_Next_Closest_Time.py
|
b9360823d19f414d38cea95220bdabe8d38ff6f1
|
[
"MIT"
] |
permissive
|
partho-maple/coding-interview-gym
|
5e8af7d404c28d4b9b52e5cffc540fd51d8025cf
|
20ae1a048eddbc9a32c819cf61258e2b57572f05
|
refs/heads/master
| 2022-09-11T16:36:01.702626
| 2022-03-14T08:39:47
| 2022-03-14T08:39:47
| 69,802,909
| 862
| 438
|
MIT
| 2022-08-18T06:42:46
| 2016-10-02T14:51:31
|
Python
|
UTF-8
|
Python
| false
| false
| 2,631
|
py
|
681_Next_Closest_Time.py
|
import bisect
# My initial solution; not a correct solution
class Solution(object):
def nextClosestTime(self, time):
"""
:type time: str
:rtype: str
"""
digits = list(time)
digits.pop(2)
sortedDigits = sorted(digits)
for idx in range(len(digits) - 1, -1, -1):
digit = digits[idx]
nextPossibleDigitIdx = bisect.bisect_right(sortedDigits, digit)
if nextPossibleDigitIdx >= len(digits):
continue
if idx == 3:
digits[3] = sortedDigits[nextPossibleDigitIdx]
break
elif idx == 2 and int(sortedDigits[nextPossibleDigitIdx]) < 6:
digits[2] = sortedDigits[nextPossibleDigitIdx]
break
elif idx == 1:
if int(digits[0]) < 2:
digits[1] = sortedDigits[nextPossibleDigitIdx]
break
elif int(digits[0]) == 2 and int(sortedDigits[nextPossibleDigitIdx]) < 4:
digits[1] = sortedDigits[nextPossibleDigitIdx]
break
elif idx == 0:
if int(sortedDigits[nextPossibleDigitIdx]) < 3:
digits[0] = sortedDigits[nextPossibleDigitIdx]
break
else:
digits[1] = digits[0]
digits[2] = digits[0]
digits[3] = digits[0]
        hours = digits[0:2]
        minutes = digits[2:]
        return "".join(hours) + ":" + "".join(minutes)
# https://tinyurl.com/vupwnhw
class Solution(object):
def nextClosestTime(self, time):
"""
:type time: str
:rtype: str
"""
        hour, minute = time.split(":")
        # Generate all possible 2-digit values
        # There are at most 16 sorted values here
        digits = sorted(set(hour + minute))
        twoDigitValues = [a + b for a in digits for b in digits]
        # Check if the next valid minute is within the hour
        minuteIndex = twoDigitValues.index(minute)
        if minuteIndex + 1 < len(twoDigitValues) and twoDigitValues[minuteIndex + 1] < "60":
            return hour + ":" + twoDigitValues[minuteIndex + 1]
        # Check if the next valid hour is within the day
        hourIndex = twoDigitValues.index(hour)
        if hourIndex + 1 < len(twoDigitValues) and twoDigitValues[hourIndex + 1] < "24":
            return twoDigitValues[hourIndex + 1] + ":" + twoDigitValues[0]
        # Return the earliest time of the next day
        return twoDigitValues[0] + ":" + twoDigitValues[0]
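
# Illustrative checks against the second solution (hypothetical driver, not
# part of the original file):
#
#   Solution().nextClosestTime("19:34") # -> "19:39"
#   Solution().nextClosestTime("23:59") # -> "22:22"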
|