Dataset schema (28 columns). Each record below lists these fields in this order, with `|` separating the values:

| Column | Type | Range / distinct values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4 to 721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 57 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] | 2012-06-28 18:51:49 to 2023-09-14 21:59:16, nullable |
| gha_created_at | timestamp[ns] | 2008-02-11 22:55:26 to 2023-08-10 11:14:58, nullable |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | length 3 to 113 |
| content | string | length 6 to 10.2M |
95de3b65d52623abb3de954098fd277c94d85626
|
263170e7dca79883314273bb35aef1449e018361
|
/tests/fixtures/linecoverage/setter_getter.py
|
ca3010d26b771110af55b11016cad85f7871d249
|
[
"CC-BY-4.0",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
se2p/pynguin
|
029cfd9c43c08a2f687a816749828054e409646e
|
cc083252c7054824bfaf200533a8b7ad45f7c4fb
|
refs/heads/main
| 2023-08-23T16:58:04.568755
| 2023-08-18T13:11:44
| 2023-08-18T13:11:44
| 282,944,472
| 1,223
| 65
|
MIT
| 2023-08-18T13:12:29
| 2020-07-27T15:50:19
|
Python
|
UTF-8
|
Python
| false
| false
| 308
|
py
|
setter_getter.py
|
# This file is part of Pynguin.
#
# SPDX-FileCopyrightText: 2019–2023 Pynguin Contributors
#
# SPDX-License-Identifier: MIT
#
class SetterGetter:
    attribute = 0

    def setter(self, new_value) -> None:
        self.attribute = new_value

    def getter(self) -> int:
        return self.attribute
|
c46466571d167d5208b28e70eace77d1da61972d
|
ef2d4ed65259b3f614426664939e9fb938715299
|
/darknet/examples/detector.py
|
40bb365e68211c513db9d63847ac95070f5eab98
|
[
"MIT",
"LicenseRef-scancode-yolo-1.0",
"WTFPL",
"LicenseRef-scancode-yolo-2.0",
"GPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
guanshuicheng/invoice
|
292ab7bd20ab07d4bdf9ca0f60ae2cf0e3bb06a4
|
bd201ed8e977421277775075a107a475bce0490d
|
refs/heads/master
| 2023-06-24T05:20:37.849072
| 2023-06-16T02:56:09
| 2023-06-16T02:56:09
| 200,155,602
| 1,399
| 379
|
MIT
| 2023-03-25T01:35:03
| 2019-08-02T03:06:41
|
C
|
UTF-8
|
Python
| false
| false
| 775
|
py
|
detector.py
|
# Stupid python path shit.
# Instead just add darknet.py to somewhere in your python path
# OK actually that might not be a great idea, idk, work in progress
# Use at your own risk. or don't, i don't care
import sys, os
sys.path.append(os.path.join(os.getcwd(),'python/'))
import darknet as dn
import pdb
dn.set_gpu(0)
net = dn.load_net("cfg/yolo-thor.cfg", "/home/pjreddie/backup/yolo-thor_final.weights", 0)
meta = dn.load_meta("cfg/thor.data")
r = dn.detect(net, meta, "data/bedroom.jpg")
print r
# And then down here you could detect a lot more images like:
r = dn.detect(net, meta, "data/eagle.jpg")
print r
r = dn.detect(net, meta, "data/giraffe.jpg")
print r
r = dn.detect(net, meta, "data/horses.jpg")
print r
r = dn.detect(net, meta, "data/person.jpg")
print r
|
5b64b889c19e908514be254a9c61453177afb907
|
e63ba8150f2596aaf573a8a09f048c6d4478bd3d
|
/lib/python/mod_python/Session.py
|
40b2b53a9d65cc16e5a651e82fe0ec4c68fbc649
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
grisha/mod_python
|
44ec5d1557be4416fb4ecfbc7afefdcc751840e3
|
9db86bca5106b5cf7ceca7645ec0208446c71e25
|
refs/heads/master
| 2023-08-30T10:54:09.295971
| 2023-08-18T17:28:08
| 2023-08-18T17:28:08
| 7,504,434
| 199
| 81
|
Apache-2.0
| 2023-04-21T00:58:56
| 2013-01-08T16:00:35
|
C
|
UTF-8
|
Python
| false
| false
| 29,136
|
py
|
Session.py
|
# vim: set sw=4 expandtab :
#
# Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy
# Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Originally developed by Gregory Trubetskoy.
#
import sys
PY2 = sys.version[0] == '2'
if PY2:
import apache, Cookie
import md5
import anydbm as dbm
from whichdb import whichdb
from cPickle import load, loads, dump, dumps
from cStringIO import StringIO
else:
from . import apache, Cookie
from hashlib import md5
import dbm
from dbm import whichdb
from io import StringIO
from pickle import load, loads, dump, dumps
import _apache
import os
import stat
import time
import random
import tempfile
import traceback
import re
COOKIE_NAME="pysid"
DFT_TIMEOUT=30*60 # 30 min
DFT_LOCK = True
CLEANUP_CHANCE=1000 # cleanups have 1 in CLEANUP_CHANCE chance
tempdir = tempfile.gettempdir()
def md5_hash(s):
if PY2:
return md5.new(s).hexdigest()
else:
if isinstance(s, str):
s = s.encode('latin1')
return md5(s).hexdigest()
def _init_rnd():
""" initialize random number generators
this is key in multithreaded env, see
python docs for random """
# query max number of threads
if _apache.mpm_query(apache.AP_MPMQ_IS_THREADED):
gennum = _apache.mpm_query(apache.AP_MPMQ_MAX_SPARE_THREADS)
else:
gennum = 10
# make generators
# this bit is from Python lib reference
g = random.Random(time.time())
result = [g]
for i in range(gennum - 1):
laststate = g.getstate()
g = random.Random()
g.setstate(laststate)
result.append(g)
return result
rnd_gens = _init_rnd()
rnd_iter = iter(rnd_gens)
def _get_generator():
# get rnd_iter.next(), or start over
# if we reached the end of it
global rnd_iter
try:
return next(rnd_iter)
except StopIteration:
# the small potential for two threads doing this
# seems does not warrant use of a lock
rnd_iter = iter(rnd_gens)
return next(rnd_iter)
def _new_sid(req):
# Make a number based on current time, pid, remote ip
# and two random ints, then hash with md5. This should
# be fairly unique and very difficult to guess.
#
# WARNING
# The current implementation of _new_sid returns an
# md5 hexdigest string. To avoid a possible directory traversal
# attack in FileSession the sid is validated using
# the _check_sid() method and the compiled regex
# validate_sid_re. The sid will be accepted only if len(sid) == 32
# and it only contains the characters 0-9 and a-f.
#
# If you change this implementation of _new_sid, make sure to also
# change the validation scheme, as well as the test_Session_illegal_sid()
# unit test in test/test.py.
# /WARNING
t = int(time.time()*10000)
pid = os.getpid()
g = _get_generator()
rnd1 = g.randint(0, 999999999)
rnd2 = g.randint(0, 999999999)
ip = req.connection.client_ip
return md5_hash("%d%d%d%d%s" % (t, pid, rnd1, rnd2, ip))
validate_sid_re = re.compile('[0-9a-f]{32}$')
def _check_sid(sid):
## Check the validity of the session id
# # The sid must be 32 characters long, and consisting of the characters
# 0-9 and a-f.
#
# The sid may be passed in a cookie from the client and as such
# should not be trusted. This is particularly important in
# FileSession, where the session filename is derived from the sid.
# A sid containing '/' or '.' characters could result in a directory
# traversal attack
return not not validate_sid_re.match(sid)
class BaseSession(dict):
def __init__(self, req, sid=None, secret=None, lock=1,
timeout=0):
self._req, self._sid, self._secret = req, sid, secret
self._lock = lock
self._new = 1
self._created = 0
self._accessed = 0
self._timeout = 0
self._locked = 0
self._invalid = 0
dict.__init__(self)
config = req.get_options()
if "mod_python.session.cookie_name" in config:
session_cookie_name = config.get("mod_python.session.cookie_name", COOKIE_NAME)
else:
# For backwards compatability with versions
# of mod_python prior to 3.3.
session_cookie_name = config.get("session_cookie_name", COOKIE_NAME)
if not self._sid:
# check to see if cookie exists
if secret:
cookie = Cookie.get_cookie(req, session_cookie_name,
Class=Cookie.SignedCookie,
secret=self._secret,
mismatch=Cookie.Cookie.IGNORE)
else:
cookie = Cookie.get_cookie(req, session_cookie_name)
if cookie:
self._sid = cookie.value
if self._sid:
if not _check_sid(self._sid):
if sid:
# Supplied explicitly by user of the class,
# raise an exception and make the user code
# deal with it.
raise ValueError("Invalid Session ID: sid=%s" % sid)
else:
# Derived from the cookie sent by browser,
# wipe it out so it gets replaced with a
# correct value.
self._sid = None
self.init_lock()
if self._sid:
# attempt to load ourselves
self.lock()
if self.load():
self._new = 0
if self._new:
# make a new session
if self._sid: self.unlock() # unlock old sid
self._sid = _new_sid(self._req)
self.lock() # lock new sid
Cookie.add_cookie(self._req, self.make_cookie())
self._created = time.time()
if timeout:
self._timeout = timeout
else:
self._timeout = DFT_TIMEOUT
self._accessed = time.time()
# need cleanup?
if random.randint(1, CLEANUP_CHANCE) == 1:
self.cleanup()
def make_cookie(self):
config = self._req.get_options()
if "mod_python.session.cookie_name" in config:
session_cookie_name = config.get("mod_python.session.cookie_name", COOKIE_NAME)
else:
# For backwards compatability with versions
# of mod_python prior to 3.3.
session_cookie_name = config.get("session_cookie_name", COOKIE_NAME)
if self._secret:
c = Cookie.SignedCookie(session_cookie_name, self._sid,
secret=self._secret)
else:
c = Cookie.Cookie(session_cookie_name, self._sid)
if "mod_python.session.application_domain" in config:
c.domain = config["mod_python.session.application_domain"]
if "mod_python.session.application_path" in config:
c.path = config["mod_python.session.application_path"]
elif "ApplicationPath" in config:
# For backwards compatability with versions
# of mod_python prior to 3.3.
c.path = config["ApplicationPath"]
else:
# the path where *Handler directive was specified
dirpath = self._req.hlist.directory
if dirpath:
docroot = self._req.document_root()
c.path = dirpath[len(docroot):]
else:
c.path = '/'
# Sometimes there is no path, e.g. when Location
# is used. When Alias or UserDir are used, then
# the path wouldn't match the URI. In those cases
# just default to '/'
if not c.path or not self._req.uri.startswith(c.path):
c.path = '/'
return c
def invalidate(self):
c = self.make_cookie()
c.expires = 0
Cookie.add_cookie(self._req, c)
self.delete()
self._invalid = 1
def load(self):
dict = self.do_load()
if dict == None:
return 0
if (time.time() - dict["_accessed"]) > dict["_timeout"]:
return 0
self._created = dict["_created"]
self._accessed = dict["_accessed"]
self._timeout = dict["_timeout"]
self.update(dict["_data"])
return 1
def save(self):
if not self._invalid:
dict = {"_data" : self.copy(),
"_created" : self._created,
"_accessed": self._accessed,
"_timeout" : self._timeout}
self.do_save(dict)
def delete(self):
self.do_delete()
self.clear()
def init_lock(self):
pass
def lock(self):
if self._lock:
_apache._global_lock(self._req.server, self._sid)
self._locked = 1
self._req.register_cleanup(unlock_session_cleanup, self)
def unlock(self):
if self._lock and self._locked:
_apache._global_unlock(self._req.server, self._sid)
self._locked = 0
def is_new(self):
return not not self._new
def id(self):
return self._sid
def created(self):
return self._created
def last_accessed(self):
return self._accessed
def timeout(self):
return self._timeout
def set_timeout(self, secs):
self._timeout = secs
def cleanup(self):
self.do_cleanup()
def __del__(self):
self.unlock()
def unlock_session_cleanup(sess):
sess.unlock()
###########################################################################
## DbmSession
def dbm_cleanup(data):
filename, server = data
_apache._global_lock(server, None, 0)
db = dbm.open(filename, 'c')
try:
old = []
s = db.first()
while 1:
key, val = s
dict = loads(val)
try:
if (time.time() - dict["_accessed"]) > dict["_timeout"]:
old.append(key)
except KeyError:
old.append(key)
try:
s = next(db)
except KeyError: break
for key in old:
try:
del db[key]
except: pass
finally:
db.close()
_apache._global_unlock(server, None, 0)
class DbmSession(BaseSession):
def __init__(self, req, dbm=None, sid=0, secret=None, dbmtype=dbm,
timeout=0, lock=1):
if not dbm:
opts = req.get_options()
if "mod_python.dbm_session.database_filename" in opts:
dbm = opts["mod_python.dbm_session.database_filename"]
elif "session_dbm" in opts:
# For backwards compatability with versions
# of mod_python prior to 3.3.
dbm = opts["session_dbm"]
elif "mod_python.dbm_session.database_directory" in opts:
dbm = os.path.join(opts.get('mod_python.dbm_session.database_directory', tempdir), 'mp_sess.dbm')
elif "mod_python.session.database_directory" in opts:
dbm = os.path.join(opts.get('mod_python.session.database_directory', tempdir), 'mp_sess.dbm')
else:
# For backwards compatability with versions
# of mod_python prior to 3.3.
dbm = os.path.join(opts.get('session_directory', tempdir), 'mp_sess.dbm')
self._dbmfile = dbm
self._dbmtype = dbmtype
BaseSession.__init__(self, req, sid=sid, secret=secret,
timeout=timeout, lock=lock)
def _set_dbm_type(self):
module = whichdb(self._dbmfile)
if module:
self._dbmtype = __import__(module)
def _get_dbm(self):
result = self._dbmtype.open(self._dbmfile, 'c', stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP)
if self._dbmtype is dbm:
self._set_dbm_type()
return result
def do_cleanup(self):
data = [self._dbmfile, self._req.server]
self._req.register_cleanup(dbm_cleanup, data)
self._req.log_error("DbmSession: registered database cleanup.",
apache.APLOG_NOTICE)
def do_load(self):
_apache._global_lock(self._req.server, None, 0)
dbm = self._get_dbm()
try:
if self._sid.encode() in dbm:
return loads(dbm[self._sid.encode()])
else:
return None
finally:
dbm.close()
_apache._global_unlock(self._req.server, None, 0)
def do_save(self, dict):
_apache._global_lock(self._req.server, None, 0)
dbm = self._get_dbm()
try:
dbm[self._sid.encode()] = dumps(dict)
finally:
dbm.close()
_apache._global_unlock(self._req.server, None, 0)
def do_delete(self):
_apache._global_lock(self._req.server, None, 0)
dbm = self._get_dbm()
try:
try:
del dbm[self._sid.encode()]
except KeyError: pass
finally:
dbm.close()
_apache._global_unlock(self._req.server, None, 0)
###########################################################################
## FileSession
DFT_FAST_CLEANUP = True
DFT_VERIFY_CLEANUP = True
DFT_GRACE_PERIOD = 240
DFT_CLEANUP_TIME_LIMIT = 2
# Credits : this was initially contributed by dharana <dharana@dharana.net>
class FileSession(BaseSession):
def __init__(self, req, sid=0, secret=None, timeout=0, lock=1,
fast_cleanup=-1, verify_cleanup=-1):
opts = req.get_options()
if fast_cleanup == -1:
if 'mod_python.file_session.enable_fast_cleanup' in opts:
self._fast_cleanup = true_or_false(opts.get('mod_python.file_session.enable_fast_cleanup', DFT_FAST_CLEANUP))
else:
# For backwards compatability with versions
# of mod_python prior to 3.3.
self._fast_cleanup = true_or_false(opts.get('session_fast_cleanup', DFT_FAST_CLEANUP))
else:
self._fast_cleanup = fast_cleanup
if verify_cleanup == -1:
if 'mod_python.file_session.verify_session_timeout' in opts:
self._verify_cleanup = true_or_false(opts.get('mod_python.file_session.verify_session_timeout', DFT_VERIFY_CLEANUP))
else:
# For backwards compatability with versions
# of mod_python prior to 3.3.
self._verify_cleanup = true_or_false(opts.get('session_verify_cleanup', DFT_VERIFY_CLEANUP))
else:
self._verify_cleanup = verify_cleanup
if 'mod_python.file_session.cleanup_grace_period' in opts:
self._grace_period = int(opts.get('mod_python.file_session.cleanup_grace_period', DFT_GRACE_PERIOD))
else:
# For backwards compatability with versions
# of mod_python prior to 3.3.
self._grace_period = int(opts.get('session_grace_period', DFT_GRACE_PERIOD))
if 'mod_python.file_session.cleanup_time_limit' in opts:
self._cleanup_time_limit = int(opts.get('mod_python.file_session.cleanup_time_limit',DFT_CLEANUP_TIME_LIMIT))
else:
# For backwards compatability with versions
# of mod_python prior to 3.3.
self._cleanup_time_limit = int(opts.get('session_cleanup_time_limit',DFT_CLEANUP_TIME_LIMIT))
if 'mod_python.file_session.database_directory' in opts:
self._sessdir = os.path.join(opts.get('mod_python.file_session.database_directory', tempdir), 'mp_sess')
elif 'mod_python.session.database_directory' in opts:
self._sessdir = os.path.join(opts.get('mod_python.session.database_directory', tempdir), 'mp_sess')
else:
# For backwards compatability with versions
# of mod_python prior to 3.3.
self._sessdir = os.path.join(opts.get('session_directory', tempdir), 'mp_sess')
# FIXME
if timeout:
self._cleanup_timeout = timeout
else:
self._cleanup_timeout = DFT_TIMEOUT
BaseSession.__init__(self, req, sid=sid, secret=secret,
timeout=timeout, lock=lock)
def do_cleanup(self):
data = {'req':self._req,
'sessdir':self._sessdir,
'fast_cleanup':self._fast_cleanup,
'verify_cleanup':self._verify_cleanup,
'timeout':self._cleanup_timeout,
'grace_period':self._grace_period,
'cleanup_time_limit': self._cleanup_time_limit,
}
self._req.register_cleanup(filesession_cleanup, data)
self._req.log_error("FileSession: registered filesession cleanup.",
apache.APLOG_NOTICE)
def do_load(self):
self.lock_file()
try:
try:
path = os.path.join(self._sessdir, self._sid[0:2])
filename = os.path.join(path, self._sid)
fp = open(filename,'rb')
try:
data = load(fp)
if (time.time() - data["_accessed"]) <= data["_timeout"]:
# Change the file access time to the current time so the
# cleanup does not delete this file before the request
# can save it's session data
os.utime(filename,None)
return data
finally:
fp.close()
except:
s = StringIO()
traceback.print_exc(file=s)
s = s.getvalue()
self._req.log_error('Error while loading a session : %s'%s)
return None
finally:
self.unlock_file()
def do_save(self, dict):
self.lock_file()
try:
try:
path = os.path.join(self._sessdir, self._sid[0:2])
if not os.path.exists(path):
make_filesession_dirs(self._sessdir)
filename = os.path.join(path, self._sid)
fp = open(filename, 'wb')
try:
dump(dict, fp, 2)
finally:
fp.close()
except:
s = StringIO()
traceback.print_exc(file=s)
s = s.getvalue()
self._req.log_error('Error while saving a session : %s'%s)
finally:
self.unlock_file()
def do_delete(self):
self.lock_file()
try:
try:
path = os.path.join(self._sessdir, self._sid[0:2])
filename = os.path.join(path, self._sid)
os.unlink(filename)
except Exception:
pass
finally:
self.unlock_file()
def lock_file(self):
# self._lock = 1 indicates that session locking is turned on,
# so let BaseSession handle it.
# Otherwise, explicitly acquire a lock for the file manipulation.
if not self._locked:
_apache._global_lock(self._req.server, self._sid)
self._locked = 1
def unlock_file(self):
if self._locked and not self._lock:
_apache._global_unlock(self._req.server, self._sid)
self._locked = 0
FS_STAT_VERSION = 'MPFS_3.2'
def filesession_cleanup(data):
# There is a small chance that a the cleanup for a given session file
# may occur at the exact time that the session is being accessed by
# another request. It is possible under certain circumstances for that
# session file to be saved in another request only to immediately deleted
# by the cleanup. To avoid this race condition, a session is allowed a
# grace_period before it is considered for deletion by the cleanup.
# As long as the grace_period is longer that the time it takes to complete
# the request (which should normally be less than 1 second), the session will
# not be mistakenly deleted by the cleanup. By doing this we also avoid the
# need to lock individual sessions and bypass any potential deadlock
# situations.
req = data['req']
sessdir = data['sessdir']
fast_cleanup = data['fast_cleanup']
verify_cleanup = data['verify_cleanup']
timeout = data['timeout']
grace_period = data['grace_period']
cleanup_time_limit = data['cleanup_time_limit']
req.log_error('FileSession cleanup: (fast=%s, verify=%s) ...'
% (fast_cleanup,verify_cleanup),
apache.APLOG_NOTICE)
lockfile = os.path.join(sessdir,'.mp_sess.lck')
try:
lockfp = os.open(lockfile, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o660)
except:
# check if it's a stale lockfile
mtime = os.stat(lockfile).st_mtime
if mtime < (time.time() - 3600):
# lockfile is over an hour old so it's likely stale.
# Even though there may not be another cleanup process running,
# we are going to defer running the cleanup at this time.
# Short circuiting this cleanup just makes the code a little cleaner.
req.log_error('FileSession cleanup: stale lockfile found - deleting it',
apache.APLOG_NOTICE)
# Remove the stale lockfile so the next call to filesession_cleanup
# can proceed.
os.remove(lockfile)
else:
req.log_error('FileSession cleanup: another process is already running',
apache.APLOG_NOTICE)
return
try:
status_file = open(os.path.join(sessdir, 'fs_status.txt'), 'r')
d = status_file.readline()
status_file.close()
if not d.startswith(FS_STAT_VERSION):
raise Exception('wrong status file version')
parts = d.split()
stat_version = parts[0]
next_i = int(parts[1])
expired_file_count = int(parts[2])
total_file_count = int(parts[3])
total_time = float(parts[4])
except:
stat_version = FS_STAT_VERSION
next_i = 0
expired_file_count = 0
total_file_count = 0
total_time = 0.0
try:
start_time = time.time()
filelist = os.listdir(sessdir)
dir_index = list(range(0,256))[next_i:]
for i in dir_index:
path = '%s/%s' % (sessdir,'%02x' % i)
if not os.path.exists(path):
continue
filelist = os.listdir(path)
total_file_count += len(filelist)
for f in filelist:
try:
filename = os.path.join(path,f)
if fast_cleanup:
accessed = os.stat(filename).st_mtime
if time.time() - accessed < (timeout + grace_period):
continue
if fast_cleanup and not verify_cleanup:
delete_session = True
else:
try:
fp = open(filename)
dict = load(fp)
if (time.time() - dict['_accessed']) > (dict['_timeout'] + grace_period):
delete_session = True
else:
delete_session = False
finally:
fp.close()
if delete_session:
os.unlink(filename)
expired_file_count += 1
except:
s = StringIO()
traceback.print_exc(file=s)
s = s.getvalue()
req.log_error('FileSession cleanup error: %s'
% (s),
apache.APLOG_NOTICE)
next_i = (i + 1) % 256
time_used = time.time() - start_time
if (cleanup_time_limit > 0) and (time_used > cleanup_time_limit):
break
total_time += time.time() - start_time
if next_i == 0:
# next_i can only be 0 when the full cleanup has run to completion
req.log_error("FileSession cleanup: deleted %d of %d in %.4f seconds"
% (expired_file_count, total_file_count, total_time),
apache.APLOG_NOTICE)
expired_file_count = 0
total_file_count = 0
total_time = 0.0
else:
req.log_error("FileSession cleanup incomplete: next cleanup will start at index %d (%02x)"
% (next_i, next_i),
apache.APLOG_NOTICE)
status_file = open(os.path.join(sessdir, 'fs_status.txt'), 'w')
status_file.write('%s %d %d %d %f\n' % (stat_version, next_i, expired_file_count, total_file_count, total_time))
status_file.close()
try:
os.unlink(lockfile)
except:
pass
finally:
os.close(lockfp)
def make_filesession_dirs(sess_dir):
"""Creates the directory structure used for storing session files"""
for i in range(0,256):
path = os.path.join(sess_dir, '%02x' % i)
if not os.path.exists(path):
os.makedirs(path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP)
###########################################################################
## MemorySession
def mem_cleanup(sdict):
for sid in list(sdict.keys()):
try:
session = sdict[sid]
if (time.time() - session["_accessed"]) > session["_timeout"]:
del sdict[sid]
except:
pass
class MemorySession(BaseSession):
sdict = {}
def __init__(self, req, sid=0, secret=None, timeout=0, lock=1):
BaseSession.__init__(self, req, sid=sid, secret=secret,
timeout=timeout, lock=lock)
def do_cleanup(self):
self._req.register_cleanup(mem_cleanup, MemorySession.sdict)
self._req.log_error("MemorySession: registered session cleanup.",
apache.APLOG_NOTICE)
def do_load(self):
if self._sid in MemorySession.sdict:
return MemorySession.sdict[self._sid]
return None
def do_save(self, dict):
MemorySession.sdict[self._sid] = dict
def do_delete(self):
try:
del MemorySession.sdict[self._sid]
except KeyError: pass
###########################################################################
## Session
def Session(req, sid=0, secret=None, timeout=0, lock=1):
opts = req.get_options()
# Check the apache config for the type of session
if 'mod_python.session.session_type' in opts:
sess_type = opts['mod_python.session.session_type']
elif 'session' in opts:
# For backwards compatability with versions
# of mod_python prior to 3.3.
sess_type = opts['session']
else:
# no session class in config so get the default for the platform
threaded = _apache.mpm_query(apache.AP_MPMQ_IS_THREADED)
forked = _apache.mpm_query(apache.AP_MPMQ_IS_FORKED)
daemons = _apache.mpm_query(apache.AP_MPMQ_MAX_DAEMONS)
if (threaded and ((not forked) or (daemons == 1))):
sess_type = 'MemorySession'
else:
sess_type = 'DbmSession'
if sess_type == 'FileSession':
sess = FileSession
elif sess_type == 'DbmSession':
sess = DbmSession
elif sess_type == 'MemorySession':
sess = MemorySession
else:
# TODO Add capability to load a user defined class
# For now, just raise an exception.
raise Exception('Unknown session type %s' % sess_type)
return sess(req, sid=sid, secret=secret,
timeout=timeout, lock=lock)
## helper functions
def true_or_false(item):
"""This function is used to assist in getting appropriate
values set with the PythonOption directive
"""
try:
item = item.lower()
except:
pass
if item in ['yes','true', '1', 1, True]:
return True
elif item in ['no', 'false', '0', 0, None, False]:
return False
else:
raise Exception
|
eaf1bfebe6488b2e48fe3ff84667281257b16ec9
|
0d105ee427502ab0d51d1330e13588a1a52bb500
|
/termsandconditions/decorators.py
|
895786924f1e9228cd40bd8c64ba8e71ae4d52c9
|
[
"BSD-3-Clause"
] |
permissive
|
cyface/django-termsandconditions
|
959ada3a3f30b43c829afcf5b8c6afed03102d72
|
baa6305bcda0d326deae1fdbba16f506ba6e0b43
|
refs/heads/main
| 2023-08-31T03:55:23.363179
| 2023-08-29T14:11:52
| 2023-08-29T14:21:45
| 4,911,554
| 107
| 63
|
BSD-3-Clause
| 2023-09-13T13:02:08
| 2012-07-05T17:21:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,274
|
py
|
decorators.py
|
"""View Decorators for termsandconditions module"""
from urllib.parse import urlparse, urlunparse
from functools import wraps
from django.http import HttpResponseRedirect, QueryDict
from .models import TermsAndConditions
from .middleware import ACCEPT_TERMS_PATH
def terms_required(view_func):
    """
    This decorator checks to see if the user is logged in, and if so, if they have accepted the site terms.
    """

    @wraps(view_func)
    def _wrapped_view(request, *args, **kwargs):
        """Method to wrap the view passed in"""
        # If user has not logged in, or if they have logged in and already agreed to the terms, let the view through
        if (
            not request.user.is_authenticated
            or not TermsAndConditions.get_active_terms_not_agreed_to(request.user)
        ):
            return view_func(request, *args, **kwargs)

        # Otherwise, redirect to terms accept
        current_path = request.path
        login_url_parts = list(urlparse(ACCEPT_TERMS_PATH))
        querystring = QueryDict(login_url_parts[4], mutable=True)
        querystring["returnTo"] = current_path
        login_url_parts[4] = querystring.urlencode(safe="/")
        return HttpResponseRedirect(urlunparse(login_url_parts))

    return _wrapped_view
|
cff12da0030e00e9772a4103be746fea458ba0fb
|
f4eaf2e121bc6238c16a788767f4836505f954b0
|
/crawler/code_base/csxbot-0.3/citeseerx_crawl/urls.py
|
5030f779d84aeae051d64b3b95e0d482c12c7cde
|
[
"Apache-2.0"
] |
permissive
|
SeerLabs/CiteSeerX
|
5abd92bb16e0275ef3a91c29790c59ea04527e4d
|
49ecb503fb1ced8e2c2e94c3e100e5d4dc410ea6
|
refs/heads/master
| 2021-04-12T03:53:59.996465
| 2019-11-25T18:29:20
| 2019-11-25T18:29:20
| 11,351,492
| 114
| 53
|
NOASSERTION
| 2019-11-25T18:29:21
| 2013-07-11T19:51:28
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,262
|
py
|
urls.py
|
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
    # Example:
    # (r'^citeseerx_crawl/', include('citeseerx_crawl.foo.urls')),

    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    # (r'^admin/', include(admin.site.urls)),
    (r'^$', 'django.views.generic.simple.direct_to_template', {'template': 'index.htm'}),

    # submission
    (r'^submit/','citeseerx_crawl.main_crawl.submission.handle_submission'),
    (r'^submit_pub/','citeseerx_crawl.main_crawl.submission.handle_submission_pub'),
    (r'^captcha/',include('captcha.urls')),
    (r'^tracking/parent/(?P<pid>\d+)/$','citeseerx_crawl.main_crawl.submission.tracking_parent'),
    (r'^tracking/sub/(?P<sid>\d+)/$','citeseerx_crawl.main_crawl.submission.tracking_sub'),
    (r'^sub/stat/$', 'citeseerx_crawl.main_crawl.submission.sub_stat'),
    (r'^sub/stat/data/$', 'citeseerx_crawl.main_crawl.submission.sub_stat_data'),

    # query
    (r'^query/','citeseerx_crawl.main_crawl.query.handle_query'),

    # ranking
    (r'^country_ndocs_rank/$','citeseerx_crawl.main_crawl.ranking.country_ndocs_rank'),
    (r'^(?P<group_by>[a-z]+)_ndocs_rank/','citeseerx_crawl.main_crawl.ranking.ndocs_rank'),
    (r'^(?P<group_by>[a-z]+)_ncites_rank/','citeseerx_crawl.main_crawl.ranking.ncites_rank'),
    (r'^(?P<group_by>[a-z]+)_cpd_rank/','citeseerx_crawl.main_crawl.ranking.cpd_rank'),

    # api
    (r'^api/getdocs.xml$','citeseerx_crawl.main_crawl.api.get_docs_xml'),
    (r'^api/setdocs.xml$','citeseerx_crawl.main_crawl.api.set_docs_xml'),

    # apisub
    (r'^apisub/getdocs.xml$','citeseerx_crawl.main_crawl.api.get_docs_xml'),
    (r'^apisub/setdocs.xml$','citeseerx_crawl.main_crawl.api.set_docs_xml'),

    # ----------------
    (r'^history/(?P<group_by>[a-z]+)/(?P<name>.*)','citeseerx_crawl.main_crawl.views.history'),
    (r'^history/','citeseerx_crawl.main_crawl.views.overall_history'),
)
|
c708990119cab887cbeb356efcb98afb0f8d8ecb
|
fba876caecb7a55254cf92434a9a8a629ed47b93
|
/apps/jobs/migrations/0001_squashed_0005_upload_unique_random_filename.py
|
87e325825001bcf4f6dac758584fd954c79fa47b
|
[
"BSD-3-Clause"
] |
permissive
|
Cloud-CV/EvalAI
|
f6eb96509f679cb5765fd4b4a49e5b3f5a5551d6
|
7e3485f2f3c77b146b72cbbc8de1b15bf0dfe0db
|
refs/heads/master
| 2023-09-04T05:03:59.087293
| 2023-08-30T19:26:13
| 2023-08-30T19:26:13
| 71,516,397
| 1,722
| 983
|
NOASSERTION
| 2023-09-07T18:02:48
| 2016-10-21T00:51:45
|
Python
|
UTF-8
|
Python
| false
| false
| 4,344
|
py
|
0001_squashed_0005_upload_unique_random_filename.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-02-21 17:01
from __future__ import unicode_literals
import base.utils
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("participants", "0007_add_team_participant"),
("challenges", "0007_rename_test_environment"),
]
operations = [
migrations.CreateModel(
name="Submission",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created_at", models.DateTimeField(auto_now_add=True)),
("modified_at", models.DateTimeField(auto_now=True)),
(
"status",
models.CharField(
choices=[
("submitted", "submitted"),
("running", "running"),
("failed", "failed"),
("cancelled", "cancelled"),
("finished", "finished"),
("submitting", "submitting"),
],
max_length=30,
),
),
("is_public", models.BooleanField(default=False)),
("submission_number", models.PositiveIntegerField(default=0)),
("download_count", models.IntegerField(default=0)),
("submitted_at", models.DateTimeField(auto_now_add=True)),
("started_at", models.DateTimeField(blank=True, null=True)),
("completed_at", models.DateTimeField(blank=True, null=True)),
(
"when_made_public",
models.DateTimeField(blank=True, null=True),
),
(
"input_file",
models.FileField(
upload_to=base.utils.RandomFileName(
"submission_files/submission"
)
),
),
(
"stdout_file",
models.FileField(
blank=True,
null=True,
upload_to=base.utils.RandomFileName(
"submission_files/submission"
),
),
),
(
"stderr_file",
models.FileField(
blank=True,
null=True,
upload_to=base.utils.RandomFileName(
"submission_files/submission"
),
),
),
(
"challenge_phase",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="submissions",
to="challenges.ChallengePhase",
),
),
(
"created_by",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
(
"participant_team",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="submissions",
to="participants.ParticipantTeam",
),
),
(
"execution_time_limit",
models.PositiveIntegerField(default=300),
),
("output", models.TextField(blank=True, null=True)),
],
options={"db_table": "submission"},
)
]
|
9810fac2577cc5f535f7097f3d86a395a38ac1b4
|
3a24f63c8742560993b5465b26339e7c0ed05a27
|
/crates/ruff_python_formatter/resources/test/fixtures/black/simple_cases/pep_604.py
|
2ff5ca48298eae2814eb0e13027dc21dc49e07b7
|
[
"BSD-3-Clause",
"0BSD",
"LicenseRef-scancode-free-unknown",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0"
] |
permissive
|
astral-sh/ruff
|
8f1de11263474c6293454b02c728df2f113801db
|
82410524d9612f11387c2675a03869d489bb97ef
|
refs/heads/main
| 2023-08-02T23:20:34.351174
| 2023-08-02T21:32:43
| 2023-08-02T21:32:43
| 523,043,277
| 2,264
| 122
|
MIT
| 2023-09-14T20:08:59
| 2022-08-09T17:17:44
|
Rust
|
UTF-8
|
Python
| false
| false
| 269
|
py
|
pep_604.py
|
def some_very_long_name_function() -> my_module.Asdf | my_module.AnotherType | my_module.YetAnotherType | None:
    pass


def some_very_long_name_function() -> my_module.Asdf | my_module.AnotherType | my_module.YetAnotherType | my_module.EvenMoreType | None:
    pass
|
b2507f4d39efa2c008cafdfa44380beeb7bb5c63
|
6f1f9107ed033fb189f5ed999ba7c356cb15b2bf
|
/ch14-interact-with-pdf-files/3-challenge-PdfFileSplitter-class.py
|
5357574552778286925a87d6b9c932c9362d7c96
|
[] |
no_license
|
realpython/python-basics-exercises
|
f0b28c73517243950b4ee65e7f1278e889644beb
|
6aa39b8ed915d82060e24dcb691fcc3f133fc1dd
|
refs/heads/master
| 2023-08-19T16:50:51.456898
| 2023-08-02T14:10:24
| 2023-08-02T14:10:24
| 128,671,950
| 958
| 527
| null | 2023-08-02T10:58:41
| 2018-04-08T18:53:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,796
|
py
|
3-challenge-PdfFileSplitter-class.py
|
# 14.5 - Challenge: PdfFileSplitter Class
# Solution to challenge
from pathlib import Path
from PyPDF2 import PdfFileReader, PdfFileWriter
class PdfFileSplitter:
    """Class for splitting a PDF into two files."""

    def __init__(self, pdf_path):
        # Open the PDF file with a new PdfFileReader instance
        self.pdf_reader = PdfFileReader(pdf_path)
        # Initialize the .writer1 and .writer2 attributes to None
        self.writer1 = None
        self.writer2 = None

    def split(self, breakpoint):
        """Split the PDF into two PdfFileWriter instances"""
        # Set .writer1 and .writer2 to new PdfFileWriter intances
        self.writer1 = PdfFileWriter()
        self.writer2 = PdfFileWriter()
        # Add all pages up to, but not including, the breakpoint
        # to writer1
        for page in self.pdf_reader.pages[:breakpoint]:
            self.writer1.addPage(page)
        # Add all the remaining pages to writer2
        for page in self.pdf_reader.pages[breakpoint:]:
            self.writer2.addPage(page)

    def write(self, filename):
        """Write both PdfFileWriter instances to files"""
        # Write the first file to <filename>_1.pdf
        with Path(filename + "_1.pdf").open(mode="wb") as output_file:
            self.writer1.write(output_file)
        # Write the second file to <filename>_2.pdf
        with Path(filename + "_2.pdf").open(mode="wb") as output_file:
            self.writer2.write(output_file)


# Split the Pride_and_Prejudice.pdf file into two PDFs, the first
# containing the first 150 pages, and the second containing the
# remaining pages.
pdf_splitter = PdfFileSplitter("ch14-interact-with-pdf-files/practice_files/Pride_and_Prejudice.pdf")
pdf_splitter.split(breakpoint=150)
pdf_splitter.write("pride_split")
|
31eca0fd389bbc293b4ce8d97521dc8f4e034761
|
03a7f7a7eb8c16b537b65ec21f465bb0335bc3b8
|
/pythran/tests/scipy/hausdorff.py
|
ac3bd2cc80117b2bdd8a307546bff426a86c435a
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
serge-sans-paille/pythran
|
a0e22af1ac5e1f34f3f29dce36502f4a897b5186
|
d8ab07b4b3b690f50603cb4d08ba303d3af18b90
|
refs/heads/master
| 2023-09-01T16:04:03.289285
| 2023-08-30T09:13:58
| 2023-08-31T08:03:22
| 4,479,494
| 1,882
| 200
|
BSD-3-Clause
| 2023-09-06T20:08:10
| 2012-05-29T08:02:14
|
C++
|
UTF-8
|
Python
| false
| false
| 2,850
|
py
|
hausdorff.py
|
#
# Copyright (C) Tyler Reddy, Richard Gowers, and Max Linke, 2016
#
# Distributed under the same BSD license as Scipy.
#
# adapted from scipy's cython version
import numpy as np
import numpy.random as random
#pythran export directed_hausdorff(float64[:,:], float64[:,:], int)
#pythran export directed_hausdorff_noshuffle(float64[:,:], float64[:,:])
#runas import numpy as np; x = np.arange((100 * 100.)).reshape(100,-1); y = np.ones((100,100)) * 3; directed_hausdorff_noshuffle(x, y)
def directed_hausdorff(ar1, ar2, seed=0):
    N1, data_dims = ar1.shape
    N2 = ar2.shape[0]
    i_store = j_store = i_ret = j_ret = 0
    # shuffling the points in each array generally increases the likelihood of
    # an advantageous break in the inner search loop and never decreases the
    # performance of the algorithm
    random.seed(seed)
    resort1 = np.arange(N1)
    resort2 = np.arange(N2)
    random.shuffle(resort1)
    random.shuffle(resort2)
    ar1 = np.asarray(ar1)[resort1]
    ar2 = np.asarray(ar2)[resort2]
    cmax = 0
    for i in range(N1):
        cmin = np.inf
        for j in range(N2):
            d = np.sum((ar1[i] - ar2[j]) ** 2)
            # faster performance with square of distance
            # avoid sqrt until very end
            if d < cmax:  # break out of `for j` loop
                break
            if d < cmin:  # always true on first iteration of for-j loop
                cmin = d
                i_store = i
                j_store = j
        else:
            # always true on first iteration of for-j loop, after that only
            # if d >= cmax
            if cmin != np.inf and cmin > cmax:
                cmax = cmin
                i_ret = i_store
                j_ret = j_store
    return np.sqrt(cmax), resort1[i_ret], resort2[j_ret]


def directed_hausdorff_noshuffle(ar1, ar2, seed=0):
    N1, data_dims = ar1.shape
    N2 = ar2.shape[0]
    i_store = j_store = i_ret = j_ret = 0
    resort1 = np.arange(N1)
    resort2 = np.arange(N2)
    ar1 = np.asarray(ar1)[resort1]
    ar2 = np.asarray(ar2)[resort2]
    cmax = 0
    for i in range(N1):
        cmin = np.inf
        for j in range(N2):
            d = np.sum((ar1[i] - ar2[j]) ** 2)
            # faster performance with square of distance
            # avoid sqrt until very end
            if d < cmax:  # break out of `for j` loop
                break
            if d < cmin:  # always true on first iteration of for-j loop
                cmin = d
                i_store = i
                j_store = j
        else:
            # always true on first iteration of for-j loop, after that only
            # if d >= cmax
            if cmin != np.inf and cmin > cmax:
                cmax = cmin
                i_ret = i_store
                j_ret = j_store
    return np.sqrt(cmax), resort1[i_ret], resort2[j_ret]
|
557a7e7852c17a92d09610fa9fe11046f6f7d2fc
|
ea49dd7d31d2e0b65ce6aadf1274f3bb70abfaf9
|
/problems/0784_Letter_Case_Permutation/washing.py
|
22dfc50ad610bf4fe72c399118829a190af70aeb
|
[] |
no_license
|
yychuyu/LeetCode
|
907a3d7d67ada9714e86103ac96422381e75d683
|
48384483a55e120caf5d8d353e9aa287fce3cf4a
|
refs/heads/master
| 2020-03-30T15:02:12.492378
| 2019-06-19T01:52:45
| 2019-06-19T01:52:45
| 151,345,944
| 134
| 331
| null | 2019-08-01T02:56:10
| 2018-10-03T01:26:28
|
C++
|
UTF-8
|
Python
| false
| false
| 526
|
py
|
washing.py
|
class Solution(object):
    def letterCasePermutation(self, S):
        """
        :type S: str
        :rtype: List[str]
        """
        str_list = [""]
        for i in range(len(S)):
            length = len(str_list)
            for j in range(length):
                str_list.append(str_list[j] + S[i].lower())
                try:
                    int(S[i])
                except:
                    str_list.append(str_list[j] + S[i].upper())
            str_list = str_list[length:]
        return str_list
|
90081cd8789f50e8998697623c1ea4ccbc0cf740
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-ccm/huaweicloudsdkccm/v1/__init__.py
|
17e33528b55229aa1b0cd1df6b0035e09b7a547b
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 11,428
|
py
|
__init__.py
|
# coding: utf-8
from __future__ import absolute_import
from huaweicloudsdkccm.v1.ccm_client import CcmClient
from huaweicloudsdkccm.v1.ccm_async_client import CcmAsyncClient
from huaweicloudsdkccm.v1.model.batch_create_ca_tags_request import BatchCreateCaTagsRequest
from huaweicloudsdkccm.v1.model.batch_create_ca_tags_response import BatchCreateCaTagsResponse
from huaweicloudsdkccm.v1.model.batch_create_cert_tags_request import BatchCreateCertTagsRequest
from huaweicloudsdkccm.v1.model.batch_create_cert_tags_response import BatchCreateCertTagsResponse
from huaweicloudsdkccm.v1.model.batch_delete_ca_tags_request import BatchDeleteCaTagsRequest
from huaweicloudsdkccm.v1.model.batch_delete_ca_tags_response import BatchDeleteCaTagsResponse
from huaweicloudsdkccm.v1.model.batch_delete_cert_tags_request import BatchDeleteCertTagsRequest
from huaweicloudsdkccm.v1.model.batch_delete_cert_tags_response import BatchDeleteCertTagsResponse
from huaweicloudsdkccm.v1.model.batch_operate_tag_request_body import BatchOperateTagRequestBody
from huaweicloudsdkccm.v1.model.cert_distinguished_name import CertDistinguishedName
from huaweicloudsdkccm.v1.model.certificate_authorities import CertificateAuthorities
from huaweicloudsdkccm.v1.model.certificates import Certificates
from huaweicloudsdkccm.v1.model.count_ca_resource_instances_request import CountCaResourceInstancesRequest
from huaweicloudsdkccm.v1.model.count_ca_resource_instances_response import CountCaResourceInstancesResponse
from huaweicloudsdkccm.v1.model.count_cert_resource_instances_request import CountCertResourceInstancesRequest
from huaweicloudsdkccm.v1.model.count_cert_resource_instances_response import CountCertResourceInstancesResponse
from huaweicloudsdkccm.v1.model.create_ca_tag_request import CreateCaTagRequest
from huaweicloudsdkccm.v1.model.create_ca_tag_response import CreateCaTagResponse
from huaweicloudsdkccm.v1.model.create_cert_tag_request import CreateCertTagRequest
from huaweicloudsdkccm.v1.model.create_cert_tag_response import CreateCertTagResponse
from huaweicloudsdkccm.v1.model.create_certificate_authority_obs_agency_request import CreateCertificateAuthorityObsAgencyRequest
from huaweicloudsdkccm.v1.model.create_certificate_authority_obs_agency_response import CreateCertificateAuthorityObsAgencyResponse
from huaweicloudsdkccm.v1.model.create_certificate_authority_request import CreateCertificateAuthorityRequest
from huaweicloudsdkccm.v1.model.create_certificate_authority_request_body import CreateCertificateAuthorityRequestBody
from huaweicloudsdkccm.v1.model.create_certificate_authority_response import CreateCertificateAuthorityResponse
from huaweicloudsdkccm.v1.model.create_certificate_by_csr_request import CreateCertificateByCsrRequest
from huaweicloudsdkccm.v1.model.create_certificate_by_csr_request_body import CreateCertificateByCsrRequestBody
from huaweicloudsdkccm.v1.model.create_certificate_by_csr_response import CreateCertificateByCsrResponse
from huaweicloudsdkccm.v1.model.create_certificate_request import CreateCertificateRequest
from huaweicloudsdkccm.v1.model.create_certificate_request_body import CreateCertificateRequestBody
from huaweicloudsdkccm.v1.model.create_certificate_response import CreateCertificateResponse
from huaweicloudsdkccm.v1.model.crl_configuration import CrlConfiguration
from huaweicloudsdkccm.v1.model.customized_extension import CustomizedExtension
from huaweicloudsdkccm.v1.model.delete_certificate_authority_request import DeleteCertificateAuthorityRequest
from huaweicloudsdkccm.v1.model.delete_certificate_authority_response import DeleteCertificateAuthorityResponse
from huaweicloudsdkccm.v1.model.delete_certificate_request import DeleteCertificateRequest
from huaweicloudsdkccm.v1.model.delete_certificate_response import DeleteCertificateResponse
from huaweicloudsdkccm.v1.model.disable_certificate_authority_crl_request import DisableCertificateAuthorityCrlRequest
from huaweicloudsdkccm.v1.model.disable_certificate_authority_crl_response import DisableCertificateAuthorityCrlResponse
from huaweicloudsdkccm.v1.model.disable_certificate_authority_request import DisableCertificateAuthorityRequest
from huaweicloudsdkccm.v1.model.disable_certificate_authority_response import DisableCertificateAuthorityResponse
from huaweicloudsdkccm.v1.model.distinguished_name import DistinguishedName
from huaweicloudsdkccm.v1.model.domain_tags import DomainTags
from huaweicloudsdkccm.v1.model.enable_certificate_authority_crl_request import EnableCertificateAuthorityCrlRequest
from huaweicloudsdkccm.v1.model.enable_certificate_authority_crl_request_body import EnableCertificateAuthorityCrlRequestBody
from huaweicloudsdkccm.v1.model.enable_certificate_authority_crl_response import EnableCertificateAuthorityCrlResponse
from huaweicloudsdkccm.v1.model.enable_certificate_authority_request import EnableCertificateAuthorityRequest
from huaweicloudsdkccm.v1.model.enable_certificate_authority_response import EnableCertificateAuthorityResponse
from huaweicloudsdkccm.v1.model.enc_cert_info import EncCertInfo
from huaweicloudsdkccm.v1.model.export_certificate_authority_certificate_request import ExportCertificateAuthorityCertificateRequest
from huaweicloudsdkccm.v1.model.export_certificate_authority_certificate_response import ExportCertificateAuthorityCertificateResponse
from huaweicloudsdkccm.v1.model.export_certificate_authority_csr_request import ExportCertificateAuthorityCsrRequest
from huaweicloudsdkccm.v1.model.export_certificate_authority_csr_response import ExportCertificateAuthorityCsrResponse
from huaweicloudsdkccm.v1.model.export_certificate_request import ExportCertificateRequest
from huaweicloudsdkccm.v1.model.export_certificate_request_body import ExportCertificateRequestBody
from huaweicloudsdkccm.v1.model.export_certificate_response import ExportCertificateResponse
from huaweicloudsdkccm.v1.model.extended_key_usage import ExtendedKeyUsage
from huaweicloudsdkccm.v1.model.import_certificate_authority_certificate_request import ImportCertificateAuthorityCertificateRequest
from huaweicloudsdkccm.v1.model.import_certificate_authority_certificate_request_body import ImportCertificateAuthorityCertificateRequestBody
from huaweicloudsdkccm.v1.model.import_certificate_authority_certificate_response import ImportCertificateAuthorityCertificateResponse
from huaweicloudsdkccm.v1.model.issue_certificate_authority_certificate_request import IssueCertificateAuthorityCertificateRequest
from huaweicloudsdkccm.v1.model.issue_certificate_authority_certificate_request_body import IssueCertificateAuthorityCertificateRequestBody
from huaweicloudsdkccm.v1.model.issue_certificate_authority_certificate_response import IssueCertificateAuthorityCertificateResponse
from huaweicloudsdkccm.v1.model.list_ca_resource_instances_request import ListCaResourceInstancesRequest
from huaweicloudsdkccm.v1.model.list_ca_resource_instances_response import ListCaResourceInstancesResponse
from huaweicloudsdkccm.v1.model.list_ca_tags_request import ListCaTagsRequest
from huaweicloudsdkccm.v1.model.list_ca_tags_response import ListCaTagsResponse
from huaweicloudsdkccm.v1.model.list_cert_resource_instances_request import ListCertResourceInstancesRequest
from huaweicloudsdkccm.v1.model.list_cert_resource_instances_response import ListCertResourceInstancesResponse
from huaweicloudsdkccm.v1.model.list_cert_tags_request import ListCertTagsRequest
from huaweicloudsdkccm.v1.model.list_cert_tags_response import ListCertTagsResponse
from huaweicloudsdkccm.v1.model.list_certificate_authority_obs_bucket_request import ListCertificateAuthorityObsBucketRequest
from huaweicloudsdkccm.v1.model.list_certificate_authority_obs_bucket_response import ListCertificateAuthorityObsBucketResponse
from huaweicloudsdkccm.v1.model.list_certificate_authority_request import ListCertificateAuthorityRequest
from huaweicloudsdkccm.v1.model.list_certificate_authority_response import ListCertificateAuthorityResponse
from huaweicloudsdkccm.v1.model.list_certificate_request import ListCertificateRequest
from huaweicloudsdkccm.v1.model.list_certificate_response import ListCertificateResponse
from huaweicloudsdkccm.v1.model.list_crl_configuration import ListCrlConfiguration
from huaweicloudsdkccm.v1.model.list_domain_ca_tags_request import ListDomainCaTagsRequest
from huaweicloudsdkccm.v1.model.list_domain_ca_tags_response import ListDomainCaTagsResponse
from huaweicloudsdkccm.v1.model.list_domain_cert_tags_request import ListDomainCertTagsRequest
from huaweicloudsdkccm.v1.model.list_domain_cert_tags_response import ListDomainCertTagsResponse
from huaweicloudsdkccm.v1.model.list_resource_instances_request_body import ListResourceInstancesRequestBody
from huaweicloudsdkccm.v1.model.obs_buckets import ObsBuckets
from huaweicloudsdkccm.v1.model.parse_certificate_signing_request_request import ParseCertificateSigningRequestRequest
from huaweicloudsdkccm.v1.model.parse_certificate_signing_request_request_body import ParseCertificateSigningRequestRequestBody
from huaweicloudsdkccm.v1.model.parse_certificate_signing_request_response import ParseCertificateSigningRequestResponse
from huaweicloudsdkccm.v1.model.quotas import Quotas
from huaweicloudsdkccm.v1.model.resource_tag import ResourceTag
from huaweicloudsdkccm.v1.model.resource_tag_request_body import ResourceTagRequestBody
from huaweicloudsdkccm.v1.model.resources import Resources
from huaweicloudsdkccm.v1.model.restore_certificate_authority_request import RestoreCertificateAuthorityRequest
from huaweicloudsdkccm.v1.model.restore_certificate_authority_response import RestoreCertificateAuthorityResponse
from huaweicloudsdkccm.v1.model.revoke_certificate_authority_request import RevokeCertificateAuthorityRequest
from huaweicloudsdkccm.v1.model.revoke_certificate_authority_response import RevokeCertificateAuthorityResponse
from huaweicloudsdkccm.v1.model.revoke_certificate_request import RevokeCertificateRequest
from huaweicloudsdkccm.v1.model.revoke_certificate_request_body import RevokeCertificateRequestBody
from huaweicloudsdkccm.v1.model.revoke_certificate_response import RevokeCertificateResponse
from huaweicloudsdkccm.v1.model.show_certificate_authority_obs_agency_request import ShowCertificateAuthorityObsAgencyRequest
from huaweicloudsdkccm.v1.model.show_certificate_authority_obs_agency_response import ShowCertificateAuthorityObsAgencyResponse
from huaweicloudsdkccm.v1.model.show_certificate_authority_quota_request import ShowCertificateAuthorityQuotaRequest
from huaweicloudsdkccm.v1.model.show_certificate_authority_quota_response import ShowCertificateAuthorityQuotaResponse
from huaweicloudsdkccm.v1.model.show_certificate_authority_request import ShowCertificateAuthorityRequest
from huaweicloudsdkccm.v1.model.show_certificate_authority_response import ShowCertificateAuthorityResponse
from huaweicloudsdkccm.v1.model.show_certificate_quota_request import ShowCertificateQuotaRequest
from huaweicloudsdkccm.v1.model.show_certificate_quota_response import ShowCertificateQuotaResponse
from huaweicloudsdkccm.v1.model.show_certificate_request import ShowCertificateRequest
from huaweicloudsdkccm.v1.model.show_certificate_response import ShowCertificateResponse
from huaweicloudsdkccm.v1.model.subject_alternative_name import SubjectAlternativeName
from huaweicloudsdkccm.v1.model.tag_resource import TagResource
from huaweicloudsdkccm.v1.model.validity import Validity
|
dcde02789d28227555fd9fc709c7e1dc045f3a50
|
0760fb4901a75766921a205b55686d6d6f049b30
|
/python/ray/_private/runtime_env/uri_cache.py
|
97871baba1cbb5d145de832ecdf693f24906a490
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ray-project/ray
|
a4bb6940b08b59a61ef0b8e755a52d8563a2f867
|
edba68c3e7cf255d1d6479329f305adb7fa4c3ed
|
refs/heads/master
| 2023-08-31T03:36:48.164405
| 2023-08-31T03:20:38
| 2023-08-31T03:20:38
| 71,932,349
| 29,482
| 5,669
|
Apache-2.0
| 2023-09-14T21:48:14
| 2016-10-25T19:38:30
|
Python
|
UTF-8
|
Python
| false
| false
| 4,236
|
py
|
uri_cache.py
|
import logging
from typing import Set, Callable
default_logger = logging.getLogger(__name__)
DEFAULT_MAX_URI_CACHE_SIZE_BYTES = (1024**3) * 10 # 10 GB
class URICache:
    """
    Caches URIs up to a specified total size limit.
    URIs are represented by strings. Each URI has an associated size on disk.
    When a URI is added to the URICache, it is marked as "in use".
    When a URI is no longer in use, the user of this class should call
    `mark_unused` to signal that the URI is safe for deletion.
    URIs in the cache can be marked as "in use" by calling `mark_used`.
    Deletion of URIs on disk does not occur until the size limit is exceeded.
    When this happens, URIs that are not in use are deleted randomly until the
    size limit is satisfied, or there are no more URIs that are not in use.
    It is possible for the total size on disk to exceed the size limit if all
    the URIs are in use.
    """

    def __init__(
        self,
        delete_fn: Callable[[str, logging.Logger], int] = lambda uri, logger: 0,
        max_total_size_bytes: int = DEFAULT_MAX_URI_CACHE_SIZE_BYTES,
        debug_mode: bool = False,
    ):
        # Maps URIs to the size in bytes of their corresponding disk contents.
        self._used_uris: Set[str] = set()
        self._unused_uris: Set[str] = set()
        self._delete_fn = delete_fn
        # Total size of both used and unused URIs in the cache.
        self._total_size_bytes = 0
        self.max_total_size_bytes = max_total_size_bytes
        # Used in `self._check_valid()` for testing.
        self._debug_mode = debug_mode

    def mark_unused(self, uri: str, logger: logging.Logger = default_logger):
        """Mark a URI as unused and okay to be deleted."""
        if uri not in self._used_uris:
            logger.info(f"URI {uri} is already unused.")
        else:
            self._unused_uris.add(uri)
            self._used_uris.remove(uri)
        logger.info(f"Marked URI {uri} unused.")
        self._evict_if_needed(logger)
        self._check_valid()

    def mark_used(self, uri: str, logger: logging.Logger = default_logger):
        """Mark a URI as in use. URIs in use will not be deleted."""
        if uri in self._used_uris:
            return
        elif uri in self._unused_uris:
            self._used_uris.add(uri)
            self._unused_uris.remove(uri)
        else:
            raise ValueError(
                f"Got request to mark URI {uri} used, but this "
                "URI is not present in the cache."
            )
        logger.info(f"Marked URI {uri} used.")
        self._check_valid()

    def add(self, uri: str, size_bytes: int, logger: logging.Logger = default_logger):
        """Add a URI to the cache and mark it as in use."""
        if uri in self._unused_uris:
            self._unused_uris.remove(uri)
        self._used_uris.add(uri)
        self._total_size_bytes += size_bytes
        self._evict_if_needed(logger)
        self._check_valid()
        logger.info(f"Added URI {uri} with size {size_bytes}")

    def get_total_size_bytes(self) -> int:
        return self._total_size_bytes

    def _evict_if_needed(self, logger: logging.Logger = default_logger):
        """Evict unused URIs (if they exist) until total size <= max size."""
        while (
            self._unused_uris
            and self.get_total_size_bytes() > self.max_total_size_bytes
        ):
            # TODO(architkulkarni): Evict least recently used URI instead
            arbitrary_unused_uri = next(iter(self._unused_uris))
            self._unused_uris.remove(arbitrary_unused_uri)
            num_bytes_deleted = self._delete_fn(arbitrary_unused_uri, logger)
            self._total_size_bytes -= num_bytes_deleted
            logger.info(
                f"Deleted URI {arbitrary_unused_uri} with size " f"{num_bytes_deleted}."
            )

    def _check_valid(self):
        """(Debug mode only) Check "used" and "unused" sets are disjoint."""
        if self._debug_mode:
            assert self._used_uris & self._unused_uris == set()

    def __contains__(self, uri):
        return uri in self._used_uris or uri in self._unused_uris

    def __repr__(self):
        return str(self.__dict__)
|
23e8cef8ee3cc3bf0f39d8993cdafbe6e4677d6f
|
974d04d2ea27b1bba1c01015a98112d2afb78fe5
|
/test/ir/inference/test_onednn_conv_concat_activation_fuse_pass.py
|
1a71841d22cc089d88d67c8d27dc483a28263e41
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle
|
b3d2583119082c8e4b74331dacc4d39ed4d7cff0
|
22a11a60e0e3d10a3cf610077a3d9942a6f964cb
|
refs/heads/develop
| 2023-08-17T21:27:30.568889
| 2023-08-17T12:38:22
| 2023-08-17T12:38:22
| 65,711,522
| 20,414
| 5,891
|
Apache-2.0
| 2023-09-14T19:20:51
| 2016-08-15T06:59:08
|
C++
|
UTF-8
|
Python
| false
| false
| 6,014
|
py
|
test_onednn_conv_concat_activation_fuse_pass.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from functools import partial
import hypothesis.strategies as st
import numpy as np
from auto_scan_test import PassAutoScanTest
from program_config import OpConfig, ProgramConfig, TensorConfig
class TestOneDNNConvConcatActivationFusePass(PassAutoScanTest):
def sample_program_config(self, draw):
data_format = draw(st.sampled_from(['NCHW', 'NHWC']))
dilations = draw(st.sampled_from([[2, 2]]))
padding_algorithm = draw(st.sampled_from(['VALID']))
groups = draw(st.sampled_from([4]))
paddings = draw(st.sampled_from([[0, 3]]))
strides = draw(st.sampled_from([[1, 2]]))
axis = draw(st.sampled_from([0]))
activation_type = draw(
st.sampled_from(
[
'relu',
'gelu',
'swish',
'mish',
'hard_swish',
'sigmoid',
'abs',
'relu6',
'clip',
'tanh',
'hard_sigmoid',
'leaky_relu',
]
)
)
def generate_data(input_type):
if input_type == 'NCHW':
return np.random.random([16, 48, 64, 64]).astype(np.float32)
elif input_type == 'NHWC':
return np.random.random([16, 64, 64, 48]).astype(np.float32)
elif input_type == 'weights':
return np.random.random([16, int(48 / groups), 3, 3]).astype(
np.float32
)
conv2d_op1 = OpConfig(
type='conv2d',
inputs={'Input': ['conv_input_1'], 'Filter': ['conv_weights_1']},
outputs={'Output': ['conv_output_1']},
attrs={
'data_format': data_format,
'dilations': dilations,
'padding_algorithm': padding_algorithm,
'groups': groups,
'paddings': paddings,
'strides': strides,
},
)
conv2d_op2 = OpConfig(
type='conv2d',
inputs={'Input': ['conv_input_2'], 'Filter': ['conv_weights_2']},
outputs={'Output': ['conv_output_2']},
attrs={
'data_format': data_format,
'dilations': dilations,
'padding_algorithm': padding_algorithm,
'groups': groups,
'paddings': paddings,
'strides': strides,
},
)
concat_op = OpConfig(
type='concat',
inputs={'X': ['conv_output_1', 'conv_output_2']},
outputs={'Out': ['concat_output']},
attrs={'axis': axis},
)
if activation_type == 'relu6':
activation_op = OpConfig(
activation_type,
inputs={'X': ['concat_output']},
outputs={'Out': ['activation_output']},
threshold=6.0,
)
elif activation_type == 'leaky_relu':
activation_op = OpConfig(
activation_type,
inputs={'X': ['concat_output']},
outputs={'Out': ['activation_output']},
alpha=draw(st.floats(min_value=0.1, max_value=1.0)),
)
elif activation_type == 'swish':
activation_op = OpConfig(
activation_type,
inputs={'X': ['concat_output']},
outputs={'Out': ['activation_output']},
beta=1.0,
)
elif activation_type == 'clip':
activation_op = OpConfig(
activation_type,
inputs={'X': ['concat_output']},
outputs={'Out': ['activation_output']},
min=draw(st.floats(min_value=0.1, max_value=0.49)),
max=draw(st.floats(min_value=0.5, max_value=1.0)),
)
else:
activation_op = OpConfig(
activation_type,
inputs={'X': ['concat_output']},
outputs={'Out': ['activation_output']},
)
model_net = [conv2d_op1, conv2d_op2, concat_op, activation_op]
program_config = ProgramConfig(
ops=model_net,
inputs={
'conv_input_1': TensorConfig(
data_gen=partial(generate_data, data_format)
),
'conv_input_2': TensorConfig(
data_gen=partial(generate_data, data_format)
),
},
weights={
'conv_weights_1': TensorConfig(
data_gen=partial(generate_data, 'weights')
),
'conv_weights_2': TensorConfig(
data_gen=partial(generate_data, 'weights')
),
},
outputs=['activation_output'],
)
return program_config
def sample_predictor_configs(self, program_config):
config = self.create_inference_config(use_mkldnn=True)
yield config, ['fused_conv2d', 'fused_conv2d', 'concat'], (1e-5, 1e-5)
def test(self):
self.run_and_statis(
quant=False,
passes=['conv_activation_mkldnn_fuse_pass'],
max_examples=50,
)
if __name__ == '__main__':
unittest.main()
|
8132a55de1d7f2a2288085d3e599ea987bd28b5f
|
a5b66100762c0ca7076de26645ef1b732e0ee2d8
|
/python_toolbox/wx_tools/keyboard/keys/win_keys.py
|
766f2294b9d4e2e9646cd267dc47b5ec5f2553d2
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
cool-RR/python_toolbox
|
63400bbc004c63b32fe421b668a64bede4928e90
|
cb9ef64b48f1d03275484d707dc5079b6701ad0c
|
refs/heads/master
| 2022-01-26T14:41:29.194288
| 2021-12-25T06:49:40
| 2021-12-25T06:49:40
| 3,066,283
| 130
| 15
|
NOASSERTION
| 2021-12-25T06:49:41
| 2011-12-29T01:39:51
|
Python
|
UTF-8
|
Python
| false
| false
| 342
|
py
|
win_keys.py
|
# Copyright 2009-2017 Ram Rachum.
# This program is distributed under the MIT license.
'''This module defines Windows-specific keys.'''
import wx
from ..key import Key
back_keys = (
Key(wx.WXK_LEFT, alt=True),
)
back_key_string = u'Alt-\u00ab'
forward_keys = (
Key(wx.WXK_RIGHT, alt=True),
)
forward_key_string = u'Alt-\u00bb'
|
22b0ebcc541db65a05ef2f979cb118938a565683
|
11cd362cdd78c2fc48042ed203614b201ac94aa6
|
/desktop/core/ext-py3/eventlet-0.30.2/tests/isolated/env_tpool_size.py
|
a34a9c7ca1b9171a58a239745f9252e75b2ffc1c
|
[
"MIT",
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] |
permissive
|
cloudera/hue
|
b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908
|
dccb9467675c67b9c3399fc76c5de6d31bfb8255
|
refs/heads/master
| 2023-08-31T06:49:25.724501
| 2023-08-28T20:45:00
| 2023-08-28T20:45:00
| 732,593
| 5,655
| 2,244
|
Apache-2.0
| 2023-09-14T03:05:41
| 2010-06-21T19:46:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 611
|
py
|
env_tpool_size.py
|
__test__ = False
if __name__ == '__main__':
import sys
import time
from eventlet import tpool
import eventlet
current = [0]
highwater = [0]
def count():
current[0] += 1
time.sleep(0.01)
if current[0] > highwater[0]:
highwater[0] = current[0]
current[0] -= 1
expected = int(sys.argv[1])
normal = int(sys.argv[2])
p = eventlet.GreenPool()
for i in range(expected * 2):
p.spawn(tpool.execute, count)
p.waitall()
assert highwater[0] > normal, "Highwater %s <= %s" % (highwater[0], normal)
print('pass')
|
62e2b260a3c6ab4abf87811acb904be6ce9910ac
|
5e601244fbf32ee5190fb5210a0cd334473a0abe
|
/projects/LinuxSystemOps/SoftwareManagement/nginx/retrieveLatestOpenSSLVersionUrl.py
|
604fd70cc591e2a829f3129f9320b6e3623b9125
|
[] |
no_license
|
DingGuodong/LinuxBashShellScriptForOps
|
69ebe45cf3f92b741a078b9b78c2600328ce9b9e
|
b2ca1e4c870626dd078d447e2d1479b08602bdf6
|
refs/heads/master
| 2023-08-21T20:53:40.617397
| 2023-07-17T01:41:05
| 2023-07-17T01:41:05
| 57,015,255
| 453
| 343
| null | 2023-02-16T01:29:23
| 2016-04-25T05:55:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,824
|
py
|
retrieveLatestOpenSSLVersionUrl.py
|
#!/usr/bin/python
# encoding: utf-8
# -*- coding: utf8 -*-
"""
Created by PyCharm.
File: LinuxBashShellScriptForOps:retrieveLatestOpenSSLVersionUrl.py
User: Guodong
Create Date: 2017/3/7
Create Time: 14:25
retrieve, fetch, find latest version of OpenSSL URL
"""
import re
import requests
from bs4 import BeautifulSoup
headers = {
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/54.0.2840.99 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'DNT': '1',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'en-US,en;q=0.8,zh-CN;q=0.6,zh-TW;q=0.4'
}
downloads_page_url = 'https://www.openssl.org/source/'
file_url_list = list()
latest_version = ""
file_url = ""
content = requests.get(downloads_page_url, headers=headers)
Soup = BeautifulSoup(content.text, 'lxml')
available_version = Soup.find('div', class_="entry-content").find_all('a')
for link in available_version:
pattern = re.compile(r'.*\.tar\.gz$')
match = pattern.search(link.get("href"))
if match:
file_url_list.append(downloads_page_url + match.group())
latest_version_mass = Soup.find('div', class_="entry-content").find_all('p')
pattern = re.compile(r'The latest stable version is the (.*) series\.')
match = pattern.search(str(latest_version_mass))
if match:
latest_version = match.groups()[0]
for number, item in enumerate(file_url_list):
if latest_version != "" and latest_version in file_url_list[number]:
file_url = file_url_list[number]
break
if file_url == "":
file_url = file_url_list[number]
    print(file_url)
else:
    print(file_url)
|
f9fe6d0cecc5229853d71fb61eaee295048a0c78
|
a2b20597759990445081057d35d113434cfcf970
|
/stubs/typeshed/typeshed/stubs/pywin32/win32/lib/win32cryptcon.pyi
|
c3865abff6217b38f0cd8821730b629e01ec1545
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
facebook/pyre-check
|
34059599c02b65605c574f13555229f3b931fd4e
|
fe8ccedc572cc1faa1fd01e9138f65e982875002
|
refs/heads/main
| 2023-09-03T19:10:11.587028
| 2023-09-02T07:40:35
| 2023-09-02T07:40:35
| 110,274,488
| 6,703
| 575
|
MIT
| 2023-09-13T17:02:32
| 2017-11-10T17:31:36
|
OCaml
|
UTF-8
|
Python
| false
| false
| 54,316
|
pyi
|
win32cryptcon.pyi
|
def GET_ALG_CLASS(x: int) -> int: ...
def GET_ALG_TYPE(x: int) -> int: ...
def GET_ALG_SID(x: int) -> int: ...
ALG_CLASS_ANY: int
ALG_CLASS_SIGNATURE: int
ALG_CLASS_MSG_ENCRYPT: int
ALG_CLASS_DATA_ENCRYPT: int
ALG_CLASS_HASH: int
ALG_CLASS_KEY_EXCHANGE: int
ALG_CLASS_ALL: int
ALG_TYPE_ANY: int
ALG_TYPE_DSS: int
ALG_TYPE_RSA: int
ALG_TYPE_BLOCK: int
ALG_TYPE_STREAM: int
ALG_TYPE_DH: int
ALG_TYPE_SECURECHANNEL: int
ALG_SID_ANY: int
ALG_SID_RSA_ANY: int
ALG_SID_RSA_PKCS: int
ALG_SID_RSA_MSATWORK: int
ALG_SID_RSA_ENTRUST: int
ALG_SID_RSA_PGP: int
ALG_SID_DSS_ANY: int
ALG_SID_DSS_PKCS: int
ALG_SID_DSS_DMS: int
ALG_SID_DES: int
ALG_SID_3DES: int
ALG_SID_DESX: int
ALG_SID_IDEA: int
ALG_SID_CAST: int
ALG_SID_SAFERSK64: int
ALG_SID_SAFERSK128: int
ALG_SID_3DES_112: int
ALG_SID_CYLINK_MEK: int
ALG_SID_RC5: int
ALG_SID_AES_128: int
ALG_SID_AES_192: int
ALG_SID_AES_256: int
ALG_SID_AES: int
ALG_SID_SKIPJACK: int
ALG_SID_TEK: int
CRYPT_MODE_CBCI: int
CRYPT_MODE_CFBP: int
CRYPT_MODE_OFBP: int
CRYPT_MODE_CBCOFM: int
CRYPT_MODE_CBCOFMI: int
ALG_SID_RC2: int
ALG_SID_RC4: int
ALG_SID_SEAL: int
ALG_SID_DH_SANDF: int
ALG_SID_DH_EPHEM: int
ALG_SID_AGREED_KEY_ANY: int
ALG_SID_KEA: int
ALG_SID_MD2: int
ALG_SID_MD4: int
ALG_SID_MD5: int
ALG_SID_SHA: int
ALG_SID_SHA1: int
ALG_SID_MAC: int
ALG_SID_RIPEMD: int
ALG_SID_RIPEMD160: int
ALG_SID_SSL3SHAMD5: int
ALG_SID_HMAC: int
ALG_SID_TLS1PRF: int
ALG_SID_HASH_REPLACE_OWF: int
ALG_SID_SHA_256: int
ALG_SID_SHA_384: int
ALG_SID_SHA_512: int
ALG_SID_SSL3_MASTER: int
ALG_SID_SCHANNEL_MASTER_HASH: int
ALG_SID_SCHANNEL_MAC_KEY: int
ALG_SID_PCT1_MASTER: int
ALG_SID_SSL2_MASTER: int
ALG_SID_TLS1_MASTER: int
ALG_SID_SCHANNEL_ENC_KEY: int
ALG_SID_EXAMPLE: int
CALG_MD2: int
CALG_MD4: int
CALG_MD5: int
CALG_SHA: int
CALG_SHA1: int
CALG_MAC: int
CALG_RSA_SIGN: int
CALG_DSS_SIGN: int
CALG_NO_SIGN: int
CALG_RSA_KEYX: int
CALG_DES: int
CALG_3DES_112: int
CALG_3DES: int
CALG_DESX: int
CALG_RC2: int
CALG_RC4: int
CALG_SEAL: int
CALG_DH_SF: int
CALG_DH_EPHEM: int
CALG_AGREEDKEY_ANY: int
CALG_KEA_KEYX: int
CALG_HUGHES_MD5: int
CALG_SKIPJACK: int
CALG_TEK: int
CALG_CYLINK_MEK: int
CALG_SSL3_SHAMD5: int
CALG_SSL3_MASTER: int
CALG_SCHANNEL_MASTER_HASH: int
CALG_SCHANNEL_MAC_KEY: int
CALG_SCHANNEL_ENC_KEY: int
CALG_PCT1_MASTER: int
CALG_SSL2_MASTER: int
CALG_TLS1_MASTER: int
CALG_RC5: int
CALG_HMAC: int
CALG_TLS1PRF: int
CALG_HASH_REPLACE_OWF: int
CALG_AES_128: int
CALG_AES_192: int
CALG_AES_256: int
CALG_AES: int
CALG_SHA_256: int
CALG_SHA_384: int
CALG_SHA_512: int
CRYPT_VERIFYCONTEXT: int
CRYPT_NEWKEYSET: int
CRYPT_DELETEKEYSET: int
CRYPT_MACHINE_KEYSET: int
CRYPT_SILENT: int
CRYPT_EXPORTABLE: int
CRYPT_USER_PROTECTED: int
CRYPT_CREATE_SALT: int
CRYPT_UPDATE_KEY: int
CRYPT_NO_SALT: int
CRYPT_PREGEN: int
CRYPT_RECIPIENT: int
CRYPT_INITIATOR: int
CRYPT_ONLINE: int
CRYPT_SF: int
CRYPT_CREATE_IV: int
CRYPT_KEK: int
CRYPT_DATA_KEY: int
CRYPT_VOLATILE: int
CRYPT_SGCKEY: int
CRYPT_ARCHIVABLE: int
RSA1024BIT_KEY: int
CRYPT_SERVER: int
KEY_LENGTH_MASK: int
CRYPT_Y_ONLY: int
CRYPT_SSL2_FALLBACK: int
CRYPT_DESTROYKEY: int
CRYPT_OAEP: int
CRYPT_BLOB_VER3: int
CRYPT_IPSEC_HMAC_KEY: int
CRYPT_DECRYPT_RSA_NO_PADDING_CHECK: int
CRYPT_SECRETDIGEST: int
CRYPT_OWF_REPL_LM_HASH: int
CRYPT_LITTLE_ENDIAN: int
CRYPT_NOHASHOID: int
CRYPT_TYPE2_FORMAT: int
CRYPT_X931_FORMAT: int
CRYPT_MACHINE_DEFAULT: int
CRYPT_USER_DEFAULT: int
CRYPT_DELETE_DEFAULT: int
SIMPLEBLOB: int
PUBLICKEYBLOB: int
PRIVATEKEYBLOB: int
PLAINTEXTKEYBLOB: int
OPAQUEKEYBLOB: int
PUBLICKEYBLOBEX: int
SYMMETRICWRAPKEYBLOB: int
AT_KEYEXCHANGE: int
AT_SIGNATURE: int
CRYPT_USERDATA: int
KP_IV: int
KP_SALT: int
KP_PADDING: int
KP_MODE: int
KP_MODE_BITS: int
KP_PERMISSIONS: int
KP_ALGID: int
KP_BLOCKLEN: int
KP_KEYLEN: int
KP_SALT_EX: int
KP_P: int
KP_G: int
KP_Q: int
KP_X: int
KP_Y: int
KP_RA: int
KP_RB: int
KP_INFO: int
KP_EFFECTIVE_KEYLEN: int
KP_SCHANNEL_ALG: int
KP_CLIENT_RANDOM: int
KP_SERVER_RANDOM: int
KP_RP: int
KP_PRECOMP_MD5: int
KP_PRECOMP_SHA: int
KP_CERTIFICATE: int
KP_CLEAR_KEY: int
KP_PUB_EX_LEN: int
KP_PUB_EX_VAL: int
KP_KEYVAL: int
KP_ADMIN_PIN: int
KP_KEYEXCHANGE_PIN: int
KP_SIGNATURE_PIN: int
KP_PREHASH: int
KP_ROUNDS: int
KP_OAEP_PARAMS: int
KP_CMS_KEY_INFO: int
KP_CMS_DH_KEY_INFO: int
KP_PUB_PARAMS: int
KP_VERIFY_PARAMS: int
KP_HIGHEST_VERSION: int
KP_GET_USE_COUNT: int
PKCS5_PADDING: int
RANDOM_PADDING: int
ZERO_PADDING: int
CRYPT_MODE_CBC: int
CRYPT_MODE_ECB: int
CRYPT_MODE_OFB: int
CRYPT_MODE_CFB: int
CRYPT_MODE_CTS: int
CRYPT_ENCRYPT: int
CRYPT_DECRYPT: int
CRYPT_EXPORT: int
CRYPT_READ: int
CRYPT_WRITE: int
CRYPT_MAC: int
CRYPT_EXPORT_KEY: int
CRYPT_IMPORT_KEY: int
CRYPT_ARCHIVE: int
HP_ALGID: int
HP_HASHVAL: int
HP_HASHSIZE: int
HP_HMAC_INFO: int
HP_TLS1PRF_LABEL: int
HP_TLS1PRF_SEED: int
CRYPT_FAILED: int
CRYPT_SUCCEED: int
def RCRYPT_SUCCEEDED(rt: int) -> bool: ...
def RCRYPT_FAILED(rt: int) -> bool: ...
PP_ENUMALGS: int
PP_ENUMCONTAINERS: int
PP_IMPTYPE: int
PP_NAME: int
PP_VERSION: int
PP_CONTAINER: int
PP_CHANGE_PASSWORD: int
PP_KEYSET_SEC_DESCR: int
PP_CERTCHAIN: int
PP_KEY_TYPE_SUBTYPE: int
PP_PROVTYPE: int
PP_KEYSTORAGE: int
PP_APPLI_CERT: int
PP_SYM_KEYSIZE: int
PP_SESSION_KEYSIZE: int
PP_UI_PROMPT: int
PP_ENUMALGS_EX: int
PP_ENUMMANDROOTS: int
PP_ENUMELECTROOTS: int
PP_KEYSET_TYPE: int
PP_ADMIN_PIN: int
PP_KEYEXCHANGE_PIN: int
PP_SIGNATURE_PIN: int
PP_SIG_KEYSIZE_INC: int
PP_KEYX_KEYSIZE_INC: int
PP_UNIQUE_CONTAINER: int
PP_SGC_INFO: int
PP_USE_HARDWARE_RNG: int
PP_KEYSPEC: int
PP_ENUMEX_SIGNING_PROT: int
PP_CRYPT_COUNT_KEY_USE: int
CRYPT_FIRST: int
CRYPT_NEXT: int
CRYPT_SGC_ENUM: int
CRYPT_IMPL_HARDWARE: int
CRYPT_IMPL_SOFTWARE: int
CRYPT_IMPL_MIXED: int
CRYPT_IMPL_UNKNOWN: int
CRYPT_IMPL_REMOVABLE: int
CRYPT_SEC_DESCR: int
CRYPT_PSTORE: int
CRYPT_UI_PROMPT: int
CRYPT_FLAG_PCT1: int
CRYPT_FLAG_SSL2: int
CRYPT_FLAG_SSL3: int
CRYPT_FLAG_TLS1: int
CRYPT_FLAG_IPSEC: int
CRYPT_FLAG_SIGNING: int
CRYPT_SGC: int
CRYPT_FASTSGC: int
PP_CLIENT_HWND: int
PP_CONTEXT_INFO: int
PP_KEYEXCHANGE_KEYSIZE: int
PP_SIGNATURE_KEYSIZE: int
PP_KEYEXCHANGE_ALG: int
PP_SIGNATURE_ALG: int
PP_DELETEKEY: int
PROV_RSA_FULL: int
PROV_RSA_SIG: int
PROV_DSS: int
PROV_FORTEZZA: int
PROV_MS_EXCHANGE: int
PROV_SSL: int
PROV_RSA_SCHANNEL: int
PROV_DSS_DH: int
PROV_EC_ECDSA_SIG: int
PROV_EC_ECNRA_SIG: int
PROV_EC_ECDSA_FULL: int
PROV_EC_ECNRA_FULL: int
PROV_DH_SCHANNEL: int
PROV_SPYRUS_LYNKS: int
PROV_RNG: int
PROV_INTEL_SEC: int
PROV_REPLACE_OWF: int
PROV_RSA_AES: int
MS_DEF_PROV_A: str
MS_DEF_PROV: str
MS_ENHANCED_PROV_A: str
MS_ENHANCED_PROV: str
MS_STRONG_PROV_A: str
MS_STRONG_PROV: str
MS_DEF_RSA_SIG_PROV_A: str
MS_DEF_RSA_SIG_PROV: str
MS_DEF_RSA_SCHANNEL_PROV_A: str
MS_DEF_RSA_SCHANNEL_PROV: str
MS_DEF_DSS_PROV_A: str
MS_DEF_DSS_PROV: str
MS_DEF_DSS_DH_PROV_A: str
MS_DEF_DSS_DH_PROV: str
MS_ENH_DSS_DH_PROV_A: str
MS_ENH_DSS_DH_PROV: str
MS_DEF_DH_SCHANNEL_PROV_A: str
MS_DEF_DH_SCHANNEL_PROV: str
MS_SCARD_PROV_A: str
MS_SCARD_PROV: str
MS_ENH_RSA_AES_PROV_A: str
MS_ENH_RSA_AES_PROV: str
MAXUIDLEN: int
EXPO_OFFLOAD_REG_VALUE: str
EXPO_OFFLOAD_FUNC_NAME: str
szKEY_CRYPTOAPI_PRIVATE_KEY_OPTIONS: str
szFORCE_KEY_PROTECTION: str
dwFORCE_KEY_PROTECTION_DISABLED: int
dwFORCE_KEY_PROTECTION_USER_SELECT: int
dwFORCE_KEY_PROTECTION_HIGH: int
szKEY_CACHE_ENABLED: str
szKEY_CACHE_SECONDS: str
CUR_BLOB_VERSION: int
SCHANNEL_MAC_KEY: int
SCHANNEL_ENC_KEY: int
INTERNATIONAL_USAGE: int
szOID_RSA: str
szOID_PKCS: str
szOID_RSA_HASH: str
szOID_RSA_ENCRYPT: str
szOID_PKCS_1: str
szOID_PKCS_2: str
szOID_PKCS_3: str
szOID_PKCS_4: str
szOID_PKCS_5: str
szOID_PKCS_6: str
szOID_PKCS_7: str
szOID_PKCS_8: str
szOID_PKCS_9: str
szOID_PKCS_10: str
szOID_PKCS_12: str
szOID_RSA_RSA: str
szOID_RSA_MD2RSA: str
szOID_RSA_MD4RSA: str
szOID_RSA_MD5RSA: str
szOID_RSA_SHA1RSA: str
szOID_RSA_SETOAEP_RSA: str
szOID_RSA_DH: str
szOID_RSA_data: str
szOID_RSA_signedData: str
szOID_RSA_envelopedData: str
szOID_RSA_signEnvData: str
szOID_RSA_digestedData: str
szOID_RSA_hashedData: str
szOID_RSA_encryptedData: str
szOID_RSA_emailAddr: str
szOID_RSA_unstructName: str
szOID_RSA_contentType: str
szOID_RSA_messageDigest: str
szOID_RSA_signingTime: str
szOID_RSA_counterSign: str
szOID_RSA_challengePwd: str
szOID_RSA_unstructAddr: str
szOID_RSA_extCertAttrs: str
szOID_RSA_certExtensions: str
szOID_RSA_SMIMECapabilities: str
szOID_RSA_preferSignedData: str
szOID_RSA_SMIMEalg: str
szOID_RSA_SMIMEalgESDH: str
szOID_RSA_SMIMEalgCMS3DESwrap: str
szOID_RSA_SMIMEalgCMSRC2wrap: str
szOID_RSA_MD2: str
szOID_RSA_MD4: str
szOID_RSA_MD5: str
szOID_RSA_RC2CBC: str
szOID_RSA_RC4: str
szOID_RSA_DES_EDE3_CBC: str
szOID_RSA_RC5_CBCPad: str
szOID_ANSI_X942: str
szOID_ANSI_X942_DH: str
szOID_X957: str
szOID_X957_DSA: str
szOID_X957_SHA1DSA: str
szOID_DS: str
szOID_DSALG: str
szOID_DSALG_CRPT: str
szOID_DSALG_HASH: str
szOID_DSALG_SIGN: str
szOID_DSALG_RSA: str
szOID_OIW: str
szOID_OIWSEC: str
szOID_OIWSEC_md4RSA: str
szOID_OIWSEC_md5RSA: str
szOID_OIWSEC_md4RSA2: str
szOID_OIWSEC_desECB: str
szOID_OIWSEC_desCBC: str
szOID_OIWSEC_desOFB: str
szOID_OIWSEC_desCFB: str
szOID_OIWSEC_desMAC: str
szOID_OIWSEC_rsaSign: str
szOID_OIWSEC_dsa: str
szOID_OIWSEC_shaDSA: str
szOID_OIWSEC_mdc2RSA: str
szOID_OIWSEC_shaRSA: str
szOID_OIWSEC_dhCommMod: str
szOID_OIWSEC_desEDE: str
szOID_OIWSEC_sha: str
szOID_OIWSEC_mdc2: str
szOID_OIWSEC_dsaComm: str
szOID_OIWSEC_dsaCommSHA: str
szOID_OIWSEC_rsaXchg: str
szOID_OIWSEC_keyHashSeal: str
szOID_OIWSEC_md2RSASign: str
szOID_OIWSEC_md5RSASign: str
szOID_OIWSEC_sha1: str
szOID_OIWSEC_dsaSHA1: str
szOID_OIWSEC_dsaCommSHA1: str
szOID_OIWSEC_sha1RSASign: str
szOID_OIWDIR: str
szOID_OIWDIR_CRPT: str
szOID_OIWDIR_HASH: str
szOID_OIWDIR_SIGN: str
szOID_OIWDIR_md2: str
szOID_OIWDIR_md2RSA: str
szOID_INFOSEC: str
szOID_INFOSEC_sdnsSignature: str
szOID_INFOSEC_mosaicSignature: str
szOID_INFOSEC_sdnsConfidentiality: str
szOID_INFOSEC_mosaicConfidentiality: str
szOID_INFOSEC_sdnsIntegrity: str
szOID_INFOSEC_mosaicIntegrity: str
szOID_INFOSEC_sdnsTokenProtection: str
szOID_INFOSEC_mosaicTokenProtection: str
szOID_INFOSEC_sdnsKeyManagement: str
szOID_INFOSEC_mosaicKeyManagement: str
szOID_INFOSEC_sdnsKMandSig: str
szOID_INFOSEC_mosaicKMandSig: str
szOID_INFOSEC_SuiteASignature: str
szOID_INFOSEC_SuiteAConfidentiality: str
szOID_INFOSEC_SuiteAIntegrity: str
szOID_INFOSEC_SuiteATokenProtection: str
szOID_INFOSEC_SuiteAKeyManagement: str
szOID_INFOSEC_SuiteAKMandSig: str
szOID_INFOSEC_mosaicUpdatedSig: str
szOID_INFOSEC_mosaicKMandUpdSig: str
szOID_INFOSEC_mosaicUpdatedInteg: str
szOID_COMMON_NAME: str
szOID_SUR_NAME: str
szOID_DEVICE_SERIAL_NUMBER: str
szOID_COUNTRY_NAME: str
szOID_LOCALITY_NAME: str
szOID_STATE_OR_PROVINCE_NAME: str
szOID_STREET_ADDRESS: str
szOID_ORGANIZATION_NAME: str
szOID_ORGANIZATIONAL_UNIT_NAME: str
szOID_TITLE: str
szOID_DESCRIPTION: str
szOID_SEARCH_GUIDE: str
szOID_BUSINESS_CATEGORY: str
szOID_POSTAL_ADDRESS: str
szOID_POSTAL_CODE: str
szOID_POST_OFFICE_BOX: str
szOID_PHYSICAL_DELIVERY_OFFICE_NAME: str
szOID_TELEPHONE_NUMBER: str
szOID_TELEX_NUMBER: str
szOID_TELETEXT_TERMINAL_IDENTIFIER: str
szOID_FACSIMILE_TELEPHONE_NUMBER: str
szOID_X21_ADDRESS: str
szOID_INTERNATIONAL_ISDN_NUMBER: str
szOID_REGISTERED_ADDRESS: str
szOID_DESTINATION_INDICATOR: str
szOID_PREFERRED_DELIVERY_METHOD: str
szOID_PRESENTATION_ADDRESS: str
szOID_SUPPORTED_APPLICATION_CONTEXT: str
szOID_MEMBER: str
szOID_OWNER: str
szOID_ROLE_OCCUPANT: str
szOID_SEE_ALSO: str
szOID_USER_PASSWORD: str
szOID_USER_CERTIFICATE: str
szOID_CA_CERTIFICATE: str
szOID_CROSS_CERTIFICATE_PAIR: str
szOID_GIVEN_NAME: str
szOID_INITIALS: str
szOID_DN_QUALIFIER: str
szOID_DOMAIN_COMPONENT: str
szOID_PKCS_12_FRIENDLY_NAME_ATTR: str
szOID_PKCS_12_LOCAL_KEY_ID: str
szOID_PKCS_12_KEY_PROVIDER_NAME_ATTR: str
szOID_LOCAL_MACHINE_KEYSET: str
szOID_KEYID_RDN: str
CERT_RDN_ANY_TYPE: int
CERT_RDN_ENCODED_BLOB: int
CERT_RDN_OCTET_STRING: int
CERT_RDN_NUMERIC_STRING: int
CERT_RDN_PRINTABLE_STRING: int
CERT_RDN_TELETEX_STRING: int
CERT_RDN_T61_STRING: int
CERT_RDN_VIDEOTEX_STRING: int
CERT_RDN_IA5_STRING: int
CERT_RDN_GRAPHIC_STRING: int
CERT_RDN_VISIBLE_STRING: int
CERT_RDN_ISO646_STRING: int
CERT_RDN_GENERAL_STRING: int
CERT_RDN_UNIVERSAL_STRING: int
CERT_RDN_INT4_STRING: int
CERT_RDN_BMP_STRING: int
CERT_RDN_UNICODE_STRING: int
CERT_RDN_UTF8_STRING: int
CERT_RDN_TYPE_MASK: int
CERT_RDN_FLAGS_MASK: int
CERT_RDN_ENABLE_T61_UNICODE_FLAG: int
CERT_RDN_ENABLE_UTF8_UNICODE_FLAG: int
CERT_RDN_DISABLE_CHECK_TYPE_FLAG: int
CERT_RDN_DISABLE_IE4_UTF8_FLAG: int
CERT_RSA_PUBLIC_KEY_OBJID: str
CERT_DEFAULT_OID_PUBLIC_KEY_SIGN: str
CERT_DEFAULT_OID_PUBLIC_KEY_XCHG: str
CERT_V1: int
CERT_V2: int
CERT_V3: int
CERT_INFO_VERSION_FLAG: int
CERT_INFO_SERIAL_NUMBER_FLAG: int
CERT_INFO_SIGNATURE_ALGORITHM_FLAG: int
CERT_INFO_ISSUER_FLAG: int
CERT_INFO_NOT_BEFORE_FLAG: int
CERT_INFO_NOT_AFTER_FLAG: int
CERT_INFO_SUBJECT_FLAG: int
CERT_INFO_SUBJECT_PUBLIC_KEY_INFO_FLAG: int
CERT_INFO_ISSUER_UNIQUE_ID_FLAG: int
CERT_INFO_SUBJECT_UNIQUE_ID_FLAG: int
CERT_INFO_EXTENSION_FLAG: int
CRL_V1: int
CRL_V2: int
CERT_REQUEST_V1: int
CERT_KEYGEN_REQUEST_V1: int
CTL_V1: int
CERT_ENCODING_TYPE_MASK: int
CMSG_ENCODING_TYPE_MASK: int
def GET_CERT_ENCODING_TYPE(X: int) -> int: ...
def GET_CMSG_ENCODING_TYPE(X: int) -> int: ...
CRYPT_ASN_ENCODING: int
CRYPT_NDR_ENCODING: int
X509_ASN_ENCODING: int
X509_NDR_ENCODING: int
PKCS_7_ASN_ENCODING: int
PKCS_7_NDR_ENCODING: int
CRYPT_FORMAT_STR_MULTI_LINE: int
CRYPT_FORMAT_STR_NO_HEX: int
CRYPT_FORMAT_SIMPLE: int
CRYPT_FORMAT_X509: int
CRYPT_FORMAT_OID: int
CRYPT_FORMAT_RDN_SEMICOLON: int
CRYPT_FORMAT_RDN_CRLF: int
CRYPT_FORMAT_RDN_UNQUOTE: int
CRYPT_FORMAT_RDN_REVERSE: int
CRYPT_FORMAT_COMMA: int
CRYPT_FORMAT_SEMICOLON: int
CRYPT_FORMAT_CRLF: int
CRYPT_ENCODE_NO_SIGNATURE_BYTE_REVERSAL_FLAG: int
CRYPT_ENCODE_ALLOC_FLAG: int
CRYPT_UNICODE_NAME_ENCODE_ENABLE_T61_UNICODE_FLAG: int
CRYPT_UNICODE_NAME_ENCODE_ENABLE_UTF8_UNICODE_FLAG: int
CRYPT_UNICODE_NAME_ENCODE_DISABLE_CHECK_TYPE_FLAG: int
CRYPT_SORTED_CTL_ENCODE_HASHED_SUBJECT_IDENTIFIER_FLAG: int
CRYPT_DECODE_NOCOPY_FLAG: int
CRYPT_DECODE_TO_BE_SIGNED_FLAG: int
CRYPT_DECODE_SHARE_OID_STRING_FLAG: int
CRYPT_DECODE_NO_SIGNATURE_BYTE_REVERSAL_FLAG: int
CRYPT_DECODE_ALLOC_FLAG: int
CRYPT_UNICODE_NAME_DECODE_DISABLE_IE4_UTF8_FLAG: int
CRYPT_ENCODE_DECODE_NONE: int
X509_CERT: int
X509_CERT_TO_BE_SIGNED: int
X509_CERT_CRL_TO_BE_SIGNED: int
X509_CERT_REQUEST_TO_BE_SIGNED: int
X509_EXTENSIONS: int
X509_NAME_VALUE: int
X509_NAME: int
X509_PUBLIC_KEY_INFO: int
X509_AUTHORITY_KEY_ID: int
X509_KEY_ATTRIBUTES: int
X509_KEY_USAGE_RESTRICTION: int
X509_ALTERNATE_NAME: int
X509_BASIC_CONSTRAINTS: int
X509_KEY_USAGE: int
X509_BASIC_CONSTRAINTS2: int
X509_CERT_POLICIES: int
PKCS_UTC_TIME: int
PKCS_TIME_REQUEST: int
RSA_CSP_PUBLICKEYBLOB: int
X509_UNICODE_NAME: int
X509_KEYGEN_REQUEST_TO_BE_SIGNED: int
PKCS_ATTRIBUTE: int
PKCS_CONTENT_INFO_SEQUENCE_OF_ANY: int
X509_UNICODE_NAME_VALUE: int
X509_ANY_STRING: int
X509_UNICODE_ANY_STRING: int
X509_OCTET_STRING: int
X509_BITS: int
X509_INTEGER: int
X509_MULTI_BYTE_INTEGER: int
X509_ENUMERATED: int
X509_CHOICE_OF_TIME: int
X509_AUTHORITY_KEY_ID2: int
X509_AUTHORITY_INFO_ACCESS: int
X509_SUBJECT_INFO_ACCESS: int
X509_CRL_REASON_CODE: int
PKCS_CONTENT_INFO: int
X509_SEQUENCE_OF_ANY: int
X509_CRL_DIST_POINTS: int
X509_ENHANCED_KEY_USAGE: int
PKCS_CTL: int
X509_MULTI_BYTE_UINT: int
X509_DSS_PUBLICKEY: int
X509_DSS_PARAMETERS: int
X509_DSS_SIGNATURE: int
PKCS_RC2_CBC_PARAMETERS: int
PKCS_SMIME_CAPABILITIES: int
X509_QC_STATEMENTS_EXT: int
PKCS_RSA_PRIVATE_KEY: int
PKCS_PRIVATE_KEY_INFO: int
PKCS_ENCRYPTED_PRIVATE_KEY_INFO: int
X509_PKIX_POLICY_QUALIFIER_USERNOTICE: int
X509_DH_PUBLICKEY: int
X509_DH_PARAMETERS: int
PKCS_ATTRIBUTES: int
PKCS_SORTED_CTL: int
X509_ECC_SIGNATURE: int
X942_DH_PARAMETERS: int
X509_BITS_WITHOUT_TRAILING_ZEROES: int
X942_OTHER_INFO: int
X509_CERT_PAIR: int
X509_ISSUING_DIST_POINT: int
X509_NAME_CONSTRAINTS: int
X509_POLICY_MAPPINGS: int
X509_POLICY_CONSTRAINTS: int
X509_CROSS_CERT_DIST_POINTS: int
CMC_DATA: int
CMC_RESPONSE: int
CMC_STATUS: int
CMC_ADD_EXTENSIONS: int
CMC_ADD_ATTRIBUTES: int
X509_CERTIFICATE_TEMPLATE: int
OCSP_SIGNED_REQUEST: int
OCSP_REQUEST: int
OCSP_RESPONSE: int
OCSP_BASIC_SIGNED_RESPONSE: int
OCSP_BASIC_RESPONSE: int
X509_LOGOTYPE_EXT: int
X509_BIOMETRIC_EXT: int
CNG_RSA_PUBLIC_KEY_BLOB: int
X509_OBJECT_IDENTIFIER: int
X509_ALGORITHM_IDENTIFIER: int
PKCS_RSA_SSA_PSS_PARAMETERS: int
PKCS_RSAES_OAEP_PARAMETERS: int
ECC_CMS_SHARED_INFO: int
TIMESTAMP_REQUEST: int
TIMESTAMP_RESPONSE: int
TIMESTAMP_INFO: int
X509_CERT_BUNDLE: int
PKCS7_SIGNER_INFO: int
CMS_SIGNER_INFO: int
szOID_AUTHORITY_KEY_IDENTIFIER: str
szOID_KEY_ATTRIBUTES: str
szOID_CERT_POLICIES_95: str
szOID_KEY_USAGE_RESTRICTION: str
szOID_SUBJECT_ALT_NAME: str
szOID_ISSUER_ALT_NAME: str
szOID_BASIC_CONSTRAINTS: str
szOID_KEY_USAGE: str
szOID_PRIVATEKEY_USAGE_PERIOD: str
szOID_BASIC_CONSTRAINTS2: str
szOID_CERT_POLICIES: str
szOID_ANY_CERT_POLICY: str
szOID_AUTHORITY_KEY_IDENTIFIER2: str
szOID_SUBJECT_KEY_IDENTIFIER: str
szOID_SUBJECT_ALT_NAME2: str
szOID_ISSUER_ALT_NAME2: str
szOID_CRL_REASON_CODE: str
szOID_REASON_CODE_HOLD: str
szOID_CRL_DIST_POINTS: str
szOID_ENHANCED_KEY_USAGE: str
szOID_CRL_NUMBER: str
szOID_DELTA_CRL_INDICATOR: str
szOID_ISSUING_DIST_POINT: str
szOID_FRESHEST_CRL: str
szOID_NAME_CONSTRAINTS: str
szOID_POLICY_MAPPINGS: str
szOID_LEGACY_POLICY_MAPPINGS: str
szOID_POLICY_CONSTRAINTS: str
szOID_RENEWAL_CERTIFICATE: str
szOID_ENROLLMENT_NAME_VALUE_PAIR: str
szOID_ENROLLMENT_CSP_PROVIDER: str
szOID_OS_VERSION: str
szOID_ENROLLMENT_AGENT: str
szOID_PKIX: str
szOID_PKIX_PE: str
szOID_AUTHORITY_INFO_ACCESS: str
szOID_CERT_EXTENSIONS: str
szOID_NEXT_UPDATE_LOCATION: str
szOID_REMOVE_CERTIFICATE: str
szOID_CROSS_CERT_DIST_POINTS: str
szOID_CTL: str
szOID_SORTED_CTL: str
szOID_SERIALIZED: str
szOID_NT_PRINCIPAL_NAME: str
szOID_PRODUCT_UPDATE: str
szOID_ANY_APPLICATION_POLICY: str
szOID_AUTO_ENROLL_CTL_USAGE: str
szOID_ENROLL_CERTTYPE_EXTENSION: str
szOID_CERT_MANIFOLD: str
szOID_CERTSRV_CA_VERSION: str
szOID_CERTSRV_PREVIOUS_CERT_HASH: str
szOID_CRL_VIRTUAL_BASE: str
szOID_CRL_NEXT_PUBLISH: str
szOID_KP_CA_EXCHANGE: str
szOID_KP_KEY_RECOVERY_AGENT: str
szOID_CERTIFICATE_TEMPLATE: str
szOID_ENTERPRISE_OID_ROOT: str
szOID_RDN_DUMMY_SIGNER: str
szOID_APPLICATION_CERT_POLICIES: str
szOID_APPLICATION_POLICY_MAPPINGS: str
szOID_APPLICATION_POLICY_CONSTRAINTS: str
szOID_ARCHIVED_KEY_ATTR: str
szOID_CRL_SELF_CDP: str
szOID_REQUIRE_CERT_CHAIN_POLICY: str
szOID_ARCHIVED_KEY_CERT_HASH: str
szOID_ISSUED_CERT_HASH: str
szOID_DS_EMAIL_REPLICATION: str
szOID_REQUEST_CLIENT_INFO: str
szOID_ENCRYPTED_KEY_HASH: str
szOID_CERTSRV_CROSSCA_VERSION: str
szOID_NTDS_REPLICATION: str
szOID_SUBJECT_DIR_ATTRS: str
szOID_PKIX_KP: str
szOID_PKIX_KP_SERVER_AUTH: str
szOID_PKIX_KP_CLIENT_AUTH: str
szOID_PKIX_KP_CODE_SIGNING: str
szOID_PKIX_KP_EMAIL_PROTECTION: str
szOID_PKIX_KP_IPSEC_END_SYSTEM: str
szOID_PKIX_KP_IPSEC_TUNNEL: str
szOID_PKIX_KP_IPSEC_USER: str
szOID_PKIX_KP_TIMESTAMP_SIGNING: str
szOID_IPSEC_KP_IKE_INTERMEDIATE: str
szOID_KP_CTL_USAGE_SIGNING: str
szOID_KP_TIME_STAMP_SIGNING: str
szOID_SERVER_GATED_CRYPTO: str
szOID_SGC_NETSCAPE: str
szOID_KP_EFS: str
szOID_EFS_RECOVERY: str
szOID_WHQL_CRYPTO: str
szOID_NT5_CRYPTO: str
szOID_OEM_WHQL_CRYPTO: str
szOID_EMBEDDED_NT_CRYPTO: str
szOID_KP_QUALIFIED_SUBORDINATION: str
szOID_KP_KEY_RECOVERY: str
szOID_KP_DOCUMENT_SIGNING: str
szOID_KP_LIFETIME_SIGNING: str
szOID_KP_MOBILE_DEVICE_SOFTWARE: str
szOID_DRM: str
szOID_DRM_INDIVIDUALIZATION: str
szOID_LICENSES: str
szOID_LICENSE_SERVER: str
szOID_KP_SMARTCARD_LOGON: str
szOID_YESNO_TRUST_ATTR: str
szOID_PKIX_POLICY_QUALIFIER_CPS: str
szOID_PKIX_POLICY_QUALIFIER_USERNOTICE: str
szOID_CERT_POLICIES_95_QUALIFIER1: str
CERT_UNICODE_RDN_ERR_INDEX_MASK: int
CERT_UNICODE_RDN_ERR_INDEX_SHIFT: int
CERT_UNICODE_ATTR_ERR_INDEX_MASK: int
CERT_UNICODE_ATTR_ERR_INDEX_SHIFT: int
CERT_UNICODE_VALUE_ERR_INDEX_MASK: int
CERT_UNICODE_VALUE_ERR_INDEX_SHIFT: int
CERT_DIGITAL_SIGNATURE_KEY_USAGE: int
CERT_NON_REPUDIATION_KEY_USAGE: int
CERT_KEY_ENCIPHERMENT_KEY_USAGE: int
CERT_DATA_ENCIPHERMENT_KEY_USAGE: int
CERT_KEY_AGREEMENT_KEY_USAGE: int
CERT_KEY_CERT_SIGN_KEY_USAGE: int
CERT_OFFLINE_CRL_SIGN_KEY_USAGE: int
CERT_CRL_SIGN_KEY_USAGE: int
CERT_ENCIPHER_ONLY_KEY_USAGE: int
CERT_DECIPHER_ONLY_KEY_USAGE: int
CERT_ALT_NAME_OTHER_NAME: int
CERT_ALT_NAME_RFC822_NAME: int
CERT_ALT_NAME_DNS_NAME: int
CERT_ALT_NAME_X400_ADDRESS: int
CERT_ALT_NAME_DIRECTORY_NAME: int
CERT_ALT_NAME_EDI_PARTY_NAME: int
CERT_ALT_NAME_URL: int
CERT_ALT_NAME_IP_ADDRESS: int
CERT_ALT_NAME_REGISTERED_ID: int
CERT_ALT_NAME_ENTRY_ERR_INDEX_MASK: int
CERT_ALT_NAME_ENTRY_ERR_INDEX_SHIFT: int
CERT_ALT_NAME_VALUE_ERR_INDEX_MASK: int
CERT_ALT_NAME_VALUE_ERR_INDEX_SHIFT: int
CERT_CA_SUBJECT_FLAG: int
CERT_END_ENTITY_SUBJECT_FLAG: int
szOID_PKIX_ACC_DESCR: str
szOID_PKIX_OCSP: str
szOID_PKIX_CA_ISSUERS: str
CRL_REASON_UNSPECIFIED: int
CRL_REASON_KEY_COMPROMISE: int
CRL_REASON_CA_COMPROMISE: int
CRL_REASON_AFFILIATION_CHANGED: int
CRL_REASON_SUPERSEDED: int
CRL_REASON_CESSATION_OF_OPERATION: int
CRL_REASON_CERTIFICATE_HOLD: int
CRL_REASON_REMOVE_FROM_CRL: int
CRL_DIST_POINT_NO_NAME: int
CRL_DIST_POINT_FULL_NAME: int
CRL_DIST_POINT_ISSUER_RDN_NAME: int
CRL_REASON_UNUSED_FLAG: int
CRL_REASON_KEY_COMPROMISE_FLAG: int
CRL_REASON_CA_COMPROMISE_FLAG: int
CRL_REASON_AFFILIATION_CHANGED_FLAG: int
CRL_REASON_SUPERSEDED_FLAG: int
CRL_REASON_CESSATION_OF_OPERATION_FLAG: int
CRL_REASON_CERTIFICATE_HOLD_FLAG: int
CRL_DIST_POINT_ERR_INDEX_MASK: int
CRL_DIST_POINT_ERR_INDEX_SHIFT: int
CRL_DIST_POINT_ERR_CRL_ISSUER_BIT: int
CROSS_CERT_DIST_POINT_ERR_INDEX_MASK: int
CROSS_CERT_DIST_POINT_ERR_INDEX_SHIFT: int
CERT_EXCLUDED_SUBTREE_BIT: int
SORTED_CTL_EXT_FLAGS_OFFSET: int
SORTED_CTL_EXT_COUNT_OFFSET: int
SORTED_CTL_EXT_MAX_COLLISION_OFFSET: int
SORTED_CTL_EXT_HASH_BUCKET_OFFSET: int
SORTED_CTL_EXT_HASHED_SUBJECT_IDENTIFIER_FLAG: int
CERT_DSS_R_LEN: int
CERT_DSS_S_LEN: int
CERT_DSS_SIGNATURE_LEN: int
CERT_MAX_ASN_ENCODED_DSS_SIGNATURE_LEN: int
CRYPT_X942_COUNTER_BYTE_LENGTH: int
CRYPT_X942_KEY_LENGTH_BYTE_LENGTH: int
CRYPT_X942_PUB_INFO_BYTE_LENGTH: float
CRYPT_RC2_40BIT_VERSION: int
CRYPT_RC2_56BIT_VERSION: int
CRYPT_RC2_64BIT_VERSION: int
CRYPT_RC2_128BIT_VERSION: int
szOID_VERISIGN_PRIVATE_6_9: str
szOID_VERISIGN_ONSITE_JURISDICTION_HASH: str
szOID_VERISIGN_BITSTRING_6_13: str
szOID_VERISIGN_ISS_STRONG_CRYPTO: str
szOID_NETSCAPE: str
szOID_NETSCAPE_CERT_EXTENSION: str
szOID_NETSCAPE_CERT_TYPE: str
szOID_NETSCAPE_BASE_URL: str
szOID_NETSCAPE_REVOCATION_URL: str
szOID_NETSCAPE_CA_REVOCATION_URL: str
szOID_NETSCAPE_CERT_RENEWAL_URL: str
szOID_NETSCAPE_CA_POLICY_URL: str
szOID_NETSCAPE_SSL_SERVER_NAME: str
szOID_NETSCAPE_COMMENT: str
szOID_NETSCAPE_DATA_TYPE: str
szOID_NETSCAPE_CERT_SEQUENCE: str
NETSCAPE_SSL_CLIENT_AUTH_CERT_TYPE: int
NETSCAPE_SSL_SERVER_AUTH_CERT_TYPE: int
NETSCAPE_SMIME_CERT_TYPE: int
NETSCAPE_SIGN_CERT_TYPE: int
NETSCAPE_SSL_CA_CERT_TYPE: int
NETSCAPE_SMIME_CA_CERT_TYPE: int
NETSCAPE_SIGN_CA_CERT_TYPE: int
szOID_CT_PKI_DATA: str
szOID_CT_PKI_RESPONSE: str
szOID_PKIX_NO_SIGNATURE: str
szOID_CMC: str
szOID_CMC_STATUS_INFO: str
szOID_CMC_IDENTIFICATION: str
szOID_CMC_IDENTITY_PROOF: str
szOID_CMC_DATA_RETURN: str
szOID_CMC_TRANSACTION_ID: str
szOID_CMC_SENDER_NONCE: str
szOID_CMC_RECIPIENT_NONCE: str
szOID_CMC_ADD_EXTENSIONS: str
szOID_CMC_ENCRYPTED_POP: str
szOID_CMC_DECRYPTED_POP: str
szOID_CMC_LRA_POP_WITNESS: str
szOID_CMC_GET_CERT: str
szOID_CMC_GET_CRL: str
szOID_CMC_REVOKE_REQUEST: str
szOID_CMC_REG_INFO: str
szOID_CMC_RESPONSE_INFO: str
szOID_CMC_QUERY_PENDING: str
szOID_CMC_ID_POP_LINK_RANDOM: str
szOID_CMC_ID_POP_LINK_WITNESS: str
szOID_CMC_ID_CONFIRM_CERT_ACCEPTANCE: str
szOID_CMC_ADD_ATTRIBUTES: str
CMC_TAGGED_CERT_REQUEST_CHOICE: int
CMC_OTHER_INFO_NO_CHOICE: int
CMC_OTHER_INFO_FAIL_CHOICE: int
CMC_OTHER_INFO_PEND_CHOICE: int
CMC_STATUS_SUCCESS: int
CMC_STATUS_FAILED: int
CMC_STATUS_PENDING: int
CMC_STATUS_NO_SUPPORT: int
CMC_STATUS_CONFIRM_REQUIRED: int
CMC_FAIL_BAD_ALG: int
CMC_FAIL_BAD_MESSAGE_CHECK: int
CMC_FAIL_BAD_REQUEST: int
CMC_FAIL_BAD_TIME: int
CMC_FAIL_BAD_CERT_ID: int
CMC_FAIL_UNSUPORTED_EXT: int
CMC_FAIL_MUST_ARCHIVE_KEYS: int
CMC_FAIL_BAD_IDENTITY: int
CMC_FAIL_POP_REQUIRED: int
CMC_FAIL_POP_FAILED: int
CMC_FAIL_NO_KEY_REUSE: int
CMC_FAIL_INTERNAL_CA_ERROR: int
CMC_FAIL_TRY_LATER: int
CRYPT_OID_ENCODE_OBJECT_FUNC: str
CRYPT_OID_DECODE_OBJECT_FUNC: str
CRYPT_OID_ENCODE_OBJECT_EX_FUNC: str
CRYPT_OID_DECODE_OBJECT_EX_FUNC: str
CRYPT_OID_CREATE_COM_OBJECT_FUNC: str
CRYPT_OID_VERIFY_REVOCATION_FUNC: str
CRYPT_OID_VERIFY_CTL_USAGE_FUNC: str
CRYPT_OID_FORMAT_OBJECT_FUNC: str
CRYPT_OID_FIND_OID_INFO_FUNC: str
CRYPT_OID_FIND_LOCALIZED_NAME_FUNC: str
CRYPT_OID_REGPATH: str
CRYPT_OID_REG_ENCODING_TYPE_PREFIX: str
CRYPT_OID_REG_DLL_VALUE_NAME: str
CRYPT_OID_REG_FUNC_NAME_VALUE_NAME: str
CRYPT_OID_REG_FUNC_NAME_VALUE_NAME_A: str
CRYPT_OID_REG_FLAGS_VALUE_NAME: str
CRYPT_DEFAULT_OID: str
CRYPT_INSTALL_OID_FUNC_BEFORE_FLAG: int
CRYPT_GET_INSTALLED_OID_FUNC_FLAG: int
CRYPT_REGISTER_FIRST_INDEX: int
CRYPT_REGISTER_LAST_INDEX: int
CRYPT_MATCH_ANY_ENCODING_TYPE: int
CRYPT_HASH_ALG_OID_GROUP_ID: int
CRYPT_ENCRYPT_ALG_OID_GROUP_ID: int
CRYPT_PUBKEY_ALG_OID_GROUP_ID: int
CRYPT_SIGN_ALG_OID_GROUP_ID: int
CRYPT_RDN_ATTR_OID_GROUP_ID: int
CRYPT_EXT_OR_ATTR_OID_GROUP_ID: int
CRYPT_ENHKEY_USAGE_OID_GROUP_ID: int
CRYPT_POLICY_OID_GROUP_ID: int
CRYPT_TEMPLATE_OID_GROUP_ID: int
CRYPT_LAST_OID_GROUP_ID: int
CRYPT_FIRST_ALG_OID_GROUP_ID: int
CRYPT_LAST_ALG_OID_GROUP_ID: int
CRYPT_OID_INHIBIT_SIGNATURE_FORMAT_FLAG: int
CRYPT_OID_USE_PUBKEY_PARA_FOR_PKCS7_FLAG: int
CRYPT_OID_NO_NULL_ALGORITHM_PARA_FLAG: int
CRYPT_OID_INFO_OID_KEY: int
CRYPT_OID_INFO_NAME_KEY: int
CRYPT_OID_INFO_ALGID_KEY: int
CRYPT_OID_INFO_SIGN_KEY: int
CRYPT_INSTALL_OID_INFO_BEFORE_FLAG: int
CRYPT_LOCALIZED_NAME_ENCODING_TYPE: int
CRYPT_LOCALIZED_NAME_OID: str
szOID_PKCS_7_DATA: str
szOID_PKCS_7_SIGNED: str
szOID_PKCS_7_ENVELOPED: str
szOID_PKCS_7_SIGNEDANDENVELOPED: str
szOID_PKCS_7_DIGESTED: str
szOID_PKCS_7_ENCRYPTED: str
szOID_PKCS_9_CONTENT_TYPE: str
szOID_PKCS_9_MESSAGE_DIGEST: str
CMSG_DATA: int
CMSG_SIGNED: int
CMSG_ENVELOPED: int
CMSG_SIGNED_AND_ENVELOPED: int
CMSG_HASHED: int
CMSG_ENCRYPTED: int
CMSG_ALL_FLAGS: int
CMSG_DATA_FLAG: int
CMSG_SIGNED_FLAG: int
CMSG_ENVELOPED_FLAG: int
CMSG_SIGNED_AND_ENVELOPED_FLAG: int
CMSG_HASHED_FLAG: int
CMSG_ENCRYPTED_FLAG: int
CERT_ID_ISSUER_SERIAL_NUMBER: int
CERT_ID_KEY_IDENTIFIER: int
CERT_ID_SHA1_HASH: int
CMSG_KEY_AGREE_EPHEMERAL_KEY_CHOICE: int
CMSG_KEY_AGREE_STATIC_KEY_CHOICE: int
CMSG_KEY_TRANS_RECIPIENT: int
CMSG_KEY_AGREE_RECIPIENT: int
CMSG_SP3_COMPATIBLE_ENCRYPT_FLAG: int
CMSG_RC4_NO_SALT_FLAG: int
CMSG_INDEFINITE_LENGTH: int
CMSG_BARE_CONTENT_FLAG: int
CMSG_LENGTH_ONLY_FLAG: int
CMSG_DETACHED_FLAG: int
CMSG_AUTHENTICATED_ATTRIBUTES_FLAG: int
CMSG_CONTENTS_OCTETS_FLAG: int
CMSG_MAX_LENGTH_FLAG: int
CMSG_CMS_ENCAPSULATED_CONTENT_FLAG: int
CMSG_CRYPT_RELEASE_CONTEXT_FLAG: int
CMSG_TYPE_PARAM: int
CMSG_CONTENT_PARAM: int
CMSG_BARE_CONTENT_PARAM: int
CMSG_INNER_CONTENT_TYPE_PARAM: int
CMSG_SIGNER_COUNT_PARAM: int
CMSG_SIGNER_INFO_PARAM: int
CMSG_SIGNER_CERT_INFO_PARAM: int
CMSG_SIGNER_HASH_ALGORITHM_PARAM: int
CMSG_SIGNER_AUTH_ATTR_PARAM: int
CMSG_SIGNER_UNAUTH_ATTR_PARAM: int
CMSG_CERT_COUNT_PARAM: int
CMSG_CERT_PARAM: int
CMSG_CRL_COUNT_PARAM: int
CMSG_CRL_PARAM: int
CMSG_ENVELOPE_ALGORITHM_PARAM: int
CMSG_RECIPIENT_COUNT_PARAM: int
CMSG_RECIPIENT_INDEX_PARAM: int
CMSG_RECIPIENT_INFO_PARAM: int
CMSG_HASH_ALGORITHM_PARAM: int
CMSG_HASH_DATA_PARAM: int
CMSG_COMPUTED_HASH_PARAM: int
CMSG_ENCRYPT_PARAM: int
CMSG_ENCRYPTED_DIGEST: int
CMSG_ENCODED_SIGNER: int
CMSG_ENCODED_MESSAGE: int
CMSG_VERSION_PARAM: int
CMSG_ATTR_CERT_COUNT_PARAM: int
CMSG_ATTR_CERT_PARAM: int
CMSG_CMS_RECIPIENT_COUNT_PARAM: int
CMSG_CMS_RECIPIENT_INDEX_PARAM: int
CMSG_CMS_RECIPIENT_ENCRYPTED_KEY_INDEX_PARAM: int
CMSG_CMS_RECIPIENT_INFO_PARAM: int
CMSG_UNPROTECTED_ATTR_PARAM: int
CMSG_SIGNER_CERT_ID_PARAM: int
CMSG_CMS_SIGNER_INFO_PARAM: int
CMSG_SIGNED_DATA_V1: int
CMSG_SIGNED_DATA_V3: int
CMSG_SIGNED_DATA_PKCS_1_5_VERSION: int
CMSG_SIGNED_DATA_CMS_VERSION: int
CMSG_SIGNER_INFO_V1: int
CMSG_SIGNER_INFO_V3: int
CMSG_SIGNER_INFO_PKCS_1_5_VERSION: int
CMSG_SIGNER_INFO_CMS_VERSION: int
CMSG_HASHED_DATA_V0: int
CMSG_HASHED_DATA_V2: int
CMSG_HASHED_DATA_PKCS_1_5_VERSION: int
CMSG_HASHED_DATA_CMS_VERSION: int
CMSG_ENVELOPED_DATA_V0: int
CMSG_ENVELOPED_DATA_V2: int
CMSG_ENVELOPED_DATA_PKCS_1_5_VERSION: int
CMSG_ENVELOPED_DATA_CMS_VERSION: int
CMSG_KEY_AGREE_ORIGINATOR_CERT: int
CMSG_KEY_AGREE_ORIGINATOR_PUBLIC_KEY: int
CMSG_ENVELOPED_RECIPIENT_V0: int
CMSG_ENVELOPED_RECIPIENT_V2: int
CMSG_ENVELOPED_RECIPIENT_V3: int
CMSG_ENVELOPED_RECIPIENT_V4: int
CMSG_KEY_TRANS_PKCS_1_5_VERSION: int
CMSG_KEY_TRANS_CMS_VERSION: int
CMSG_KEY_AGREE_VERSION: int
CMSG_CTRL_VERIFY_SIGNATURE: int
CMSG_CTRL_DECRYPT: int
CMSG_CTRL_VERIFY_HASH: int
CMSG_CTRL_ADD_SIGNER: int
CMSG_CTRL_DEL_SIGNER: int
CMSG_CTRL_ADD_SIGNER_UNAUTH_ATTR: int
CMSG_CTRL_DEL_SIGNER_UNAUTH_ATTR: int
CMSG_CTRL_ADD_CERT: int
CMSG_CTRL_DEL_CERT: int
CMSG_CTRL_ADD_CRL: int
CMSG_CTRL_DEL_CRL: int
CMSG_CTRL_ADD_ATTR_CERT: int
CMSG_CTRL_DEL_ATTR_CERT: int
CMSG_CTRL_KEY_TRANS_DECRYPT: int
CMSG_CTRL_KEY_AGREE_DECRYPT: int
CMSG_CTRL_VERIFY_SIGNATURE_EX: int
CMSG_CTRL_ADD_CMS_SIGNER_INFO: int
CMSG_VERIFY_SIGNER_PUBKEY: int
CMSG_VERIFY_SIGNER_CERT: int
CMSG_VERIFY_SIGNER_CHAIN: int
CMSG_VERIFY_SIGNER_NULL: int
CMSG_OID_GEN_ENCRYPT_KEY_FUNC: str
CMSG_OID_EXPORT_ENCRYPT_KEY_FUNC: str
CMSG_OID_IMPORT_ENCRYPT_KEY_FUNC: str
CMSG_CONTENT_ENCRYPT_PAD_ENCODED_LEN_FLAG: int
CMSG_DEFAULT_INSTALLABLE_FUNC_OID: int
CMSG_CONTENT_ENCRYPT_FREE_PARA_FLAG: int
CMSG_CONTENT_ENCRYPT_RELEASE_CONTEXT_FLAG: int
CMSG_OID_GEN_CONTENT_ENCRYPT_KEY_FUNC: str
CMSG_KEY_TRANS_ENCRYPT_FREE_PARA_FLAG: int
CMSG_OID_EXPORT_KEY_TRANS_FUNC: str
CMSG_KEY_AGREE_ENCRYPT_FREE_PARA_FLAG: int
CMSG_KEY_AGREE_ENCRYPT_FREE_MATERIAL_FLAG: int
CMSG_KEY_AGREE_ENCRYPT_FREE_PUBKEY_ALG_FLAG: int
CMSG_KEY_AGREE_ENCRYPT_FREE_PUBKEY_PARA_FLAG: int
CMSG_KEY_AGREE_ENCRYPT_FREE_PUBKEY_BITS_FLAG: int
CMSG_OID_EXPORT_KEY_AGREE_FUNC: str
CMSG_OID_IMPORT_KEY_TRANS_FUNC: str
CMSG_OID_IMPORT_KEY_AGREE_FUNC: str
CERT_KEY_PROV_HANDLE_PROP_ID: int
CERT_KEY_PROV_INFO_PROP_ID: int
CERT_SHA1_HASH_PROP_ID: int
CERT_MD5_HASH_PROP_ID: int
CERT_HASH_PROP_ID: int
CERT_KEY_CONTEXT_PROP_ID: int
CERT_KEY_SPEC_PROP_ID: int
CERT_IE30_RESERVED_PROP_ID: int
CERT_PUBKEY_HASH_RESERVED_PROP_ID: int
CERT_ENHKEY_USAGE_PROP_ID: int
CERT_CTL_USAGE_PROP_ID: int
CERT_NEXT_UPDATE_LOCATION_PROP_ID: int
CERT_FRIENDLY_NAME_PROP_ID: int
CERT_PVK_FILE_PROP_ID: int
CERT_DESCRIPTION_PROP_ID: int
CERT_ACCESS_STATE_PROP_ID: int
CERT_SIGNATURE_HASH_PROP_ID: int
CERT_SMART_CARD_DATA_PROP_ID: int
CERT_EFS_PROP_ID: int
CERT_FORTEZZA_DATA_PROP_ID: int
CERT_ARCHIVED_PROP_ID: int
CERT_KEY_IDENTIFIER_PROP_ID: int
CERT_AUTO_ENROLL_PROP_ID: int
CERT_PUBKEY_ALG_PARA_PROP_ID: int
CERT_CROSS_CERT_DIST_POINTS_PROP_ID: int
CERT_ISSUER_PUBLIC_KEY_MD5_HASH_PROP_ID: int
CERT_SUBJECT_PUBLIC_KEY_MD5_HASH_PROP_ID: int
CERT_ENROLLMENT_PROP_ID: int
CERT_DATE_STAMP_PROP_ID: int
CERT_ISSUER_SERIAL_NUMBER_MD5_HASH_PROP_ID: int
CERT_SUBJECT_NAME_MD5_HASH_PROP_ID: int
CERT_EXTENDED_ERROR_INFO_PROP_ID: int
CERT_RENEWAL_PROP_ID: int
CERT_ARCHIVED_KEY_HASH_PROP_ID: int
CERT_AUTO_ENROLL_RETRY_PROP_ID: int
CERT_AIA_URL_RETRIEVED_PROP_ID: int
CERT_AUTHORITY_INFO_ACCESS_PROP_ID: int
CERT_BACKED_UP_PROP_ID: int
CERT_OCSP_RESPONSE_PROP_ID: int
CERT_REQUEST_ORIGINATOR_PROP_ID: int
CERT_SOURCE_LOCATION_PROP_ID: int
CERT_SOURCE_URL_PROP_ID: int
CERT_NEW_KEY_PROP_ID: int
CERT_OCSP_CACHE_PREFIX_PROP_ID: int
CERT_SMART_CARD_ROOT_INFO_PROP_ID: int
CERT_NO_AUTO_EXPIRE_CHECK_PROP_ID: int
CERT_NCRYPT_KEY_HANDLE_PROP_ID: int
CERT_HCRYPTPROV_OR_NCRYPT_KEY_HANDLE_PROP_ID: int
CERT_SUBJECT_INFO_ACCESS_PROP_ID: int
CERT_CA_OCSP_AUTHORITY_INFO_ACCESS_PROP_ID: int
CERT_CA_DISABLE_CRL_PROP_ID: int
CERT_ROOT_PROGRAM_CERT_POLICIES_PROP_ID: int
CERT_ROOT_PROGRAM_NAME_CONSTRAINTS_PROP_ID: int
CERT_SUBJECT_OCSP_AUTHORITY_INFO_ACCESS_PROP_ID: int
CERT_SUBJECT_DISABLE_CRL_PROP_ID: int
CERT_CEP_PROP_ID: int
CERT_SIGN_HASH_CNG_ALG_PROP_ID: int
CERT_SCARD_PIN_ID_PROP_ID: int
CERT_SCARD_PIN_INFO_PROP_ID: int
CERT_FIRST_RESERVED_PROP_ID: int
CERT_LAST_RESERVED_PROP_ID: int
CERT_FIRST_USER_PROP_ID: int
CERT_LAST_USER_PROP_ID: int
szOID_CERT_PROP_ID_PREFIX: str
szOID_CERT_KEY_IDENTIFIER_PROP_ID: str
szOID_CERT_ISSUER_SERIAL_NUMBER_MD5_HASH_PROP_ID: str
szOID_CERT_SUBJECT_NAME_MD5_HASH_PROP_ID: str
CERT_ACCESS_STATE_WRITE_PERSIST_FLAG: int
CERT_ACCESS_STATE_SYSTEM_STORE_FLAG: int
CERT_ACCESS_STATE_LM_SYSTEM_STORE_FLAG: int
CERT_SET_KEY_PROV_HANDLE_PROP_ID: int
CERT_SET_KEY_CONTEXT_PROP_ID: int
sz_CERT_STORE_PROV_MEMORY: str
sz_CERT_STORE_PROV_FILENAME_W: str
sz_CERT_STORE_PROV_FILENAME: str
sz_CERT_STORE_PROV_SYSTEM_W: str
sz_CERT_STORE_PROV_SYSTEM: str
sz_CERT_STORE_PROV_PKCS7: str
sz_CERT_STORE_PROV_SERIALIZED: str
sz_CERT_STORE_PROV_COLLECTION: str
sz_CERT_STORE_PROV_SYSTEM_REGISTRY_W: str
sz_CERT_STORE_PROV_SYSTEM_REGISTRY: str
sz_CERT_STORE_PROV_PHYSICAL_W: str
sz_CERT_STORE_PROV_PHYSICAL: str
sz_CERT_STORE_PROV_SMART_CARD_W: str
sz_CERT_STORE_PROV_SMART_CARD: str
sz_CERT_STORE_PROV_LDAP_W: str
sz_CERT_STORE_PROV_LDAP: str
CERT_STORE_SIGNATURE_FLAG: int
CERT_STORE_TIME_VALIDITY_FLAG: int
CERT_STORE_REVOCATION_FLAG: int
CERT_STORE_NO_CRL_FLAG: int
CERT_STORE_NO_ISSUER_FLAG: int
CERT_STORE_BASE_CRL_FLAG: int
CERT_STORE_DELTA_CRL_FLAG: int
CERT_STORE_NO_CRYPT_RELEASE_FLAG: int
CERT_STORE_SET_LOCALIZED_NAME_FLAG: int
CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG: int
CERT_STORE_DELETE_FLAG: int
CERT_STORE_UNSAFE_PHYSICAL_FLAG: int
CERT_STORE_SHARE_STORE_FLAG: int
CERT_STORE_SHARE_CONTEXT_FLAG: int
CERT_STORE_MANIFOLD_FLAG: int
CERT_STORE_ENUM_ARCHIVED_FLAG: int
CERT_STORE_UPDATE_KEYID_FLAG: int
CERT_STORE_BACKUP_RESTORE_FLAG: int
CERT_STORE_READONLY_FLAG: int
CERT_STORE_OPEN_EXISTING_FLAG: int
CERT_STORE_CREATE_NEW_FLAG: int
CERT_STORE_MAXIMUM_ALLOWED_FLAG: int
CERT_SYSTEM_STORE_MASK: int
CERT_SYSTEM_STORE_RELOCATE_FLAG: int
CERT_SYSTEM_STORE_UNPROTECTED_FLAG: int
CERT_SYSTEM_STORE_LOCATION_MASK: int
CERT_SYSTEM_STORE_LOCATION_SHIFT: int
CERT_SYSTEM_STORE_CURRENT_USER_ID: int
CERT_SYSTEM_STORE_LOCAL_MACHINE_ID: int
CERT_SYSTEM_STORE_CURRENT_SERVICE_ID: int
CERT_SYSTEM_STORE_SERVICES_ID: int
CERT_SYSTEM_STORE_USERS_ID: int
CERT_SYSTEM_STORE_CURRENT_USER_GROUP_POLICY_ID: int
CERT_SYSTEM_STORE_LOCAL_MACHINE_GROUP_POLICY_ID: int
CERT_SYSTEM_STORE_LOCAL_MACHINE_ENTERPRISE_ID: int
CERT_SYSTEM_STORE_CURRENT_USER: int
CERT_SYSTEM_STORE_LOCAL_MACHINE: int
CERT_SYSTEM_STORE_CURRENT_SERVICE: int
CERT_SYSTEM_STORE_SERVICES: int
CERT_SYSTEM_STORE_USERS: int
CERT_SYSTEM_STORE_CURRENT_USER_GROUP_POLICY: int
CERT_SYSTEM_STORE_LOCAL_MACHINE_GROUP_POLICY: int
CERT_SYSTEM_STORE_LOCAL_MACHINE_ENTERPRISE: int
CERT_PROT_ROOT_DISABLE_CURRENT_USER_FLAG: int
CERT_PROT_ROOT_INHIBIT_ADD_AT_INIT_FLAG: int
CERT_PROT_ROOT_INHIBIT_PURGE_LM_FLAG: int
CERT_PROT_ROOT_DISABLE_LM_AUTH_FLAG: int
CERT_PROT_ROOT_ONLY_LM_GPT_FLAG: int
CERT_PROT_ROOT_DISABLE_NT_AUTH_REQUIRED_FLAG: int
CERT_PROT_ROOT_DISABLE_NOT_DEFINED_NAME_CONSTRAINT_FLAG: int
CERT_TRUST_PUB_ALLOW_TRUST_MASK: int
CERT_TRUST_PUB_ALLOW_END_USER_TRUST: int
CERT_TRUST_PUB_ALLOW_MACHINE_ADMIN_TRUST: int
CERT_TRUST_PUB_ALLOW_ENTERPRISE_ADMIN_TRUST: int
CERT_TRUST_PUB_CHECK_PUBLISHER_REV_FLAG: int
CERT_TRUST_PUB_CHECK_TIMESTAMP_REV_FLAG: int
CERT_AUTH_ROOT_AUTO_UPDATE_LOCAL_MACHINE_REGPATH: str
CERT_AUTH_ROOT_AUTO_UPDATE_DISABLE_UNTRUSTED_ROOT_LOGGING_FLAG: int
CERT_AUTH_ROOT_AUTO_UPDATE_DISABLE_PARTIAL_CHAIN_LOGGING_FLAG: int
CERT_AUTH_ROOT_AUTO_UPDATE_ROOT_DIR_URL_VALUE_NAME: str
CERT_AUTH_ROOT_AUTO_UPDATE_SYNC_DELTA_TIME_VALUE_NAME: str
CERT_AUTH_ROOT_AUTO_UPDATE_FLAGS_VALUE_NAME: str
CERT_AUTH_ROOT_CTL_FILENAME: str
CERT_AUTH_ROOT_CTL_FILENAME_A: str
CERT_AUTH_ROOT_CAB_FILENAME: str
CERT_AUTH_ROOT_SEQ_FILENAME: str
CERT_AUTH_ROOT_CERT_EXT: str
CERT_GROUP_POLICY_SYSTEM_STORE_REGPATH: str
CERT_EFSBLOB_REGPATH: str
CERT_EFSBLOB_VALUE_NAME: str
CERT_PROT_ROOT_FLAGS_REGPATH: str
CERT_PROT_ROOT_FLAGS_VALUE_NAME: str
CERT_TRUST_PUB_SAFER_GROUP_POLICY_REGPATH: str
CERT_LOCAL_MACHINE_SYSTEM_STORE_REGPATH: str
CERT_TRUST_PUB_SAFER_LOCAL_MACHINE_REGPATH: str
CERT_TRUST_PUB_AUTHENTICODE_FLAGS_VALUE_NAME: str
CERT_OCM_SUBCOMPONENTS_LOCAL_MACHINE_REGPATH: str
CERT_OCM_SUBCOMPONENTS_ROOT_AUTO_UPDATE_VALUE_NAME: str
CERT_DISABLE_ROOT_AUTO_UPDATE_REGPATH: str
CERT_DISABLE_ROOT_AUTO_UPDATE_VALUE_NAME: str
CERT_REGISTRY_STORE_REMOTE_FLAG: int
CERT_REGISTRY_STORE_SERIALIZED_FLAG: int
CERT_REGISTRY_STORE_CLIENT_GPT_FLAG: int
CERT_REGISTRY_STORE_LM_GPT_FLAG: int
CERT_REGISTRY_STORE_ROAMING_FLAG: int
CERT_REGISTRY_STORE_MY_IE_DIRTY_FLAG: int
CERT_IE_DIRTY_FLAGS_REGPATH: str
CERT_FILE_STORE_COMMIT_ENABLE_FLAG: int
CERT_LDAP_STORE_SIGN_FLAG: int
CERT_LDAP_STORE_AREC_EXCLUSIVE_FLAG: int
CERT_LDAP_STORE_OPENED_FLAG: int
CERT_LDAP_STORE_UNBIND_FLAG: int
CRYPT_OID_OPEN_STORE_PROV_FUNC: str
CERT_STORE_PROV_EXTERNAL_FLAG: int
CERT_STORE_PROV_DELETED_FLAG: int
CERT_STORE_PROV_NO_PERSIST_FLAG: int
CERT_STORE_PROV_SYSTEM_STORE_FLAG: int
CERT_STORE_PROV_LM_SYSTEM_STORE_FLAG: int
CERT_STORE_PROV_CLOSE_FUNC: int
CERT_STORE_PROV_READ_CERT_FUNC: int
CERT_STORE_PROV_WRITE_CERT_FUNC: int
CERT_STORE_PROV_DELETE_CERT_FUNC: int
CERT_STORE_PROV_SET_CERT_PROPERTY_FUNC: int
CERT_STORE_PROV_READ_CRL_FUNC: int
CERT_STORE_PROV_WRITE_CRL_FUNC: int
CERT_STORE_PROV_DELETE_CRL_FUNC: int
CERT_STORE_PROV_SET_CRL_PROPERTY_FUNC: int
CERT_STORE_PROV_READ_CTL_FUNC: int
CERT_STORE_PROV_WRITE_CTL_FUNC: int
CERT_STORE_PROV_DELETE_CTL_FUNC: int
CERT_STORE_PROV_SET_CTL_PROPERTY_FUNC: int
CERT_STORE_PROV_CONTROL_FUNC: int
CERT_STORE_PROV_FIND_CERT_FUNC: int
CERT_STORE_PROV_FREE_FIND_CERT_FUNC: int
CERT_STORE_PROV_GET_CERT_PROPERTY_FUNC: int
CERT_STORE_PROV_FIND_CRL_FUNC: int
CERT_STORE_PROV_FREE_FIND_CRL_FUNC: int
CERT_STORE_PROV_GET_CRL_PROPERTY_FUNC: int
CERT_STORE_PROV_FIND_CTL_FUNC: int
CERT_STORE_PROV_FREE_FIND_CTL_FUNC: int
CERT_STORE_PROV_GET_CTL_PROPERTY_FUNC: int
CERT_STORE_PROV_WRITE_ADD_FLAG: int
CERT_STORE_SAVE_AS_STORE: int
CERT_STORE_SAVE_AS_PKCS7: int
CERT_STORE_SAVE_TO_FILE: int
CERT_STORE_SAVE_TO_MEMORY: int
CERT_STORE_SAVE_TO_FILENAME_A: int
CERT_STORE_SAVE_TO_FILENAME_W: int
CERT_STORE_SAVE_TO_FILENAME: int
CERT_CLOSE_STORE_FORCE_FLAG: int
CERT_CLOSE_STORE_CHECK_FLAG: int
CERT_COMPARE_MASK: int
CERT_COMPARE_SHIFT: int
CERT_COMPARE_ANY: int
CERT_COMPARE_SHA1_HASH: int
CERT_COMPARE_NAME: int
CERT_COMPARE_ATTR: int
CERT_COMPARE_MD5_HASH: int
CERT_COMPARE_PROPERTY: int
CERT_COMPARE_PUBLIC_KEY: int
CERT_COMPARE_HASH: int
CERT_COMPARE_NAME_STR_A: int
CERT_COMPARE_NAME_STR_W: int
CERT_COMPARE_KEY_SPEC: int
CERT_COMPARE_ENHKEY_USAGE: int
CERT_COMPARE_CTL_USAGE: int
CERT_COMPARE_SUBJECT_CERT: int
CERT_COMPARE_ISSUER_OF: int
CERT_COMPARE_EXISTING: int
CERT_COMPARE_SIGNATURE_HASH: int
CERT_COMPARE_KEY_IDENTIFIER: int
CERT_COMPARE_CERT_ID: int
CERT_COMPARE_CROSS_CERT_DIST_POINTS: int
CERT_COMPARE_PUBKEY_MD5_HASH: int
CERT_FIND_ANY: int
CERT_FIND_SHA1_HASH: int
CERT_FIND_MD5_HASH: int
CERT_FIND_SIGNATURE_HASH: int
CERT_FIND_KEY_IDENTIFIER: int
CERT_FIND_HASH: int
CERT_FIND_PROPERTY: int
CERT_FIND_PUBLIC_KEY: int
CERT_FIND_SUBJECT_NAME: int
CERT_FIND_SUBJECT_ATTR: int
CERT_FIND_ISSUER_NAME: int
CERT_FIND_ISSUER_ATTR: int
CERT_FIND_SUBJECT_STR_A: int
CERT_FIND_SUBJECT_STR_W: int
CERT_FIND_SUBJECT_STR: int
CERT_FIND_ISSUER_STR_A: int
CERT_FIND_ISSUER_STR_W: int
CERT_FIND_ISSUER_STR: int
CERT_FIND_KEY_SPEC: int
CERT_FIND_ENHKEY_USAGE: int
CERT_FIND_CTL_USAGE: int
CERT_FIND_SUBJECT_CERT: int
CERT_FIND_ISSUER_OF: int
CERT_FIND_EXISTING: int
CERT_FIND_CERT_ID: int
CERT_FIND_CROSS_CERT_DIST_POINTS: int
CERT_FIND_PUBKEY_MD5_HASH: int
CERT_FIND_OPTIONAL_ENHKEY_USAGE_FLAG: int
CERT_FIND_EXT_ONLY_ENHKEY_USAGE_FLAG: int
CERT_FIND_PROP_ONLY_ENHKEY_USAGE_FLAG: int
CERT_FIND_NO_ENHKEY_USAGE_FLAG: int
CERT_FIND_OR_ENHKEY_USAGE_FLAG: int
CERT_FIND_VALID_ENHKEY_USAGE_FLAG: int
CERT_FIND_OPTIONAL_CTL_USAGE_FLAG: int
CERT_FIND_EXT_ONLY_CTL_USAGE_FLAG: int
CERT_FIND_PROP_ONLY_CTL_USAGE_FLAG: int
CERT_FIND_NO_CTL_USAGE_FLAG: int
CERT_FIND_OR_CTL_USAGE_FLAG: int
CERT_FIND_VALID_CTL_USAGE_FLAG: int
CERT_SET_PROPERTY_IGNORE_PERSIST_ERROR_FLAG: int
CERT_SET_PROPERTY_INHIBIT_PERSIST_FLAG: int
CTL_ENTRY_FROM_PROP_CHAIN_FLAG: int
CRL_FIND_ANY: int
CRL_FIND_ISSUED_BY: int
CRL_FIND_EXISTING: int
CRL_FIND_ISSUED_FOR: int
CRL_FIND_ISSUED_BY_AKI_FLAG: int
CRL_FIND_ISSUED_BY_SIGNATURE_FLAG: int
CRL_FIND_ISSUED_BY_DELTA_FLAG: int
CRL_FIND_ISSUED_BY_BASE_FLAG: int
CERT_STORE_ADD_NEW: int
CERT_STORE_ADD_USE_EXISTING: int
CERT_STORE_ADD_REPLACE_EXISTING: int
CERT_STORE_ADD_ALWAYS: int
CERT_STORE_ADD_REPLACE_EXISTING_INHERIT_PROPERTIES: int
CERT_STORE_ADD_NEWER: int
CERT_STORE_ADD_NEWER_INHERIT_PROPERTIES: int
CERT_STORE_CERTIFICATE_CONTEXT: int
CERT_STORE_CRL_CONTEXT: int
CERT_STORE_CTL_CONTEXT: int
CERT_STORE_ALL_CONTEXT_FLAG: int
CERT_STORE_CERTIFICATE_CONTEXT_FLAG: int
CERT_STORE_CRL_CONTEXT_FLAG: int
CERT_STORE_CTL_CONTEXT_FLAG: int
CTL_ANY_SUBJECT_TYPE: int
CTL_CERT_SUBJECT_TYPE: int
CTL_FIND_ANY: int
CTL_FIND_SHA1_HASH: int
CTL_FIND_MD5_HASH: int
CTL_FIND_USAGE: int
CTL_FIND_SUBJECT: int
CTL_FIND_EXISTING: int
CTL_FIND_SAME_USAGE_FLAG: int
CERT_STORE_CTRL_RESYNC: int
CERT_STORE_CTRL_NOTIFY_CHANGE: int
CERT_STORE_CTRL_COMMIT: int
CERT_STORE_CTRL_AUTO_RESYNC: int
CERT_STORE_CTRL_CANCEL_NOTIFY: int
CERT_STORE_CTRL_INHIBIT_DUPLICATE_HANDLE_FLAG: int
CERT_STORE_CTRL_COMMIT_FORCE_FLAG: int
CERT_STORE_CTRL_COMMIT_CLEAR_FLAG: int
CERT_STORE_LOCALIZED_NAME_PROP_ID: int
CERT_CREATE_CONTEXT_NOCOPY_FLAG: int
CERT_CREATE_CONTEXT_SORTED_FLAG: int
CERT_CREATE_CONTEXT_NO_HCRYPTMSG_FLAG: int
CERT_CREATE_CONTEXT_NO_ENTRY_FLAG: int
CERT_PHYSICAL_STORE_ADD_ENABLE_FLAG: int
CERT_PHYSICAL_STORE_OPEN_DISABLE_FLAG: int
CERT_PHYSICAL_STORE_REMOTE_OPEN_DISABLE_FLAG: int
CERT_PHYSICAL_STORE_INSERT_COMPUTER_NAME_ENABLE_FLAG: int
CERT_PHYSICAL_STORE_PREDEFINED_ENUM_FLAG: int
CERT_PHYSICAL_STORE_DEFAULT_NAME: str
CERT_PHYSICAL_STORE_GROUP_POLICY_NAME: str
CERT_PHYSICAL_STORE_LOCAL_MACHINE_NAME: str
CERT_PHYSICAL_STORE_DS_USER_CERTIFICATE_NAME: str
CERT_PHYSICAL_STORE_LOCAL_MACHINE_GROUP_POLICY_NAME: str
CERT_PHYSICAL_STORE_ENTERPRISE_NAME: str
CERT_PHYSICAL_STORE_AUTH_ROOT_NAME: str
CERT_PHYSICAL_STORE_SMART_CARD_NAME: str
CRYPT_OID_OPEN_SYSTEM_STORE_PROV_FUNC: str
CRYPT_OID_REGISTER_SYSTEM_STORE_FUNC: str
CRYPT_OID_UNREGISTER_SYSTEM_STORE_FUNC: str
CRYPT_OID_ENUM_SYSTEM_STORE_FUNC: str
CRYPT_OID_REGISTER_PHYSICAL_STORE_FUNC: str
CRYPT_OID_UNREGISTER_PHYSICAL_STORE_FUNC: str
CRYPT_OID_ENUM_PHYSICAL_STORE_FUNC: str
CRYPT_OID_SYSTEM_STORE_LOCATION_VALUE_NAME: str
CMSG_TRUSTED_SIGNER_FLAG: int
CMSG_SIGNER_ONLY_FLAG: int
CMSG_USE_SIGNER_INDEX_FLAG: int
CMSG_CMS_ENCAPSULATED_CTL_FLAG: int
CMSG_ENCODE_SORTED_CTL_FLAG: int
CMSG_ENCODE_HASHED_SUBJECT_IDENTIFIER_FLAG: int
CERT_VERIFY_INHIBIT_CTL_UPDATE_FLAG: int
CERT_VERIFY_TRUSTED_SIGNERS_FLAG: int
CERT_VERIFY_NO_TIME_CHECK_FLAG: int
CERT_VERIFY_ALLOW_MORE_USAGE_FLAG: int
CERT_VERIFY_UPDATED_CTL_FLAG: int
CERT_CONTEXT_REVOCATION_TYPE: int
CERT_VERIFY_REV_CHAIN_FLAG: int
CERT_VERIFY_CACHE_ONLY_BASED_REVOCATION: int
CERT_VERIFY_REV_ACCUMULATIVE_TIMEOUT_FLAG: int
CERT_UNICODE_IS_RDN_ATTRS_FLAG: int
CERT_CASE_INSENSITIVE_IS_RDN_ATTRS_FLAG: int
CRYPT_VERIFY_CERT_SIGN_SUBJECT_BLOB: int
CRYPT_VERIFY_CERT_SIGN_SUBJECT_CERT: int
CRYPT_VERIFY_CERT_SIGN_SUBJECT_CRL: int
CRYPT_VERIFY_CERT_SIGN_ISSUER_PUBKEY: int
CRYPT_VERIFY_CERT_SIGN_ISSUER_CERT: int
CRYPT_VERIFY_CERT_SIGN_ISSUER_CHAIN: int
CRYPT_VERIFY_CERT_SIGN_ISSUER_NULL: int
CRYPT_DEFAULT_CONTEXT_AUTO_RELEASE_FLAG: int
CRYPT_DEFAULT_CONTEXT_PROCESS_FLAG: int
CRYPT_DEFAULT_CONTEXT_CERT_SIGN_OID: int
CRYPT_DEFAULT_CONTEXT_MULTI_CERT_SIGN_OID: int
CRYPT_OID_EXPORT_PUBLIC_KEY_INFO_FUNC: str
CRYPT_OID_IMPORT_PUBLIC_KEY_INFO_FUNC: str
CRYPT_ACQUIRE_CACHE_FLAG: int
CRYPT_ACQUIRE_USE_PROV_INFO_FLAG: int
CRYPT_ACQUIRE_COMPARE_KEY_FLAG: int
CRYPT_ACQUIRE_SILENT_FLAG: int
CRYPT_FIND_USER_KEYSET_FLAG: int
CRYPT_FIND_MACHINE_KEYSET_FLAG: int
CRYPT_FIND_SILENT_KEYSET_FLAG: int
CRYPT_OID_IMPORT_PRIVATE_KEY_INFO_FUNC: str
CRYPT_OID_EXPORT_PRIVATE_KEY_INFO_FUNC: str
CRYPT_DELETE_KEYSET: int
CERT_SIMPLE_NAME_STR: int
CERT_OID_NAME_STR: int
CERT_X500_NAME_STR: int
CERT_NAME_STR_SEMICOLON_FLAG: int
CERT_NAME_STR_NO_PLUS_FLAG: int
CERT_NAME_STR_NO_QUOTING_FLAG: int
CERT_NAME_STR_CRLF_FLAG: int
CERT_NAME_STR_COMMA_FLAG: int
CERT_NAME_STR_REVERSE_FLAG: int
CERT_NAME_STR_DISABLE_IE4_UTF8_FLAG: int
CERT_NAME_STR_ENABLE_T61_UNICODE_FLAG: int
CERT_NAME_STR_ENABLE_UTF8_UNICODE_FLAG: int
CERT_NAME_EMAIL_TYPE: int
CERT_NAME_RDN_TYPE: int
CERT_NAME_ATTR_TYPE: int
CERT_NAME_SIMPLE_DISPLAY_TYPE: int
CERT_NAME_FRIENDLY_DISPLAY_TYPE: int
CERT_NAME_DNS_TYPE: int
CERT_NAME_URL_TYPE: int
CERT_NAME_UPN_TYPE: int
CERT_NAME_ISSUER_FLAG: int
CERT_NAME_DISABLE_IE4_UTF8_FLAG: int
CRYPT_MESSAGE_BARE_CONTENT_OUT_FLAG: int
CRYPT_MESSAGE_ENCAPSULATED_CONTENT_OUT_FLAG: int
CRYPT_MESSAGE_KEYID_SIGNER_FLAG: int
CRYPT_MESSAGE_SILENT_KEYSET_FLAG: int
CRYPT_MESSAGE_KEYID_RECIPIENT_FLAG: int
CERT_QUERY_OBJECT_FILE: int
CERT_QUERY_OBJECT_BLOB: int
CERT_QUERY_CONTENT_CERT: int
CERT_QUERY_CONTENT_CTL: int
CERT_QUERY_CONTENT_CRL: int
CERT_QUERY_CONTENT_SERIALIZED_STORE: int
CERT_QUERY_CONTENT_SERIALIZED_CERT: int
CERT_QUERY_CONTENT_SERIALIZED_CTL: int
CERT_QUERY_CONTENT_SERIALIZED_CRL: int
CERT_QUERY_CONTENT_PKCS7_SIGNED: int
CERT_QUERY_CONTENT_PKCS7_UNSIGNED: int
CERT_QUERY_CONTENT_PKCS7_SIGNED_EMBED: int
CERT_QUERY_CONTENT_PKCS10: int
CERT_QUERY_CONTENT_PFX: int
CERT_QUERY_CONTENT_CERT_PAIR: int
CERT_QUERY_CONTENT_FLAG_CERT: int
CERT_QUERY_CONTENT_FLAG_CTL: int
CERT_QUERY_CONTENT_FLAG_CRL: int
CERT_QUERY_CONTENT_FLAG_SERIALIZED_STORE: int
CERT_QUERY_CONTENT_FLAG_SERIALIZED_CERT: int
CERT_QUERY_CONTENT_FLAG_SERIALIZED_CTL: int
CERT_QUERY_CONTENT_FLAG_SERIALIZED_CRL: int
CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED: int
CERT_QUERY_CONTENT_FLAG_PKCS7_UNSIGNED: int
CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED_EMBED: int
CERT_QUERY_CONTENT_FLAG_PKCS10: int
CERT_QUERY_CONTENT_FLAG_PFX: int
CERT_QUERY_CONTENT_FLAG_CERT_PAIR: int
CERT_QUERY_CONTENT_FLAG_ALL: int
CERT_QUERY_FORMAT_BINARY: int
CERT_QUERY_FORMAT_BASE64_ENCODED: int
CERT_QUERY_FORMAT_ASN_ASCII_HEX_ENCODED: int
CERT_QUERY_FORMAT_FLAG_BINARY: int
CERT_QUERY_FORMAT_FLAG_BASE64_ENCODED: int
CERT_QUERY_FORMAT_FLAG_ASN_ASCII_HEX_ENCODED: int
CERT_QUERY_FORMAT_FLAG_ALL: int
CREDENTIAL_OID_PASSWORD_CREDENTIALS_A: int
CREDENTIAL_OID_PASSWORD_CREDENTIALS_W: int
CREDENTIAL_OID_PASSWORD_CREDENTIALS: int
SCHEME_OID_RETRIEVE_ENCODED_OBJECT_FUNC: str
SCHEME_OID_RETRIEVE_ENCODED_OBJECTW_FUNC: str
CONTEXT_OID_CREATE_OBJECT_CONTEXT_FUNC: str
CONTEXT_OID_CERTIFICATE: int
CONTEXT_OID_CRL: int
CONTEXT_OID_CTL: int
CONTEXT_OID_PKCS7: int
CONTEXT_OID_CAPI2_ANY: int
CONTEXT_OID_OCSP_RESP: int
CRYPT_RETRIEVE_MULTIPLE_OBJECTS: int
CRYPT_CACHE_ONLY_RETRIEVAL: int
CRYPT_WIRE_ONLY_RETRIEVAL: int
CRYPT_DONT_CACHE_RESULT: int
CRYPT_ASYNC_RETRIEVAL: int
CRYPT_STICKY_CACHE_RETRIEVAL: int
CRYPT_LDAP_SCOPE_BASE_ONLY_RETRIEVAL: int
CRYPT_OFFLINE_CHECK_RETRIEVAL: int
CRYPT_LDAP_INSERT_ENTRY_ATTRIBUTE: int
CRYPT_LDAP_SIGN_RETRIEVAL: int
CRYPT_NO_AUTH_RETRIEVAL: int
CRYPT_LDAP_AREC_EXCLUSIVE_RETRIEVAL: int
CRYPT_AIA_RETRIEVAL: int
CRYPT_VERIFY_CONTEXT_SIGNATURE: int
CRYPT_VERIFY_DATA_HASH: int
CRYPT_KEEP_TIME_VALID: int
CRYPT_DONT_VERIFY_SIGNATURE: int
CRYPT_DONT_CHECK_TIME_VALIDITY: int
CRYPT_CHECK_FRESHNESS_TIME_VALIDITY: int
CRYPT_ACCUMULATIVE_TIMEOUT: int
CRYPT_PARAM_ASYNC_RETRIEVAL_COMPLETION: int
CRYPT_PARAM_CANCEL_ASYNC_RETRIEVAL: int
CRYPT_GET_URL_FROM_PROPERTY: int
CRYPT_GET_URL_FROM_EXTENSION: int
CRYPT_GET_URL_FROM_UNAUTH_ATTRIBUTE: int
CRYPT_GET_URL_FROM_AUTH_ATTRIBUTE: int
URL_OID_GET_OBJECT_URL_FUNC: str
TIME_VALID_OID_GET_OBJECT_FUNC: str
TIME_VALID_OID_FLUSH_OBJECT_FUNC: str
TIME_VALID_OID_GET_CTL: int
TIME_VALID_OID_GET_CRL: int
TIME_VALID_OID_GET_CRL_FROM_CERT: int
TIME_VALID_OID_GET_FRESHEST_CRL_FROM_CERT: int
TIME_VALID_OID_GET_FRESHEST_CRL_FROM_CRL: int
TIME_VALID_OID_FLUSH_CTL: int
TIME_VALID_OID_FLUSH_CRL: int
TIME_VALID_OID_FLUSH_CRL_FROM_CERT: int
TIME_VALID_OID_FLUSH_FRESHEST_CRL_FROM_CERT: int
TIME_VALID_OID_FLUSH_FRESHEST_CRL_FROM_CRL: int
CRYPTPROTECT_PROMPT_ON_UNPROTECT: int
CRYPTPROTECT_PROMPT_ON_PROTECT: int
CRYPTPROTECT_PROMPT_RESERVED: int
CRYPTPROTECT_PROMPT_STRONG: int
CRYPTPROTECT_PROMPT_REQUIRE_STRONG: int
CRYPTPROTECT_UI_FORBIDDEN: int
CRYPTPROTECT_LOCAL_MACHINE: int
CRYPTPROTECT_CRED_SYNC: int
CRYPTPROTECT_AUDIT: int
CRYPTPROTECT_NO_RECOVERY: int
CRYPTPROTECT_VERIFY_PROTECTION: int
CRYPTPROTECT_CRED_REGENERATE: int
CRYPTPROTECT_FIRST_RESERVED_FLAGVAL: int
CRYPTPROTECT_LAST_RESERVED_FLAGVAL: int
CRYPTPROTECTMEMORY_BLOCK_SIZE: int
CRYPTPROTECTMEMORY_SAME_PROCESS: int
CRYPTPROTECTMEMORY_CROSS_PROCESS: int
CRYPTPROTECTMEMORY_SAME_LOGON: int
CERT_CREATE_SELFSIGN_NO_SIGN: int
CERT_CREATE_SELFSIGN_NO_KEY_INFO: int
CRYPT_KEYID_MACHINE_FLAG: int
CRYPT_KEYID_ALLOC_FLAG: int
CRYPT_KEYID_DELETE_FLAG: int
CRYPT_KEYID_SET_NEW_FLAG: int
CERT_CHAIN_MAX_AIA_URL_COUNT_IN_CERT_DEFAULT: int
CERT_CHAIN_MAX_AIA_URL_RETRIEVAL_COUNT_PER_CHAIN_DEFAULT: int
CERT_CHAIN_MAX_AIA_URL_RETRIEVAL_BYTE_COUNT_DEFAULT: int
CERT_CHAIN_MAX_AIA_URL_RETRIEVAL_CERT_COUNT_DEFAULT: int
CERT_CHAIN_CACHE_END_CERT: int
CERT_CHAIN_THREAD_STORE_SYNC: int
CERT_CHAIN_CACHE_ONLY_URL_RETRIEVAL: int
CERT_CHAIN_USE_LOCAL_MACHINE_STORE: int
CERT_CHAIN_ENABLE_CACHE_AUTO_UPDATE: int
CERT_CHAIN_ENABLE_SHARE_STORE: int
CERT_TRUST_NO_ERROR: int
CERT_TRUST_IS_NOT_TIME_VALID: int
CERT_TRUST_IS_NOT_TIME_NESTED: int
CERT_TRUST_IS_REVOKED: int
CERT_TRUST_IS_NOT_SIGNATURE_VALID: int
CERT_TRUST_IS_NOT_VALID_FOR_USAGE: int
CERT_TRUST_IS_UNTRUSTED_ROOT: int
CERT_TRUST_REVOCATION_STATUS_UNKNOWN: int
CERT_TRUST_IS_CYCLIC: int
CERT_TRUST_INVALID_EXTENSION: int
CERT_TRUST_INVALID_POLICY_CONSTRAINTS: int
CERT_TRUST_INVALID_BASIC_CONSTRAINTS: int
CERT_TRUST_INVALID_NAME_CONSTRAINTS: int
CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT: int
CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT: int
CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT: int
CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT: int
CERT_TRUST_IS_OFFLINE_REVOCATION: int
CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY: int
CERT_TRUST_IS_PARTIAL_CHAIN: int
CERT_TRUST_CTL_IS_NOT_TIME_VALID: int
CERT_TRUST_CTL_IS_NOT_SIGNATURE_VALID: int
CERT_TRUST_CTL_IS_NOT_VALID_FOR_USAGE: int
CERT_TRUST_HAS_EXACT_MATCH_ISSUER: int
CERT_TRUST_HAS_KEY_MATCH_ISSUER: int
CERT_TRUST_HAS_NAME_MATCH_ISSUER: int
CERT_TRUST_IS_SELF_SIGNED: int
CERT_TRUST_HAS_PREFERRED_ISSUER: int
CERT_TRUST_HAS_ISSUANCE_CHAIN_POLICY: int
CERT_TRUST_HAS_VALID_NAME_CONSTRAINTS: int
CERT_TRUST_IS_COMPLEX_CHAIN: int
USAGE_MATCH_TYPE_AND: int
USAGE_MATCH_TYPE_OR: int
CERT_CHAIN_REVOCATION_CHECK_END_CERT: int
CERT_CHAIN_REVOCATION_CHECK_CHAIN: int
CERT_CHAIN_REVOCATION_CHECK_CHAIN_EXCLUDE_ROOT: int
CERT_CHAIN_REVOCATION_CHECK_CACHE_ONLY: int
CERT_CHAIN_REVOCATION_ACCUMULATIVE_TIMEOUT: int
CERT_CHAIN_DISABLE_PASS1_QUALITY_FILTERING: int
CERT_CHAIN_RETURN_LOWER_QUALITY_CONTEXTS: int
CERT_CHAIN_DISABLE_AUTH_ROOT_AUTO_UPDATE: int
CERT_CHAIN_TIMESTAMP_TIME: int
REVOCATION_OID_CRL_REVOCATION: int
CERT_CHAIN_FIND_BY_ISSUER: int
CERT_CHAIN_FIND_BY_ISSUER_COMPARE_KEY_FLAG: int
CERT_CHAIN_FIND_BY_ISSUER_COMPLEX_CHAIN_FLAG: int
CERT_CHAIN_FIND_BY_ISSUER_CACHE_ONLY_URL_FLAG: int
CERT_CHAIN_FIND_BY_ISSUER_LOCAL_MACHINE_FLAG: int
CERT_CHAIN_FIND_BY_ISSUER_NO_KEY_FLAG: int
CERT_CHAIN_FIND_BY_ISSUER_CACHE_ONLY_FLAG: int
CERT_CHAIN_POLICY_IGNORE_NOT_TIME_VALID_FLAG: int
CERT_CHAIN_POLICY_IGNORE_CTL_NOT_TIME_VALID_FLAG: int
CERT_CHAIN_POLICY_IGNORE_NOT_TIME_NESTED_FLAG: int
CERT_CHAIN_POLICY_IGNORE_INVALID_BASIC_CONSTRAINTS_FLAG: int
CERT_CHAIN_POLICY_IGNORE_ALL_NOT_TIME_VALID_FLAGS: int
CERT_CHAIN_POLICY_ALLOW_UNKNOWN_CA_FLAG: int
CERT_CHAIN_POLICY_IGNORE_WRONG_USAGE_FLAG: int
CERT_CHAIN_POLICY_IGNORE_INVALID_NAME_FLAG: int
CERT_CHAIN_POLICY_IGNORE_INVALID_POLICY_FLAG: int
CERT_CHAIN_POLICY_IGNORE_END_REV_UNKNOWN_FLAG: int
CERT_CHAIN_POLICY_IGNORE_CTL_SIGNER_REV_UNKNOWN_FLAG: int
CERT_CHAIN_POLICY_IGNORE_CA_REV_UNKNOWN_FLAG: int
CERT_CHAIN_POLICY_IGNORE_ROOT_REV_UNKNOWN_FLAG: int
CERT_CHAIN_POLICY_IGNORE_ALL_REV_UNKNOWN_FLAGS: int
CERT_CHAIN_POLICY_ALLOW_TESTROOT_FLAG: int
CERT_CHAIN_POLICY_TRUST_TESTROOT_FLAG: int
CRYPT_OID_VERIFY_CERTIFICATE_CHAIN_POLICY_FUNC: str
AUTHTYPE_CLIENT: int
AUTHTYPE_SERVER: int
BASIC_CONSTRAINTS_CERT_CHAIN_POLICY_CA_FLAG: int
BASIC_CONSTRAINTS_CERT_CHAIN_POLICY_END_ENTITY_FLAG: int
MICROSOFT_ROOT_CERT_CHAIN_POLICY_ENABLE_TEST_ROOT_FLAG: int
CRYPT_STRING_BASE64HEADER: int
CRYPT_STRING_BASE64: int
CRYPT_STRING_BINARY: int
CRYPT_STRING_BASE64REQUESTHEADER: int
CRYPT_STRING_HEX: int
CRYPT_STRING_HEXASCII: int
CRYPT_STRING_BASE64_ANY: int
CRYPT_STRING_ANY: int
CRYPT_STRING_HEX_ANY: int
CRYPT_STRING_BASE64X509CRLHEADER: int
CRYPT_STRING_HEXADDR: int
CRYPT_STRING_HEXASCIIADDR: int
CRYPT_STRING_NOCR: int
CRYPT_USER_KEYSET: int
PKCS12_IMPORT_RESERVED_MASK: int
REPORT_NO_PRIVATE_KEY: int
REPORT_NOT_ABLE_TO_EXPORT_PRIVATE_KEY: int
EXPORT_PRIVATE_KEYS: int
PKCS12_EXPORT_RESERVED_MASK: int
CERT_STORE_PROV_MSG: int
CERT_STORE_PROV_MEMORY: int
CERT_STORE_PROV_FILE: int
CERT_STORE_PROV_REG: int
CERT_STORE_PROV_PKCS7: int
CERT_STORE_PROV_SERIALIZED: int
CERT_STORE_PROV_FILENAME: int
CERT_STORE_PROV_SYSTEM: int
CERT_STORE_PROV_COLLECTION: int
CERT_STORE_PROV_SYSTEM_REGISTRY: int
CERT_STORE_PROV_PHYSICAL: int
CERT_STORE_PROV_SMART_CARD: int
CERT_STORE_PROV_LDAP: int
URL_OID_CERTIFICATE_ISSUER: int
URL_OID_CERTIFICATE_CRL_DIST_POINT: int
URL_OID_CTL_ISSUER: int
URL_OID_CTL_NEXT_UPDATE: int
URL_OID_CRL_ISSUER: int
URL_OID_CERTIFICATE_FRESHEST_CRL: int
URL_OID_CRL_FRESHEST_CRL: int
URL_OID_CROSS_CERT_DIST_POINT: int
URL_OID_CERTIFICATE_OCSP: int
URL_OID_CERTIFICATE_OCSP_AND_CRL_DIST_POINT: int
URL_OID_CERTIFICATE_CRL_DIST_POINT_AND_OCSP: int
URL_OID_CROSS_CERT_SUBJECT_INFO_ACCESS: int
URL_OID_CERTIFICATE_ONLY_OCSP: int
CMSG_CTRL_MAIL_LIST_DECRYPT: int
CMSG_MAIL_LIST_ENCRYPT_FREE_PARA_FLAG: int
CMSG_MAIL_LIST_HANDLE_KEY_CHOICE: int
CMSG_MAIL_LIST_RECIPIENT: int
CMSG_MAIL_LIST_VERSION: int
CMSG_OID_EXPORT_MAIL_LIST_FUNC: str
CMSG_OID_IMPORT_MAIL_LIST_FUNC: str
CTL_FIND_NO_LIST_ID_CBDATA: int
szOID_AUTHORITY_REVOCATION_LIST: str
szOID_CERTIFICATE_REVOCATION_LIST: str
szOID_ROOT_LIST_SIGNER: str
|
502c1a3dc817cc5e2dcede8432c3104f8ac6a80c
|
d91d19da3589c3f69a834bbb9834386e80f100e0
|
/datashader/glyphs/line.py
|
05baad0cf61b4132a9fc5e3990624877914101e7
|
[] |
permissive
|
holoviz/datashader
|
11d518371e974c02ba3843871e3e0905e0c83956
|
b510594eb771d14cff3b69efca8ddd37ca3a1046
|
refs/heads/main
| 2023-08-18T13:55:24.214980
| 2023-08-17T08:45:48
| 2023-08-17T08:45:48
| 48,504,165
| 1,040
| 133
|
BSD-3-Clause
| 2023-09-11T09:51:30
| 2015-12-23T18:02:20
|
Python
|
UTF-8
|
Python
| false
| false
| 66,326
|
py
|
line.py
|
from __future__ import annotations
import math
import numpy as np
from toolz import memoize
from datashader.antialias import two_stage_agg
from datashader.glyphs.points import _PointLike, _GeometryLike
from datashader.utils import isnull, isreal, ngjit
from numba import cuda
import numba.types as nb_types
try:
import cudf
import cupy as cp
from ..transfer_functions._cuda_utils import cuda_args
except ImportError:
cudf = None
cuda_args = None
try:
import spatialpandas
except Exception:
spatialpandas = None
class _AntiAliasedLine(object):
""" Methods common to all lines. """
_line_width = 0 # Use antialiasing if > 0.
def set_line_width(self, line_width):
self._line_width = line_width
if hasattr(self, "antialiased"):
self.antialiased = (line_width > 0)
def _build_extend(self, x_mapper, y_mapper, info, append, antialias_stage_2, antialias_stage_2_funcs):
return self._internal_build_extend(
x_mapper, y_mapper, info, append, self._line_width, antialias_stage_2, antialias_stage_2_funcs)
class LineAxis0(_PointLike, _AntiAliasedLine):
"""A line, with vertices defined by ``x`` and ``y``.
Parameters
----------
x, y : str
Column names for the x and y coordinates of each vertex.
"""
@memoize
def _internal_build_extend(
self, x_mapper, y_mapper, info, append, line_width, antialias_stage_2, antialias_stage_2_funcs):
antialias = line_width > 0
expand_aggs_and_cols = self.expand_aggs_and_cols(append)
map_onto_pixel = _build_map_onto_pixel_for_line(
x_mapper, y_mapper, antialias)
overwrite, use_2_stage_agg = two_stage_agg(antialias_stage_2)
if not use_2_stage_agg:
antialias_stage_2_funcs = None
draw_segment = _build_draw_segment(
append, map_onto_pixel, expand_aggs_and_cols, line_width, overwrite
)
extend_cpu, extend_cuda = _build_extend_line_axis0(
draw_segment, expand_aggs_and_cols, antialias_stage_2_funcs,
)
x_name = self.x
y_name = self.y
def extend(aggs, df, vt, bounds, plot_start=True):
sx, tx, sy, ty = vt
xmin, xmax, ymin, ymax = bounds
aggs_and_cols = aggs + info(df, aggs[0].shape[:2])
if cudf and isinstance(df, cudf.DataFrame):
xs = self.to_cupy_array(df, x_name)
ys = self.to_cupy_array(df, y_name)
do_extend = extend_cuda[cuda_args(xs.shape)]
else:
xs = df.loc[:, x_name].to_numpy()
ys = df.loc[:, y_name].to_numpy()
do_extend = extend_cpu
# line may be clipped, then mapped to pixels
do_extend(
sx, tx, sy, ty, xmin, xmax, ymin, ymax,
xs, ys, plot_start, antialias_stage_2, *aggs_and_cols
)
return extend
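# Illustrative usage sketch (not part of the library): LineAxis0 backs the
# common Canvas.line case where ``x`` and ``y`` are single column names and
# consecutive rows are joined into one polyline. The column names below are
# hypothetical; line_width > 0 selects the antialiased drawing path.
#
#     import numpy as np, pandas as pd, datashader as ds
#     df = pd.DataFrame({'x': np.linspace(0, 1, 100),
#                        'y': np.sin(np.linspace(0, 10, 100))})
#     agg = ds.Canvas(plot_width=300, plot_height=200).line(df, 'x', 'y',
#                                                           line_width=1)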
class LineAxis0Multi(_PointLike, _AntiAliasedLine):
"""
"""
def validate(self, in_dshape):
if not all([isreal(in_dshape.measure[str(xcol)]) for xcol in self.x]):
raise ValueError('x columns must be real')
elif not all([isreal(in_dshape.measure[str(ycol)]) for ycol in self.y]):
raise ValueError('y columns must be real')
if len(self.x) != len(self.y):
raise ValueError(
f'x and y coordinate lengths do not match: {len(self.x)} != {len(self.y)}')
@property
def x_label(self):
return 'x'
@property
def y_label(self):
return 'y'
def required_columns(self):
return self.x + self.y
def compute_x_bounds(self, df):
bounds_list = [self._compute_bounds(df[x])
for x in self.x]
mins, maxes = zip(*bounds_list)
return self.maybe_expand_bounds((min(mins), max(maxes)))
def compute_y_bounds(self, df):
bounds_list = [self._compute_bounds(df[y])
for y in self.y]
mins, maxes = zip(*bounds_list)
return self.maybe_expand_bounds((min(mins), max(maxes)))
@memoize
def compute_bounds_dask(self, ddf):
r = ddf.map_partitions(lambda df: np.array([[
np.nanmin([np.nanmin(df[c].values).item() for c in self.x]),
np.nanmax([np.nanmax(df[c].values).item() for c in self.x]),
np.nanmin([np.nanmin(df[c].values).item() for c in self.y]),
np.nanmax([np.nanmax(df[c].values).item() for c in self.y])]]
)).compute()
x_extents = np.nanmin(r[:, 0]), np.nanmax(r[:, 1])
y_extents = np.nanmin(r[:, 2]), np.nanmax(r[:, 3])
return (self.maybe_expand_bounds(x_extents),
self.maybe_expand_bounds(y_extents))
@memoize
def _internal_build_extend(
self, x_mapper, y_mapper, info, append, line_width, antialias_stage_2, antialias_stage_2_funcs):
antialias = line_width > 0
expand_aggs_and_cols = self.expand_aggs_and_cols(append)
map_onto_pixel = _build_map_onto_pixel_for_line(
x_mapper, y_mapper, antialias)
overwrite, use_2_stage_agg = two_stage_agg(antialias_stage_2)
if not use_2_stage_agg:
antialias_stage_2_funcs = None
draw_segment = _build_draw_segment(
append, map_onto_pixel, expand_aggs_and_cols, line_width, overwrite
)
extend_cpu, extend_cuda = _build_extend_line_axis0_multi(
draw_segment, expand_aggs_and_cols, antialias_stage_2_funcs,
)
x_names = self.x
y_names = self.y
def extend(aggs, df, vt, bounds, plot_start=True):
sx, tx, sy, ty = vt
xmin, xmax, ymin, ymax = bounds
aggs_and_cols = aggs + info(df, aggs[0].shape[:2])
if cudf and isinstance(df, cudf.DataFrame):
xs = self.to_cupy_array(df, x_names)
ys = self.to_cupy_array(df, y_names)
do_extend = extend_cuda[cuda_args(xs.shape)]
else:
xs = df.loc[:, list(x_names)].to_numpy()
ys = df.loc[:, list(y_names)].to_numpy()
do_extend = extend_cpu
# line may be clipped, then mapped to pixels
do_extend(
sx, tx, sy, ty, xmin, xmax, ymin, ymax,
xs, ys, plot_start, antialias_stage_2, *aggs_and_cols,
)
return extend
class LinesAxis1(_PointLike, _AntiAliasedLine):
"""A collection of lines (on line per row) with vertices defined
by the lists of columns in ``x`` and ``y``
Parameters
----------
x, y : list
Lists of column names for the x and y coordinates
"""
def validate(self, in_dshape):
if not all([isreal(in_dshape.measure[str(xcol)])
for xcol in self.x]):
raise ValueError('x columns must be real')
elif not all([isreal(in_dshape.measure[str(ycol)])
for ycol in self.y]):
raise ValueError('y columns must be real')
unique_x_measures = set(in_dshape.measure[str(xcol)]
for xcol in self.x)
if len(unique_x_measures) > 1:
raise ValueError('x columns must have the same data type')
unique_y_measures = set(in_dshape.measure[str(ycol)]
for ycol in self.y)
if len(unique_y_measures) > 1:
raise ValueError('y columns must have the same data type')
if len(self.x) != len(self.y):
raise ValueError(
f'x and y coordinate lengths do not match: {len(self.x)} != {len(self.y)}')
def required_columns(self):
return self.x + self.y
@property
def x_label(self):
return 'x'
@property
def y_label(self):
return 'y'
def compute_x_bounds(self, df):
xs = tuple(df[xlabel] for xlabel in self.x)
bounds_list = [self._compute_bounds(xcol) for xcol in xs]
mins, maxes = zip(*bounds_list)
return self.maybe_expand_bounds((min(mins), max(maxes)))
def compute_y_bounds(self, df):
ys = tuple(df[ylabel] for ylabel in self.y)
bounds_list = [self._compute_bounds(ycol) for ycol in ys]
mins, maxes = zip(*bounds_list)
return self.maybe_expand_bounds((min(mins), max(maxes)))
@memoize
def compute_bounds_dask(self, ddf):
r = ddf.map_partitions(lambda df: np.array([[
np.nanmin([np.nanmin(df[c].values).item() for c in self.x]),
np.nanmax([np.nanmax(df[c].values).item() for c in self.x]),
np.nanmin([np.nanmin(df[c].values).item() for c in self.y]),
np.nanmax([np.nanmax(df[c].values).item() for c in self.y])]]
)).compute()
x_extents = np.nanmin(r[:, 0]), np.nanmax(r[:, 1])
y_extents = np.nanmin(r[:, 2]), np.nanmax(r[:, 3])
return (self.maybe_expand_bounds(x_extents),
self.maybe_expand_bounds(y_extents))
@memoize
def _internal_build_extend(
self, x_mapper, y_mapper, info, append, line_width, antialias_stage_2, antialias_stage_2_funcs):
antialias = line_width > 0
expand_aggs_and_cols = self.expand_aggs_and_cols(append)
map_onto_pixel = _build_map_onto_pixel_for_line(
x_mapper, y_mapper, antialias)
overwrite, use_2_stage_agg = two_stage_agg(antialias_stage_2)
if not use_2_stage_agg:
antialias_stage_2_funcs = None
draw_segment = _build_draw_segment(
append, map_onto_pixel, expand_aggs_and_cols, line_width, overwrite
)
extend_cpu, extend_cuda = _build_extend_line_axis1_none_constant(
draw_segment, expand_aggs_and_cols, antialias_stage_2_funcs,
)
x_names = self.x
y_names = self.y
def extend(aggs, df, vt, bounds, plot_start=True):
sx, tx, sy, ty = vt
xmin, xmax, ymin, ymax = bounds
aggs_and_cols = aggs + info(df, aggs[0].shape[:2])
if cudf and isinstance(df, cudf.DataFrame):
xs = self.to_cupy_array(df, x_names)
ys = self.to_cupy_array(df, y_names)
do_extend = extend_cuda[cuda_args(xs.shape)]
else:
xs = df.loc[:, list(x_names)].to_numpy()
ys = df.loc[:, list(y_names)].to_numpy()
do_extend = extend_cpu
do_extend(
sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys, antialias_stage_2, *aggs_and_cols
)
return extend
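# Illustrative usage sketch (not part of the library): LinesAxis1 backs
# Canvas.line calls with axis=1, where each row holds one line and ``x`` and
# ``y`` are lists of columns. The column names below are hypothetical.
#
#     agg = ds.Canvas().line(df, x=['x0', 'x1', 'x2'], y=['y0', 'y1', 'y2'],
#                            axis=1, agg=ds.count())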
class LinesAxis1XConstant(LinesAxis1):
"""
"""
def validate(self, in_dshape):
if not all([isreal(in_dshape.measure[str(ycol)]) for ycol in self.y]):
raise ValueError('y columns must be real')
unique_y_measures = set(in_dshape.measure[str(ycol)]
for ycol in self.y)
if len(unique_y_measures) > 1:
raise ValueError('y columns must have the same data type')
if len(self.x) != len(self.y):
raise ValueError(
f'x and y coordinate lengths do not match: {len(self.x)} != {len(self.y)}')
def required_columns(self):
return self.y
def compute_x_bounds(self, *args):
x_min = np.nanmin(self.x)
x_max = np.nanmax(self.x)
return self.maybe_expand_bounds((x_min, x_max))
@memoize
def compute_bounds_dask(self, ddf):
r = ddf.map_partitions(lambda df: np.array([[
np.nanmin([np.nanmin(df[c].values).item() for c in self.y]),
np.nanmax([np.nanmax(df[c].values).item() for c in self.y])]]
)).compute()
y_extents = np.nanmin(r[:, 0]), np.nanmax(r[:, 1])
return (self.compute_x_bounds(),
self.maybe_expand_bounds(y_extents))
@memoize
def _internal_build_extend(
self, x_mapper, y_mapper, info, append, line_width, antialias_stage_2, antialias_stage_2_funcs):
antialias = line_width > 0
expand_aggs_and_cols = self.expand_aggs_and_cols(append)
map_onto_pixel = _build_map_onto_pixel_for_line(
x_mapper, y_mapper, antialias)
overwrite, use_2_stage_agg = two_stage_agg(antialias_stage_2)
if not use_2_stage_agg:
antialias_stage_2_funcs = None
draw_segment = _build_draw_segment(
append, map_onto_pixel, expand_aggs_and_cols, line_width, overwrite
)
extend_cpu, extend_cuda = _build_extend_line_axis1_x_constant(
draw_segment, expand_aggs_and_cols, antialias_stage_2_funcs,
)
x_values = self.x
y_names = self.y
def extend(aggs, df, vt, bounds, plot_start=True):
sx, tx, sy, ty = vt
xmin, xmax, ymin, ymax = bounds
aggs_and_cols = aggs + info(df, aggs[0].shape[:2])
if cudf and isinstance(df, cudf.DataFrame):
xs = cp.asarray(x_values)
ys = self.to_cupy_array(df, y_names)
do_extend = extend_cuda[cuda_args(ys.shape)]
else:
xs = x_values
ys = df.loc[:, list(y_names)].to_numpy()
do_extend = extend_cpu
do_extend(
sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys, antialias_stage_2, *aggs_and_cols
)
return extend
class LinesAxis1YConstant(LinesAxis1):
"""
"""
def validate(self, in_dshape):
if not all([isreal(in_dshape.measure[str(xcol)]) for xcol in self.x]):
raise ValueError('x columns must be real')
unique_x_measures = set(in_dshape.measure[str(xcol)]
for xcol in self.x)
if len(unique_x_measures) > 1:
raise ValueError('x columns must have the same data type')
if len(self.x) != len(self.y):
raise ValueError(
f'x and y coordinate lengths do not match: {len(self.x)} != {len(self.y)}')
def required_columns(self):
return self.x
def compute_y_bounds(self, *args):
y_min = np.nanmin(self.y)
y_max = np.nanmax(self.y)
return self.maybe_expand_bounds((y_min, y_max))
@memoize
def compute_bounds_dask(self, ddf):
r = ddf.map_partitions(lambda df: np.array([[
np.nanmin([np.nanmin(df[c].values).item() for c in self.x]),
np.nanmax([np.nanmax(df[c].values).item() for c in self.x])]]
)).compute()
x_extents = np.nanmin(r[:, 0]), np.nanmax(r[:, 1])
return (self.maybe_expand_bounds(x_extents),
self.compute_y_bounds())
@memoize
def _internal_build_extend(
self, x_mapper, y_mapper, info, append, line_width, antialias_stage_2, antialias_stage_2_funcs):
antialias = line_width > 0
expand_aggs_and_cols = self.expand_aggs_and_cols(append)
map_onto_pixel = _build_map_onto_pixel_for_line(
x_mapper, y_mapper, antialias)
overwrite, use_2_stage_agg = two_stage_agg(antialias_stage_2)
if not use_2_stage_agg:
antialias_stage_2_funcs = None
draw_segment = _build_draw_segment(
append, map_onto_pixel, expand_aggs_and_cols, line_width, overwrite
)
extend_cpu, extend_cuda = _build_extend_line_axis1_y_constant(
draw_segment, expand_aggs_and_cols, antialias_stage_2_funcs,
)
x_names = self.x
y_values = self.y
def extend(aggs, df, vt, bounds, plot_start=True):
sx, tx, sy, ty = vt
xmin, xmax, ymin, ymax = bounds
aggs_and_cols = aggs + info(df, aggs[0].shape[:2])
if cudf and isinstance(df, cudf.DataFrame):
xs = self.to_cupy_array(df, x_names)
ys = cp.asarray(y_values)
do_extend = extend_cuda[cuda_args(xs.shape)]
else:
xs = df.loc[:, list(x_names)].to_numpy()
ys = y_values
do_extend = extend_cpu
do_extend(
sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys, antialias_stage_2, *aggs_and_cols
)
return extend
class LinesAxis1Ragged(_PointLike, _AntiAliasedLine):
def validate(self, in_dshape):
try:
from datashader.datatypes import RaggedDtype
except ImportError:
RaggedDtype = type(None)
if not isinstance(in_dshape[str(self.x)], RaggedDtype):
raise ValueError('x must be a RaggedArray')
elif not isinstance(in_dshape[str(self.y)], RaggedDtype):
raise ValueError('y must be a RaggedArray')
def required_columns(self):
return (self.x,) + (self.y,)
def compute_x_bounds(self, df):
bounds = self._compute_bounds(df[self.x].array.flat_array)
return self.maybe_expand_bounds(bounds)
def compute_y_bounds(self, df):
bounds = self._compute_bounds(df[self.y].array.flat_array)
return self.maybe_expand_bounds(bounds)
@memoize
def compute_bounds_dask(self, ddf):
r = ddf.map_partitions(lambda df: np.array([[
np.nanmin(df[self.x].array.flat_array).item(),
np.nanmax(df[self.x].array.flat_array).item(),
np.nanmin(df[self.y].array.flat_array).item(),
np.nanmax(df[self.y].array.flat_array).item()]]
)).compute()
x_extents = np.nanmin(r[:, 0]), np.nanmax(r[:, 1])
y_extents = np.nanmin(r[:, 2]), np.nanmax(r[:, 3])
return (self.maybe_expand_bounds(x_extents),
self.maybe_expand_bounds(y_extents))
@memoize
def _internal_build_extend(
self, x_mapper, y_mapper, info, append, line_width, antialias_stage_2, antialias_stage_2_funcs):
antialias = line_width > 0
expand_aggs_and_cols = self.expand_aggs_and_cols(append)
map_onto_pixel = _build_map_onto_pixel_for_line(
x_mapper, y_mapper, antialias)
overwrite, use_2_stage_agg = two_stage_agg(antialias_stage_2)
if not use_2_stage_agg:
antialias_stage_2_funcs = None
draw_segment = _build_draw_segment(
append, map_onto_pixel, expand_aggs_and_cols, line_width, overwrite
)
extend_cpu = _build_extend_line_axis1_ragged(
draw_segment, expand_aggs_and_cols, antialias_stage_2_funcs,
)
x_name = self.x
y_name = self.y
def extend(aggs, df, vt, bounds, plot_start=True):
sx, tx, sy, ty = vt
xmin, xmax, ymin, ymax = bounds
xs = df[x_name].array
ys = df[y_name].array
aggs_and_cols = aggs + info(df, aggs[0].shape[:2])
# line may be clipped, then mapped to pixels
extend_cpu(
sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys, antialias_stage_2, *aggs_and_cols
)
return extend
class LineAxis1Geometry(_GeometryLike, _AntiAliasedLine):
# spatialpandas must be available if a LineAxis1Geometry object is created.
@property
def geom_dtypes(self):
from spatialpandas.geometry import (
LineDtype, MultiLineDtype, RingDtype, PolygonDtype,
MultiPolygonDtype
)
return (LineDtype, MultiLineDtype, RingDtype,
PolygonDtype, MultiPolygonDtype)
@memoize
def _internal_build_extend(
self, x_mapper, y_mapper, info, append, line_width, antialias_stage_2, antialias_stage_2_funcs):
from spatialpandas.geometry import (
PolygonArray, MultiPolygonArray, RingArray
)
antialias = line_width > 0
expand_aggs_and_cols = self.expand_aggs_and_cols(append)
map_onto_pixel = _build_map_onto_pixel_for_line(
x_mapper, y_mapper, antialias)
overwrite, use_2_stage_agg = two_stage_agg(antialias_stage_2)
if not use_2_stage_agg:
antialias_stage_2_funcs = None
draw_segment = _build_draw_segment(
append, map_onto_pixel, expand_aggs_and_cols, line_width, overwrite
)
perform_extend_cpu = _build_extend_line_axis1_geometry(
draw_segment, expand_aggs_and_cols, antialias_stage_2_funcs,
)
geometry_name = self.geometry
def extend(aggs, df, vt, bounds, plot_start=True):
sx, tx, sy, ty = vt
xmin, xmax, ymin, ymax = bounds
aggs_and_cols = aggs + info(df, aggs[0].shape[:2])
geom_array = df[geometry_name].array
# Use the array type to decide whether the geometry represents closed rings.
# For closed geometries the first vertex is not treated as a segment start,
# so the coincident first/last pixel is not double counted.
if isinstance(geom_array, (PolygonArray, MultiPolygonArray)):
# Convert polygon array to multi line of boundary
geom_array = geom_array.boundary
closed_rings = True
elif isinstance(geom_array, RingArray):
closed_rings = True
else:
closed_rings = False
perform_extend_cpu(
sx, tx, sy, ty, xmin, xmax, ymin, ymax,
geom_array, closed_rings, antialias_stage_2, *aggs_and_cols
)
return extend
def _build_map_onto_pixel_for_line(x_mapper, y_mapper, want_antialias=False):
@ngjit
def map_onto_pixel_snap(sx, tx, sy, ty, xmin, xmax, ymin, ymax, x, y):
"""Map points onto pixel grid.
Points falling on upper bound are mapped into previous bin.
If the line has been clipped, x and y will have been
computed to lie on the bounds; we compare point and bounds
in integer space to avoid fp error. In contrast, with
auto-ranging, a point on the bounds will be the same
floating point number as the bound, so comparison in fp
representation of continuous space or in integer space
doesn't change anything.
"""
xx = int(x_mapper(x) * sx + tx)
yy = int(y_mapper(y) * sy + ty)
# Note that sx and tx were designed so that
# x_mapper(xmax) * sx + tx equals the width of the canvas in pixels
#
# Likewise, sy and ty were designed so that
# y_mapper(ymax) * sy + ty equals the height of the canvas in pixels
#
# We round these results to integers (rather than casting to integers
# with the int constructor) to handle cases where floating-point
# precision errors result in a value just under the integer number
# of pixels.
xxmax = round(x_mapper(xmax) * sx + tx)
yymax = round(y_mapper(ymax) * sy + ty)
return (xx - 1 if xx == xxmax else xx,
yy - 1 if yy == yymax else yy)
@ngjit
def map_onto_pixel_no_snap(sx, tx, sy, ty, xmin, xmax, ymin, ymax, x, y):
xx = x_mapper(x)*sx + tx - 0.5
yy = y_mapper(y)*sy + ty - 0.5
return xx, yy
if want_antialias:
return map_onto_pixel_no_snap
else:
return map_onto_pixel_snap
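# Illustrative sketch (not part of the library): for the snapped variant,
# assuming identity x_mapper/y_mapper and a 10x10 pixel canvas over the unit
# square (sx = sy = 10, tx = ty = 0), a vertex lying exactly on the upper
# bound is pushed back into the last valid bin:
#
#     map_onto_pixel_snap(10.0, 0.0, 10.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.35)
#     # -> (9, 3): x == xmax would give pixel 10, so it is snapped to 9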
@ngjit
def _liang_barsky(xmin, xmax, ymin, ymax, x0, x1, y0, y1, skip):
""" An implementation of the Liang-Barsky line clipping algorithm.
https://en.wikipedia.org/wiki/Liang%E2%80%93Barsky_algorithm
"""
# Check if line is fully outside viewport
if x0 < xmin and x1 < xmin:
skip = True
elif x0 > xmax and x1 > xmax:
skip = True
elif y0 < ymin and y1 < ymin:
skip = True
elif y0 > ymax and y1 > ymax:
skip = True
t0, t1 = 0, 1
dx1 = x1 - x0
t0, t1, accept = _clipt(-dx1, x0 - xmin, t0, t1)
if not accept:
skip = True
t0, t1, accept = _clipt(dx1, xmax - x0, t0, t1)
if not accept:
skip = True
dy1 = y1 - y0
t0, t1, accept = _clipt(-dy1, y0 - ymin, t0, t1)
if not accept:
skip = True
t0, t1, accept = _clipt(dy1, ymax - y0, t0, t1)
if not accept:
skip = True
if t1 < 1:
clipped_end = True
x1 = x0 + t1 * dx1
y1 = y0 + t1 * dy1
else:
clipped_end = False
if t0 > 0:
# If x0 is clipped, we need to plot the new start
clipped_start = True
x0 = x0 + t0 * dx1
y0 = y0 + t0 * dy1
else:
clipped_start = False
return x0, x1, y0, y1, skip, clipped_start, clipped_end
@ngjit
def _clipt(p, q, t0, t1):
accept = True
if p < 0 and q < 0:
r = q / p
if r > t1:
accept = False
elif r > t0:
t0 = r
elif p > 0 and q < p:
r = q / p
if r < t0:
accept = False
elif r < t1:
t1 = r
elif q < 0:
accept = False
return t0, t1, accept
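# Illustrative sketch (not part of the library): clipping the horizontal
# segment (-1, 0.5) -> (2, 0.5) against the unit box moves both endpoints
# onto the box edges:
#
#     _liang_barsky(0.0, 1.0, 0.0, 1.0, -1.0, 2.0, 0.5, 0.5, False)
#     # -> (0.0, 1.0, 0.5, 0.5, False, True, True)
#     # i.e. x0/x1 are clipped to the box, skip stays False, and both
#     # clipped_start and clipped_end are True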
@ngjit
def _clamp(x, low, high):
# Clamp ``x`` in the range ``low`` to ``high``.
return max(low, min(x, high))
@ngjit
def _linearstep(edge0, edge1, x):
t = _clamp((x - edge0) / (edge1 - edge0), 0.0, 1.0)
return t
@ngjit
def _x_intercept(y, cx0, cy0, cx1, cy1):
# Return x value of intercept between line at constant y and line
# between corner points.
if cy0 == cy1:
# Line is horizontal, return the "upper", i.e. right-hand, end of it.
return cx1
frac = (y - cy0) / (cy1 - cy0) # In range 0..1
return cx0 + frac*(cx1 - cx0)
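# Illustrative sketch (not part of the library): the intercept of the
# horizontal scanline y = 2 with the edge from (0, 0) to (4, 4) lies halfway
# along the edge:
#
#     _x_intercept(2.0, 0.0, 0.0, 4.0, 4.0)   # -> 2.0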
def _build_full_antialias(expand_aggs_and_cols):
"""Specialize antialiased line drawing algorithm for a given append/axis combination"""
@ngjit
@expand_aggs_and_cols
def _full_antialias(line_width, overwrite, i, x0, x1, y0, y1,
segment_start, segment_end, xm, ym, append,
nx, ny, buffer, *aggs_and_cols):
"""Draw an antialiased line segment.
If overwrite=True can overwrite each pixel multiple times because
using max for the overwriting. If False can only write each pixel
once per segment and its previous segment.
Argument xm, ym are only valid if overwrite and segment_start are False.
"""
if x0 == x1 and y0 == y1:
return
# Scan occurs in y-direction. But wish to scan in the shortest direction,
# so if |x0-x1| < |y0-y1| then flip (x,y) coords for maths and flip back
# again before setting pixels.
flip_xy = abs(x0-x1) < abs(y0-y1)
if flip_xy:
x0, y0 = y0, x0
x1, y1 = y1, x1
xm, ym = ym, xm
scale = 1.0
# line_width less than 1 is rendered as 1 but with lower intensity.
if line_width < 1.0:
scale *= line_width
line_width = 1.0
aa = 1.0
halfwidth = 0.5*(line_width + aa)
# Want y0 <= y1, so switch vertical direction if this is not so.
flip_order = y1 < y0 or (y1 == y0 and x1 < x0)
# Start (x0, y0), end (x1, y1)
# c1 +-------------+ c2 along | right
# (x0, y0) | o o | (x1, y1) vector | vector
# c0 +-------------+ c3 ----> v
alongx = float(x1 - x0)
alongy = float(y1 - y0) # Always +ve
length = math.sqrt(alongx**2 + alongy**2)
alongx /= length
alongy /= length
rightx = alongy
righty = -alongx
# 4 corners, x and y. Uses buffer, which must have length 8. Order of coords is
# (x0, x1, x2, x3, y0, y1, y2, y3). Each CPU/GPU thread has its own local buffer
# so there is no cross-talk. Contents of buffer are written and read within the
# lifetime of this function, so it doesn't matter what they are before this
# function is called or after it returns.
if flip_order:
buffer[0] = x1 - halfwidth*( rightx - alongx)
buffer[1] = x1 - halfwidth*(-rightx - alongx)
buffer[2] = x0 - halfwidth*(-rightx + alongx)
buffer[3] = x0 - halfwidth*( rightx + alongx)
buffer[4] = y1 - halfwidth*( righty - alongy)
buffer[5] = y1 - halfwidth*(-righty - alongy)
buffer[6] = y0 - halfwidth*(-righty + alongy)
buffer[7] = y0 - halfwidth*( righty + alongy)
else:
buffer[0] = x0 + halfwidth*( rightx - alongx)
buffer[1] = x0 + halfwidth*(-rightx - alongx)
buffer[2] = x1 + halfwidth*(-rightx + alongx)
buffer[3] = x1 + halfwidth*( rightx + alongx)
buffer[4] = y0 + halfwidth*( righty - alongy)
buffer[5] = y0 + halfwidth*(-righty - alongy)
buffer[6] = y1 + halfwidth*(-righty + alongy)
buffer[7] = y1 + halfwidth*( righty + alongy)
xmax = nx-1
ymax = ny-1
if flip_xy:
xmax, ymax = ymax, xmax
# Index of lowest-y point.
if flip_order:
lowindex = 0 if x0 > x1 else 1
else:
lowindex = 0 if x1 > x0 else 1
if not overwrite and not segment_start:
prev_alongx = x0 - xm
prev_alongy = y0 - ym
prev_length = math.sqrt(prev_alongx**2 + prev_alongy**2)
if prev_length > 0.0:
prev_alongx /= prev_length
prev_alongy /= prev_length
prev_rightx = prev_alongy
prev_righty = -prev_alongx
else:
overwrite = True
# y limits of scan.
ystart = _clamp(math.ceil(buffer[4 + lowindex]), 0, ymax)
yend = _clamp(math.floor(buffer[4 + (lowindex+2) % 4]), 0, ymax)
# Need to know which edges are to left and right; both will change.
ll = lowindex # Index of lower point of left edge.
lu = (ll + 1) % 4 # Index of upper point of left edge.
rl = lowindex # Index of lower point of right edge.
ru = (rl + 3) % 4 # Index of upper point of right edge.
for y in range(ystart, yend+1):
if ll == lowindex and y > buffer[4 + lu]:
ll = lu
lu = (ll + 1) % 4
if rl == lowindex and y > buffer[4 + ru]:
rl = ru
ru = (rl + 3) % 4
# Find x limits of scan at this y.
xleft = _clamp(math.ceil(_x_intercept(
y, buffer[ll], buffer[4+ll], buffer[lu], buffer[4+lu])), 0, xmax)
xright = _clamp(math.floor(_x_intercept(
y, buffer[rl], buffer[4+rl], buffer[ru], buffer[4+ru])), 0, xmax)
for x in range(xleft, xright+1):
along = (x-x0)*alongx + (y-y0)*alongy # dot product
prev_correction = False
if along < 0.0:
# Before start of segment
if overwrite or segment_start or (x-x0)*prev_alongx + (y-y0)*prev_alongy > 0.0:
distance = math.sqrt((x-x0)**2 + (y-y0)**2) # round join/end cap
else:
continue
elif along > length:
# After end of segment
if overwrite or segment_end:
distance = math.sqrt((x-x1)**2 + (y-y1)**2) # round join/end cap
else:
continue
else:
# Within segment
distance = abs((x-x0)*rightx + (y-y0)*righty)
if not overwrite and not segment_start and \
-prev_length <= (x-x0)*prev_alongx + (y-y0)*prev_alongy <= 0.0 and \
abs((x-x0)*prev_rightx + (y-y0)*prev_righty) <= halfwidth:
prev_correction = True
value = 1.0 - _linearstep(0.5*(line_width - aa), halfwidth, distance)
value *= scale
if prev_correction:
# Already set pixel from previous segment, need to correct it
prev_distance = abs((x-x0)*prev_rightx + (y-y0)*prev_righty)
prev_value = 1.0 - _linearstep(0.5*(line_width - aa), halfwidth, prev_distance)
prev_value *= scale
value = value - prev_value # Correction from previous segment.
if value > 0.0:
xx, yy = (y, x) if flip_xy else (x, y)
append(i, xx, yy, value, *aggs_and_cols)
return _full_antialias
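# Illustrative sketch (not part of the library): with line_width = 1 and the
# antialias width aa = 1, halfwidth is 1.0 and the per-pixel value reduces to
# 1 - _linearstep(0.0, 1.0, distance), a linear intensity ramp across the
# edge of the line:
#
#     distance 0.0 from the centreline  -> value 1.0 (full intensity)
#     distance 0.5                      -> value 0.5
#     distance >= 1.0                   -> value 0.0 (outside the line)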
def _build_bresenham(expand_aggs_and_cols):
"""Specialize a bresenham kernel for a given append/axis combination"""
@ngjit
@expand_aggs_and_cols
def _bresenham(i, sx, tx, sy, ty, xmin, xmax, ymin, ymax, segment_start,
x0, x1, y0, y1, clipped, append, *aggs_and_cols):
"""Draw a line segment using Bresenham's algorithm
This method plots a line segment with integer coordinates onto a pixel
grid.
"""
dx = x1 - x0
ix = (dx > 0) - (dx < 0)
dx = abs(dx) * 2
dy = y1 - y0
iy = (dy > 0) - (dy < 0)
dy = abs(dy) * 2
# If vertices weren't clipped and are coincident in integer space,
# call append and return, so that the second vertex won't be hit below.
if not clipped and not (dx | dy):
append(i, x0, y0, *aggs_and_cols)
return
if segment_start:
append(i, x0, y0, *aggs_and_cols)
if dx >= dy:
error = 2 * dy - dx
while x0 != x1:
if error >= 0 and (error or ix > 0):
error -= 2 * dx
y0 += iy
error += 2 * dy
x0 += ix
append(i, x0, y0, *aggs_and_cols)
else:
error = 2 * dx - dy
while y0 != y1:
if error >= 0 and (error or iy > 0):
error -= 2 * dy
x0 += ix
error += 2 * dx
y0 += iy
append(i, x0, y0, *aggs_and_cols)
return _bresenham
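# Illustrative sketch (not part of the library): for an unclipped segment
# from (0, 0) to (3, 1) with segment_start=True, the loop above appends the
# pixels (0, 0), (1, 0), (2, 1), (3, 1). The start pixel is appended once and
# each iteration appends exactly one new pixel, so the vertex shared between
# consecutive segments is not counted twice.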
def _build_draw_segment(append, map_onto_pixel, expand_aggs_and_cols, line_width, overwrite):
"""Specialize a line plotting kernel for a given append/axis combination"""
if line_width > 0.0:
_bresenham = None
_full_antialias = _build_full_antialias(expand_aggs_and_cols)
else:
_bresenham = _build_bresenham(expand_aggs_and_cols)
_full_antialias = None
@ngjit
@expand_aggs_and_cols
def draw_segment(
i, sx, tx, sy, ty, xmin, xmax, ymin, ymax, segment_start, segment_end,
x0, x1, y0, y1, xm, ym, buffer, *aggs_and_cols
):
# xm, ym are only valid if segment_start is False.
# buffer is a length-8 float64 array if antialiasing is to be used,
# or None otherwise. It is allocated in the appropriate extend_cpu or
# extend_cuda function so that it is of the correct type (a numpy array
# or a CUDA local array) and that there is one per CPU/GPU thread.
# NOTE: The slightly bizarre variable versioning herein for variables
# x0, x1, y0, y1 is to deal with Numba not having SSA form prior to
# version 0.49.0. The result of lack of SSA is that the type inference
# algorithms would widen types that are multiply defined, as would be the
# case in code such as `x, y = function(x, y)` if the function returned
# a wider type for x, y than the input x, y.
skip = False
# If any of the coordinates are NaN, there's a discontinuity.
# Skip the entire segment.
if isnull(x0) or isnull(y0) or isnull(x1) or isnull(y1):
skip = True
# Use Liang-Barsky to clip the segment to a bounding box
x0_1, x1_1, y0_1, y1_1, skip, clipped_start, clipped_end = \
_liang_barsky(xmin, xmax, ymin, ymax, x0, x1, y0, y1, skip)
if not skip:
clipped = clipped_start or clipped_end
segment_start = segment_start or clipped_start
x0_2, y0_2 = map_onto_pixel(
sx, tx, sy, ty, xmin, xmax, ymin, ymax, x0_1, y0_1
)
x1_2, y1_2 = map_onto_pixel(
sx, tx, sy, ty, xmin, xmax, ymin, ymax, x1_1, y1_1
)
if line_width > 0.0:
if segment_start:
xm_2 = ym_2 = 0.0
else:
xm_2, ym_2 = map_onto_pixel(
sx, tx, sy, ty, xmin, xmax, ymin, ymax, xm, ym)
nx = round((xmax - xmin)*sx)
ny = round((ymax - ymin)*sy)
_full_antialias(line_width, overwrite, i, x0_2, x1_2, y0_2, y1_2,
segment_start, segment_end, xm_2, ym_2, append,
nx, ny, buffer, *aggs_and_cols)
else:
_bresenham(i, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
segment_start, x0_2, x1_2, y0_2, y1_2,
clipped, append, *aggs_and_cols)
return draw_segment
def _build_extend_line_axis0(draw_segment, expand_aggs_and_cols, antialias_stage_2_funcs):
use_2_stage_agg = antialias_stage_2_funcs is not None
@ngjit
@expand_aggs_and_cols
def perform_extend_line(i, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
plot_start, xs, ys, buffer, *aggs_and_cols):
x0 = xs[i]
y0 = ys[i]
x1 = xs[i + 1]
y1 = ys[i + 1]
segment_start = (plot_start if i == 0 else
(isnull(xs[i - 1]) or isnull(ys[i - 1])))
segment_end = (i == len(xs)-2) or isnull(xs[i+2]) or isnull(ys[i+2])
if segment_start or use_2_stage_agg:
xm = 0.0
ym = 0.0
else:
xm = xs[i-1]
ym = ys[i-1]
draw_segment(i, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
segment_start, segment_end, x0, x1, y0, y1,
xm, ym, buffer, *aggs_and_cols)
@ngjit
@expand_aggs_and_cols
def extend_cpu(sx, tx, sy, ty, xmin, xmax, ymin, ymax,
xs, ys, plot_start, antialias_stage_2, *aggs_and_cols):
"""Aggregate along a line formed by ``xs`` and ``ys``"""
antialias = antialias_stage_2 is not None
buffer = np.empty(8) if antialias else None
nrows = xs.shape[0]
for i in range(nrows - 1):
perform_extend_line(i, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
plot_start, xs, ys, buffer, *aggs_and_cols)
@cuda.jit
@expand_aggs_and_cols
def extend_cuda(sx, tx, sy, ty, xmin, xmax, ymin, ymax,
xs, ys, plot_start, antialias_stage_2, *aggs_and_cols):
antialias = antialias_stage_2 is not None
buffer = cuda.local.array(8, nb_types.float64) if antialias else None
i = cuda.grid(1)
if i < xs.shape[0] - 1:
perform_extend_line(i, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
plot_start, xs, ys, buffer, *aggs_and_cols)
return extend_cpu, extend_cuda
def _build_extend_line_axis0_multi(draw_segment, expand_aggs_and_cols, antialias_stage_2_funcs):
if antialias_stage_2_funcs is not None:
aa_stage_2_accumulate, aa_stage_2_clear, aa_stage_2_copy_back = antialias_stage_2_funcs
use_2_stage_agg = antialias_stage_2_funcs is not None
@ngjit
@expand_aggs_and_cols
def perform_extend_line(i, j, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
plot_start, xs, ys, buffer, *aggs_and_cols):
x0 = xs[i, j]
y0 = ys[i, j]
x1 = xs[i + 1, j]
y1 = ys[i + 1, j]
segment_start = (plot_start if i == 0 else
(isnull(xs[i - 1, j]) or isnull(ys[i - 1, j])))
segment_end = (i == len(xs)-2) or isnull(xs[i+2, j]) or isnull(ys[i+2, j])
if segment_start or use_2_stage_agg:
xm = 0.0
ym = 0.0
else:
xm = xs[i-1, j]
ym = ys[i-1, j]
draw_segment(i, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
segment_start, segment_end, x0, x1, y0, y1,
xm, ym, buffer, *aggs_and_cols)
@ngjit
@expand_aggs_and_cols
def extend_cpu(sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys,
plot_start, antialias_stage_2, *aggs_and_cols):
"""Aggregate along a line formed by ``xs`` and ``ys``"""
antialias = antialias_stage_2 is not None
buffer = np.empty(8) if antialias else None
nrows, ncols = xs.shape
for j in range(ncols):
for i in range(nrows - 1):
perform_extend_line(i, j, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
plot_start, xs, ys, buffer, *aggs_and_cols)
def extend_cpu_antialias_2agg(sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys,
plot_start, antialias_stage_2, *aggs_and_cols):
"""Aggregate along a line formed by ``xs`` and ``ys``"""
n_aggs = len(antialias_stage_2[0])
aggs_and_accums = tuple((agg, agg.copy()) for agg in aggs_and_cols[:n_aggs])
cpu_antialias_2agg_impl(sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys,
plot_start, antialias_stage_2, aggs_and_accums, *aggs_and_cols)
@ngjit
@expand_aggs_and_cols
def cpu_antialias_2agg_impl(sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys,
plot_start, antialias_stage_2, aggs_and_accums, *aggs_and_cols):
antialias = antialias_stage_2 is not None
buffer = np.empty(8) if antialias else None
nrows, ncols = xs.shape
for j in range(ncols):
for i in range(nrows - 1):
perform_extend_line(i, j, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
plot_start, xs, ys, buffer, *aggs_and_cols)
if ncols == 1:
return
aa_stage_2_accumulate(aggs_and_accums, j==0)
if j < ncols - 1:
aa_stage_2_clear(aggs_and_accums)
aa_stage_2_copy_back(aggs_and_accums)
@cuda.jit
@expand_aggs_and_cols
def extend_cuda(sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys,
plot_start, antialias_stage_2, *aggs_and_cols):
antialias = antialias_stage_2 is not None
buffer = cuda.local.array(8, nb_types.float64) if antialias else None
i, j = cuda.grid(2)
if i < xs.shape[0] - 1 and j < xs.shape[1]:
perform_extend_line(i, j, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
plot_start, xs, ys, buffer, *aggs_and_cols)
if use_2_stage_agg:
return extend_cpu_antialias_2agg, extend_cuda
else:
return extend_cpu, extend_cuda
def _build_extend_line_axis1_none_constant(draw_segment, expand_aggs_and_cols, antialias_stage_2_funcs):
if antialias_stage_2_funcs is not None:
aa_stage_2_accumulate, aa_stage_2_clear, aa_stage_2_copy_back = antialias_stage_2_funcs
use_2_stage_agg = antialias_stage_2_funcs is not None
@ngjit
@expand_aggs_and_cols
def perform_extend_line(
i, j, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
xs, ys, buffer, *aggs_and_cols
):
x0 = xs[i, j]
y0 = ys[i, j]
x1 = xs[i, j + 1]
y1 = ys[i, j + 1]
segment_start = (
(j == 0) or isnull(xs[i, j - 1]) or isnull(ys[i, j - 1])
)
segment_end = (j == xs.shape[1]-2) or isnull(xs[i, j+2]) or isnull(ys[i, j+2])
if segment_start or use_2_stage_agg:
xm = 0.0
ym = 0.0
else:
xm = xs[i, j-1]
ym = ys[i, j-1]
draw_segment(i, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
segment_start, segment_end, x0, x1, y0, y1,
xm, ym, buffer, *aggs_and_cols)
@ngjit
@expand_aggs_and_cols
def extend_cpu(sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys, antialias_stage_2, *aggs_and_cols):
antialias = antialias_stage_2 is not None
buffer = np.empty(8) if antialias else None
ncols = xs.shape[1]
for i in range(xs.shape[0]):
for j in range(ncols - 1):
perform_extend_line(
i, j, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
xs, ys, buffer, *aggs_and_cols
)
def extend_cpu_antialias_2agg(sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys,
antialias_stage_2, *aggs_and_cols):
n_aggs = len(antialias_stage_2[0])
aggs_and_accums = tuple((agg, agg.copy()) for agg in aggs_and_cols[:n_aggs])
cpu_antialias_2agg_impl(sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys,
antialias_stage_2, aggs_and_accums, *aggs_and_cols)
@ngjit
@expand_aggs_and_cols
def cpu_antialias_2agg_impl(sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys,
antialias_stage_2, aggs_and_accums, *aggs_and_cols):
antialias = antialias_stage_2 is not None
buffer = np.empty(8) if antialias else None
ncols = xs.shape[1]
for i in range(xs.shape[0]):
for j in range(ncols - 1):
perform_extend_line(i, j, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
xs, ys, buffer, *aggs_and_cols)
if xs.shape[0] == 1:
return
aa_stage_2_accumulate(aggs_and_accums, i==0)
if i < xs.shape[0] - 1:
aa_stage_2_clear(aggs_and_accums)
aa_stage_2_copy_back(aggs_and_accums)
@cuda.jit
@expand_aggs_and_cols
def extend_cuda(sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys, antialias_stage_2, *aggs_and_cols):
antialias = antialias_stage_2 is not None
buffer = cuda.local.array(8, nb_types.float64) if antialias else None
i, j = cuda.grid(2)
if i < xs.shape[0] and j < xs.shape[1] - 1:
perform_extend_line(
i, j, sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys,
buffer, *aggs_and_cols
)
if use_2_stage_agg:
return extend_cpu_antialias_2agg, extend_cuda
else:
return extend_cpu, extend_cuda
def _build_extend_line_axis1_x_constant(draw_segment, expand_aggs_and_cols, antialias_stage_2_funcs):
if antialias_stage_2_funcs is not None:
aa_stage_2_accumulate, aa_stage_2_clear, aa_stage_2_copy_back = antialias_stage_2_funcs
use_2_stage_agg = antialias_stage_2_funcs is not None
@ngjit
@expand_aggs_and_cols
def perform_extend_line(
i, j, sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys, buffer, *aggs_and_cols
):
x0 = xs[j]
y0 = ys[i, j]
x1 = xs[j + 1]
y1 = ys[i, j + 1]
segment_start = (
(j == 0) or isnull(xs[j - 1]) or isnull(ys[i, j - 1])
)
segment_end = (j == len(xs)-2) or isnull(xs[j+2]) or isnull(ys[i, j+2])
if segment_start or use_2_stage_agg:
xm = 0.0
ym = 0.0
else:
xm = xs[j-1]
ym = ys[i, j-1]
draw_segment(i, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
segment_start, segment_end, x0, x1, y0, y1,
xm, ym, buffer, *aggs_and_cols)
@ngjit
@expand_aggs_and_cols
def extend_cpu(sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys, antialias_stage_2, *aggs_and_cols):
antialias = antialias_stage_2 is not None
buffer = np.empty(8) if antialias else None
ncols = ys.shape[1]
for i in range(ys.shape[0]):
for j in range(ncols - 1):
perform_extend_line(
i, j, sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys, buffer, *aggs_and_cols
)
def extend_cpu_antialias_2agg(sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys,
antialias_stage_2, *aggs_and_cols):
n_aggs = len(antialias_stage_2[0])
aggs_and_accums = tuple((agg, agg.copy()) for agg in aggs_and_cols[:n_aggs])
cpu_antialias_2agg_impl(sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys,
antialias_stage_2, aggs_and_accums, *aggs_and_cols)
@ngjit
@expand_aggs_and_cols
def cpu_antialias_2agg_impl(sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys,
antialias_stage_2, aggs_and_accums, *aggs_and_cols):
antialias = antialias_stage_2 is not None
buffer = np.empty(8) if antialias else None
ncols = ys.shape[1]
for i in range(ys.shape[0]):
for j in range(ncols - 1):
perform_extend_line(
i, j, sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys,
buffer, *aggs_and_cols
)
if ys.shape[0] == 1:
return
aa_stage_2_accumulate(aggs_and_accums, i==0)
if i < ys.shape[0] - 1:
aa_stage_2_clear(aggs_and_accums)
aa_stage_2_copy_back(aggs_and_accums)
@cuda.jit
@expand_aggs_and_cols
def extend_cuda(sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys, antialias_stage_2, *aggs_and_cols):
antialias = antialias_stage_2 is not None
buffer = cuda.local.array(8, nb_types.float64) if antialias else None
i, j = cuda.grid(2)
if i < ys.shape[0] and j < ys.shape[1] - 1:
perform_extend_line(
i, j, sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys,
buffer, *aggs_and_cols
)
if use_2_stage_agg:
return extend_cpu_antialias_2agg, extend_cuda
else:
return extend_cpu, extend_cuda
def _build_extend_line_axis1_y_constant(draw_segment, expand_aggs_and_cols, antialias_stage_2_funcs):
if antialias_stage_2_funcs is not None:
aa_stage_2_accumulate, aa_stage_2_clear, aa_stage_2_copy_back = antialias_stage_2_funcs
use_2_stage_agg = antialias_stage_2_funcs is not None
@ngjit
@expand_aggs_and_cols
def perform_extend_line(
i, j, sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys, buffer, *aggs_and_cols
):
x0 = xs[i, j]
y0 = ys[j]
x1 = xs[i, j + 1]
y1 = ys[j + 1]
segment_start = (
(j == 0) or isnull(xs[i, j - 1]) or isnull(ys[j - 1])
)
segment_end = (j == len(ys)-2) or isnull(xs[i, j+2]) or isnull(ys[j+2])
if segment_start or use_2_stage_agg:
xm = 0.0
ym = 0.0
else:
xm = xs[i, j-1]
ym = ys[j-1]
draw_segment(i, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
segment_start, segment_end, x0, x1, y0, y1,
xm, ym, buffer, *aggs_and_cols)
@ngjit
@expand_aggs_and_cols
def extend_cpu(sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys, antialias_stage_2, *aggs_and_cols):
antialias = antialias_stage_2 is not None
buffer = np.empty(8) if antialias else None
ncols = xs.shape[1]
for i in range(xs.shape[0]):
for j in range(ncols - 1):
perform_extend_line(
i, j, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
xs, ys, buffer, *aggs_and_cols
)
def extend_cpu_antialias_2agg(sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys,
antialias_stage_2, *aggs_and_cols):
n_aggs = len(antialias_stage_2[0])
aggs_and_accums = tuple((agg, agg.copy()) for agg in aggs_and_cols[:n_aggs])
cpu_antialias_2agg_impl(sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys,
antialias_stage_2, aggs_and_accums, *aggs_and_cols)
@ngjit
@expand_aggs_and_cols
def cpu_antialias_2agg_impl(sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys,
antialias_stage_2, aggs_and_accums, *aggs_and_cols):
antialias = antialias_stage_2 is not None
buffer = np.empty(8) if antialias else None
ncols = xs.shape[1]
for i in range(xs.shape[0]):
for j in range(ncols - 1):
perform_extend_line(
i, j, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
xs, ys, buffer, *aggs_and_cols
)
if xs.shape[0] == 1:
return
aa_stage_2_accumulate(aggs_and_accums, i==0)
if i < xs.shape[0] - 1:
aa_stage_2_clear(aggs_and_accums)
aa_stage_2_copy_back(aggs_and_accums)
@cuda.jit
@expand_aggs_and_cols
def extend_cuda(sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys, antialias_stage_2, *aggs_and_cols):
antialias = antialias_stage_2 is not None
buffer = cuda.local.array(8, nb_types.float64) if antialias else None
i, j = cuda.grid(2)
if i < xs.shape[0] and j < xs.shape[1] - 1:
perform_extend_line(
i, j, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
xs, ys, buffer, *aggs_and_cols
)
if use_2_stage_agg:
return extend_cpu_antialias_2agg, extend_cuda
else:
return extend_cpu, extend_cuda
def _build_extend_line_axis1_ragged(draw_segment, expand_aggs_and_cols, antialias_stage_2_funcs):
if antialias_stage_2_funcs is not None:
aa_stage_2_accumulate, aa_stage_2_clear, aa_stage_2_copy_back = antialias_stage_2_funcs
use_2_stage_agg = antialias_stage_2_funcs is not None
def extend_cpu(
sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys, antialias_stage_2, *aggs_and_cols
):
x_start_i = xs.start_indices
x_flat = xs.flat_array
y_start_i = ys.start_indices
y_flat = ys.flat_array
extend_cpu_numba(
sx, tx, sy, ty, xmin, xmax, ymin, ymax,
x_start_i, x_flat, y_start_i, y_flat, antialias_stage_2, *aggs_and_cols
)
@ngjit
@expand_aggs_and_cols
def extend_cpu_numba(
sx, tx, sy, ty, xmin, xmax, ymin, ymax,
x_start_i, x_flat, y_start_i, y_flat, antialias_stage_2, *aggs_and_cols
):
antialias = antialias_stage_2 is not None
buffer = np.empty(8) if antialias else None
nrows = len(x_start_i)
x_flat_len = len(x_flat)
y_flat_len = len(y_flat)
for i in range(nrows):
# Get x index range
x_start_index = x_start_i[i]
x_stop_index = (x_start_i[i + 1]
if i < nrows - 1
else x_flat_len)
# Get y index range
y_start_index = y_start_i[i]
y_stop_index = (y_start_i[i + 1]
if i < nrows - 1
else y_flat_len)
# Number of vertices in this line is the shorter of the x and y runs
segment_len = min(x_stop_index - x_start_index,
y_stop_index - y_start_index)
for j in range(segment_len - 1):
x0 = x_flat[x_start_index + j]
y0 = y_flat[y_start_index + j]
x1 = x_flat[x_start_index + j + 1]
y1 = y_flat[y_start_index + j + 1]
segment_start = (
(j == 0) or
isnull(x_flat[x_start_index + j - 1]) or
isnull(y_flat[y_start_index + j - 1])
)
segment_end = (
(j == segment_len-2) or
isnull(x_flat[x_start_index + j + 2]) or
isnull(y_flat[y_start_index + j + 2])
)
if segment_start or use_2_stage_agg:
xm = 0.0
ym = 0.0
else:
xm = x_flat[x_start_index + j - 1]
ym = y_flat[y_start_index + j - 1]
draw_segment(i, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
segment_start, segment_end, x0, x1, y0, y1,
xm, ym, buffer, *aggs_and_cols)
def extend_cpu_antialias_2agg(
sx, tx, sy, ty, xmin, xmax, ymin, ymax, xs, ys, antialias_stage_2, *aggs_and_cols
):
x_start_i = xs.start_indices
x_flat = xs.flat_array
y_start_i = ys.start_indices
y_flat = ys.flat_array
n_aggs = len(antialias_stage_2[0])
aggs_and_accums = tuple((agg, agg.copy()) for agg in aggs_and_cols[:n_aggs])
extend_cpu_numba_antialias_2agg(
sx, tx, sy, ty, xmin, xmax, ymin, ymax, x_start_i, x_flat,
y_start_i, y_flat, antialias_stage_2, aggs_and_accums, *aggs_and_cols
)
@ngjit
@expand_aggs_and_cols
def extend_cpu_numba_antialias_2agg(
sx, tx, sy, ty, xmin, xmax, ymin, ymax, x_start_i, x_flat,
y_start_i, y_flat, antialias_stage_2, aggs_and_accums, *aggs_and_cols
):
antialias = antialias_stage_2 is not None
buffer = np.empty(8) if antialias else None
nrows = len(x_start_i)
x_flat_len = len(x_flat)
y_flat_len = len(y_flat)
for i in range(nrows):
# Get x index range
x_start_index = x_start_i[i]
x_stop_index = (x_start_i[i + 1]
if i < nrows - 1
else x_flat_len)
# Get y index range
y_start_index = y_start_i[i]
y_stop_index = (y_start_i[i + 1]
if i < nrows - 1
else y_flat_len)
# Number of vertices in this line is the shorter of the x and y runs
segment_len = min(x_stop_index - x_start_index,
y_stop_index - y_start_index)
for j in range(segment_len - 1):
x0 = x_flat[x_start_index + j]
y0 = y_flat[y_start_index + j]
x1 = x_flat[x_start_index + j + 1]
y1 = y_flat[y_start_index + j + 1]
segment_start = (
(j == 0) or
isnull(x_flat[x_start_index + j - 1]) or
isnull(y_flat[y_start_index + j - 1])
)
segment_end = (
(j == segment_len-2) or
isnull(x_flat[x_start_index + j + 2]) or
isnull(y_flat[y_start_index + j + 2])
)
draw_segment(i, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
segment_start, segment_end, x0, x1, y0, y1,
0.0, 0.0, buffer, *aggs_and_cols)
if nrows == 1:
return
aa_stage_2_accumulate(aggs_and_accums, i==0)
if i < nrows - 1:
aa_stage_2_clear(aggs_and_accums)
aa_stage_2_copy_back(aggs_and_accums)
if use_2_stage_agg:
return extend_cpu_antialias_2agg
else:
return extend_cpu
def _build_extend_line_axis1_geometry(draw_segment, expand_aggs_and_cols, antialias_stage_2_funcs):
if antialias_stage_2_funcs is not None:
aa_stage_2_accumulate, aa_stage_2_clear, aa_stage_2_copy_back = antialias_stage_2_funcs
use_2_stage_agg = antialias_stage_2_funcs is not None
def extend_cpu(
sx, tx, sy, ty, xmin, xmax, ymin, ymax,
geometry, closed_rings, antialias_stage_2, *aggs_and_cols
):
values = geometry.buffer_values
missing = geometry.isna()
offsets = geometry.buffer_offsets
if len(offsets) == 2:
# MultiLineArray
offsets0, offsets1 = offsets
else:
# LineArray
offsets1 = offsets[0]
offsets0 = np.arange(len(offsets1))
if geometry._sindex is not None:
# Compute indices of potentially intersecting geometries using
# geometry's R-tree if there is one
eligible_inds = geometry.sindex.intersects((xmin, ymin, xmax, ymax))
else:
# Otherwise, process all indices
eligible_inds = np.arange(0, len(geometry), dtype='uint32')
extend_cpu_numba(
sx, tx, sy, ty, xmin, xmax, ymin, ymax,
values, missing, offsets0, offsets1, eligible_inds,
closed_rings, antialias_stage_2, *aggs_and_cols
)
@ngjit
@expand_aggs_and_cols
def extend_cpu_numba(
sx, tx, sy, ty, xmin, xmax, ymin, ymax,
values, missing, offsets0, offsets1, eligible_inds,
closed_rings, antialias_stage_2, *aggs_and_cols
):
antialias = antialias_stage_2 is not None
buffer = np.empty(8) if antialias else None
for i in eligible_inds:
if missing[i]:
continue
start0 = offsets0[i]
stop0 = offsets0[i + 1]
for j in range(start0, stop0):
start1 = offsets1[j]
stop1 = offsets1[j + 1]
for k in range(start1, stop1 - 2, 2):
x0 = values[k]
if not np.isfinite(x0):
continue
y0 = values[k + 1]
if not np.isfinite(y0):
continue
x1 = values[k + 2]
if not np.isfinite(x1):
continue
y1 = values[k + 3]
if not np.isfinite(y1):
continue
segment_start = (
(k == start1 and not closed_rings) or
(k > start1 and
(not np.isfinite(values[k - 2]) or not np.isfinite(values[k - 1])))
)
segment_end = (
(not closed_rings and k == stop1-4) or
(k < stop1-4 and
(not np.isfinite(values[k + 4]) or not np.isfinite(values[k + 5])))
)
if segment_start or use_2_stage_agg:
xm = 0.0
ym = 0.0
elif k == start1 and closed_rings:
xm = values[stop1-4]
ym = values[stop1-3]
else:
xm = values[k-2]
ym = values[k-1]
draw_segment(i, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
segment_start, segment_end, x0, x1, y0, y1,
xm, ym, buffer, *aggs_and_cols)
def extend_cpu_antialias_2agg(
sx, tx, sy, ty, xmin, xmax, ymin, ymax,
geometry, closed_rings, antialias_stage_2, *aggs_and_cols
):
values = geometry.buffer_values
missing = geometry.isna()
offsets = geometry.buffer_offsets
if len(offsets) == 2:
# MultiLineArray
offsets0, offsets1 = offsets
else:
# LineArray
offsets1 = offsets[0]
offsets0 = np.arange(len(offsets1))
if geometry._sindex is not None:
# Compute indices of potentially intersecting geometries using
# geometry's R-tree if there is one
eligible_inds = geometry.sindex.intersects((xmin, ymin, xmax, ymax))
else:
# Otherwise, process all indices
eligible_inds = np.arange(0, len(geometry), dtype='uint32')
n_aggs = len(antialias_stage_2[0])
aggs_and_accums = tuple((agg, agg.copy()) for agg in aggs_and_cols[:n_aggs])
extend_cpu_numba_antialias_2agg(
sx, tx, sy, ty, xmin, xmax, ymin, ymax,
values, missing, offsets0, offsets1, eligible_inds,
closed_rings, antialias_stage_2, aggs_and_accums, *aggs_and_cols
)
@ngjit
@expand_aggs_and_cols
def extend_cpu_numba_antialias_2agg(
sx, tx, sy, ty, xmin, xmax, ymin, ymax,
values, missing, offsets0, offsets1, eligible_inds,
closed_rings, antialias_stage_2, aggs_and_accums, *aggs_and_cols
):
antialias = antialias_stage_2 is not None
buffer = np.empty(8) if antialias else None
first_pass = True
for i in eligible_inds:
if missing[i]:
continue
start0 = offsets0[i]
stop0 = offsets0[i + 1]
for j in range(start0, stop0):
start1 = offsets1[j]
stop1 = offsets1[j + 1]
for k in range(start1, stop1 - 2, 2):
x0 = values[k]
if not np.isfinite(x0):
continue
y0 = values[k + 1]
if not np.isfinite(y0):
continue
x1 = values[k + 2]
if not np.isfinite(x1):
continue
y1 = values[k + 3]
if not np.isfinite(y1):
continue
segment_start = (
(k == start1 and not closed_rings) or
(k > start1 and
(not np.isfinite(values[k - 2]) or not np.isfinite(values[k - 1])))
)
segment_end = (
(not closed_rings and k == stop1-4) or
(k < stop1-4 and
(not np.isfinite(values[k + 4]) or not np.isfinite(values[k + 5])))
)
draw_segment(i, sx, tx, sy, ty, xmin, xmax, ymin, ymax,
segment_start, segment_end, x0, x1, y0, y1,
0.0, 0.0, buffer, *aggs_and_cols)
aa_stage_2_accumulate(aggs_and_accums, first_pass)
first_pass = False
aa_stage_2_clear(aggs_and_accums)
aa_stage_2_copy_back(aggs_and_accums)
if use_2_stage_agg:
return extend_cpu_antialias_2agg
else:
return extend_cpu
|
7c7e658c1bab5a1dd21b35376e2dd16422e12ae1
|
b3db95f1741e50140a6dd14f199cc585e3b61254
|
/tools/releasetools/test_validate_target_files.py
|
48b563d7b009475db03191987349c6bfe1c71316
|
[
"Apache-2.0"
] |
permissive
|
aosp-mirror/platform_build
|
58bc3f117b721b555203c04b9a2636c51dfc009d
|
ef9ba4d22bb56b0455a2d207300cf7ed18d8e5dc
|
refs/heads/main
| 2023-08-17T04:01:06.769170
| 2023-08-17T00:35:36
| 2023-08-17T00:35:36
| 65,832
| 215
| 251
| null | 2023-08-04T13:45:36
| 2008-10-21T18:19:56
|
Makefile
|
UTF-8
|
Python
| false
| false
| 13,647
|
py
|
test_validate_target_files.py
|
#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unittests for validate_target_files.py."""
import os
import os.path
import shutil
import zipfile
import common
import test_utils
from rangelib import RangeSet
from validate_target_files import (ValidateVerifiedBootImages,
ValidateFileConsistency, CheckBuildPropDuplicity)
from verity_utils import CreateVerityImageBuilder
class ValidateTargetFilesTest(test_utils.ReleaseToolsTestCase):
def setUp(self):
self.testdata_dir = test_utils.get_testdata_dir()
def _generate_boot_image(self, output_file):
kernel = common.MakeTempFile(prefix='kernel-')
with open(kernel, 'wb') as kernel_fp:
kernel_fp.write(os.urandom(10))
cmd = ['mkbootimg', '--kernel', kernel, '-o', output_file]
proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
self.assertEqual(
0, proc.returncode,
"Failed to run mkbootimg: {}".format(stdoutdata))
cmd = ['boot_signer', '/boot', output_file,
os.path.join(self.testdata_dir, 'testkey.pk8'),
os.path.join(self.testdata_dir, 'testkey.x509.pem'), output_file]
proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
self.assertEqual(
0, proc.returncode,
"Failed to sign boot image with boot_signer: {}".format(stdoutdata))
@test_utils.SkipIfExternalToolsUnavailable()
def test_ValidateVerifiedBootImages_bootImage(self):
input_tmp = common.MakeTempDir()
os.mkdir(os.path.join(input_tmp, 'IMAGES'))
boot_image = os.path.join(input_tmp, 'IMAGES', 'boot.img')
self._generate_boot_image(boot_image)
info_dict = {
'boot_signer' : 'true',
}
options = {
'verity_key' : os.path.join(self.testdata_dir, 'testkey.x509.pem'),
}
ValidateVerifiedBootImages(input_tmp, info_dict, options)
@test_utils.SkipIfExternalToolsUnavailable()
def test_ValidateVerifiedBootImages_bootImage_wrongKey(self):
input_tmp = common.MakeTempDir()
os.mkdir(os.path.join(input_tmp, 'IMAGES'))
boot_image = os.path.join(input_tmp, 'IMAGES', 'boot.img')
self._generate_boot_image(boot_image)
info_dict = {
'boot_signer' : 'true',
}
options = {
'verity_key' : os.path.join(self.testdata_dir, 'verity.x509.pem'),
}
self.assertRaises(
AssertionError, ValidateVerifiedBootImages, input_tmp, info_dict,
options)
@test_utils.SkipIfExternalToolsUnavailable()
def test_ValidateVerifiedBootImages_bootImage_corrupted(self):
input_tmp = common.MakeTempDir()
os.mkdir(os.path.join(input_tmp, 'IMAGES'))
boot_image = os.path.join(input_tmp, 'IMAGES', 'boot.img')
self._generate_boot_image(boot_image)
    # Corrupt the last byte of the image.
with open(boot_image, 'r+b') as boot_fp:
boot_fp.seek(-1, os.SEEK_END)
last_byte = boot_fp.read(1)
last_byte = bytes([255 - ord(last_byte)])
boot_fp.seek(-1, os.SEEK_END)
boot_fp.write(last_byte)
info_dict = {
'boot_signer' : 'true',
}
options = {
'verity_key' : os.path.join(self.testdata_dir, 'testkey.x509.pem'),
}
self.assertRaises(
AssertionError, ValidateVerifiedBootImages, input_tmp, info_dict,
options)
def _generate_system_image(self, output_file, system_root=None,
file_map=None):
prop_dict = {
'partition_size': str(1024 * 1024),
'verity': 'true',
'verity_block_device': '/dev/block/system',
'verity_key' : os.path.join(self.testdata_dir, 'testkey'),
'verity_fec': "true",
'verity_signer_cmd': 'verity_signer',
}
verity_image_builder = CreateVerityImageBuilder(prop_dict)
image_size = verity_image_builder.CalculateMaxImageSize()
# Use an empty root directory.
if not system_root:
system_root = common.MakeTempDir()
cmd = ['mkuserimg_mke2fs', '-s', system_root, output_file, 'ext4',
'/system', str(image_size), '-j', '0']
if file_map:
cmd.extend(['-B', file_map])
proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
self.assertEqual(
0, proc.returncode,
"Failed to create system image with mkuserimg_mke2fs: {}".format(
stdoutdata))
# Append the verity metadata.
verity_image_builder.Build(output_file)
@test_utils.SkipIfExternalToolsUnavailable()
def test_ValidateVerifiedBootImages_systemRootImage(self):
input_tmp = common.MakeTempDir()
os.mkdir(os.path.join(input_tmp, 'IMAGES'))
system_image = os.path.join(input_tmp, 'IMAGES', 'system.img')
self._generate_system_image(system_image)
# Pack the verity key.
verity_key_mincrypt = os.path.join(input_tmp, 'ROOT', 'verity_key')
os.makedirs(os.path.dirname(verity_key_mincrypt))
shutil.copyfile(
os.path.join(self.testdata_dir, 'testkey_mincrypt'),
verity_key_mincrypt)
info_dict = {
'system_root_image' : 'true',
'verity' : 'true',
}
options = {
'verity_key' : os.path.join(self.testdata_dir, 'testkey.x509.pem'),
'verity_key_mincrypt' : verity_key_mincrypt,
}
ValidateVerifiedBootImages(input_tmp, info_dict, options)
@test_utils.SkipIfExternalToolsUnavailable()
def test_ValidateVerifiedBootImages_nonSystemRootImage(self):
input_tmp = common.MakeTempDir()
os.mkdir(os.path.join(input_tmp, 'IMAGES'))
system_image = os.path.join(input_tmp, 'IMAGES', 'system.img')
self._generate_system_image(system_image)
# Pack the verity key into the root dir in system.img.
verity_key_mincrypt = os.path.join(input_tmp, 'ROOT', 'verity_key')
os.makedirs(os.path.dirname(verity_key_mincrypt))
shutil.copyfile(
os.path.join(self.testdata_dir, 'testkey_mincrypt'),
verity_key_mincrypt)
# And a copy in ramdisk.
verity_key_ramdisk = os.path.join(
input_tmp, 'BOOT', 'RAMDISK', 'verity_key')
os.makedirs(os.path.dirname(verity_key_ramdisk))
shutil.copyfile(
os.path.join(self.testdata_dir, 'testkey_mincrypt'),
verity_key_ramdisk)
info_dict = {
'verity' : 'true',
}
options = {
'verity_key' : os.path.join(self.testdata_dir, 'testkey.x509.pem'),
'verity_key_mincrypt' : verity_key_mincrypt,
}
ValidateVerifiedBootImages(input_tmp, info_dict, options)
@test_utils.SkipIfExternalToolsUnavailable()
def test_ValidateVerifiedBootImages_nonSystemRootImage_mismatchingKeys(self):
input_tmp = common.MakeTempDir()
os.mkdir(os.path.join(input_tmp, 'IMAGES'))
system_image = os.path.join(input_tmp, 'IMAGES', 'system.img')
self._generate_system_image(system_image)
# Pack the verity key into the root dir in system.img.
verity_key_mincrypt = os.path.join(input_tmp, 'ROOT', 'verity_key')
os.makedirs(os.path.dirname(verity_key_mincrypt))
shutil.copyfile(
os.path.join(self.testdata_dir, 'testkey_mincrypt'),
verity_key_mincrypt)
# And an invalid copy in ramdisk.
verity_key_ramdisk = os.path.join(
input_tmp, 'BOOT', 'RAMDISK', 'verity_key')
os.makedirs(os.path.dirname(verity_key_ramdisk))
shutil.copyfile(
os.path.join(self.testdata_dir, 'verity_mincrypt'),
verity_key_ramdisk)
info_dict = {
'verity' : 'true',
}
options = {
'verity_key' : os.path.join(self.testdata_dir, 'testkey.x509.pem'),
'verity_key_mincrypt' : verity_key_mincrypt,
}
self.assertRaises(
AssertionError, ValidateVerifiedBootImages, input_tmp, info_dict,
options)
@test_utils.SkipIfExternalToolsUnavailable()
def test_ValidateFileConsistency_incompleteRange(self):
input_tmp = common.MakeTempDir()
os.mkdir(os.path.join(input_tmp, 'IMAGES'))
system_image = os.path.join(input_tmp, 'IMAGES', 'system.img')
system_root = os.path.join(input_tmp, "SYSTEM")
os.mkdir(system_root)
# Write test files that contain multiple blocks of zeros, and these zero
    # blocks will be omitted by the kernel. Each test file will occupy one block in
# the final system image.
with open(os.path.join(system_root, 'a'), 'w') as f:
f.write('aaa')
f.write('\0' * 4096 * 3)
with open(os.path.join(system_root, 'b'), 'w') as f:
f.write('bbb')
f.write('\0' * 4096 * 3)
raw_file_map = os.path.join(input_tmp, 'IMAGES', 'raw_system.map')
self._generate_system_image(system_image, system_root, raw_file_map)
# Parse the generated file map and update the block ranges for each file.
file_map_list = {}
image_ranges = RangeSet()
with open(raw_file_map) as f:
for line in f.readlines():
info = line.split()
self.assertEqual(2, len(info))
image_ranges = image_ranges.union(RangeSet(info[1]))
file_map_list[info[0]] = RangeSet(info[1])
# Add one unoccupied block as the shared block for all test files.
mock_shared_block = RangeSet("10-20").subtract(image_ranges).first(1)
with open(os.path.join(input_tmp, 'IMAGES', 'system.map'), 'w') as f:
for key in sorted(file_map_list.keys()):
line = '{} {}\n'.format(
key, file_map_list[key].union(mock_shared_block))
f.write(line)
# Prepare for the target zip file
input_file = common.MakeTempFile()
all_entries = ['SYSTEM/', 'SYSTEM/b', 'SYSTEM/a', 'IMAGES/',
'IMAGES/system.map', 'IMAGES/system.img']
with zipfile.ZipFile(input_file, 'w', allowZip64=True) as input_zip:
for name in all_entries:
input_zip.write(os.path.join(input_tmp, name), arcname=name)
    # Expect the validation to pass, with both files skipped due to the
# 'incomplete' block range.
with zipfile.ZipFile(input_file) as input_zip:
info_dict = {'extfs_sparse_flag': '-s'}
ValidateFileConsistency(input_zip, input_tmp, info_dict)
@test_utils.SkipIfExternalToolsUnavailable()
def test_ValidateFileConsistency_nonMonotonicRanges(self):
input_tmp = common.MakeTempDir()
os.mkdir(os.path.join(input_tmp, 'IMAGES'))
system_image = os.path.join(input_tmp, 'IMAGES', 'system.img')
system_root = os.path.join(input_tmp, "SYSTEM")
os.mkdir(system_root)
    # Write the test file that contains three blocks of 'a', 'b', 'c'.
with open(os.path.join(system_root, 'abc'), 'w') as f:
f.write('a' * 4096 + 'b' * 4096 + 'c' * 4096)
raw_file_map = os.path.join(input_tmp, 'IMAGES', 'raw_system.map')
self._generate_system_image(system_image, system_root, raw_file_map)
# Parse the generated file map and manipulate the block ranges of 'abc' to
# be 'cba'.
file_map_list = {}
with open(raw_file_map) as f:
for line in f.readlines():
info = line.split()
self.assertEqual(2, len(info))
ranges = RangeSet(info[1])
self.assertTrue(ranges.monotonic)
blocks = reversed(list(ranges.next_item()))
file_map_list[info[0]] = ' '.join([str(block) for block in blocks])
# Update the contents of 'abc' to be 'cba'.
with open(os.path.join(system_root, 'abc'), 'w') as f:
f.write('c' * 4096 + 'b' * 4096 + 'a' * 4096)
# Update the system.map.
with open(os.path.join(input_tmp, 'IMAGES', 'system.map'), 'w') as f:
for key in sorted(file_map_list.keys()):
f.write('{} {}\n'.format(key, file_map_list[key]))
# Get the target zip file.
input_file = common.MakeTempFile()
all_entries = ['SYSTEM/', 'SYSTEM/abc', 'IMAGES/',
'IMAGES/system.map', 'IMAGES/system.img']
with zipfile.ZipFile(input_file, 'w', allowZip64=True) as input_zip:
for name in all_entries:
input_zip.write(os.path.join(input_tmp, name), arcname=name)
with zipfile.ZipFile(input_file) as input_zip:
info_dict = {'extfs_sparse_flag': '-s'}
ValidateFileConsistency(input_zip, input_tmp, info_dict)
@staticmethod
def make_build_prop(build_prop):
input_tmp = common.MakeTempDir()
system_dir = os.path.join(input_tmp, 'SYSTEM')
os.makedirs(system_dir)
prop_file = os.path.join(system_dir, 'build.prop')
with open(prop_file, 'w') as output_file:
output_file.write("\n".join(build_prop))
return input_tmp
def test_checkDuplicateProps_noDuplicate(self):
build_prop = [
'ro.odm.build.date.utc=1578430045',
'ro.odm.build.fingerprint='
'google/coral/coral:10/RP1A.200325.001/6337676:user/dev-keys',
'ro.product.odm.device=coral',
]
input_tmp = ValidateTargetFilesTest.make_build_prop(build_prop)
CheckBuildPropDuplicity(input_tmp)
def test_checkDuplicateProps_withDuplicate(self):
build_prop = [
'ro.odm.build.date.utc=1578430045',
'ro.odm.build.date.utc=1578430049',
'ro.odm.build.fingerprint='
'google/coral/coral:10/RP1A.200325.001/6337676:user/dev-keys',
'ro.product.odm.device=coral',
]
input_tmp = ValidateTargetFilesTest.make_build_prop(build_prop)
self.assertRaises(ValueError, CheckBuildPropDuplicity, input_tmp)
|
f882140d16f2104a79f0bd5bd82023280af340c4
|
185946ad4ac5a1df75ad8b734c31e81f8d77e6c6
|
/tests/__init__.py
|
3d75d92e8a66c26ac00a6df235a1804155979c92
|
[
"MIT"
] |
permissive
|
konstantint/PassportEye
|
ef4f9fd6dd3b97e18554a062fdb6aeb6bae5d365
|
a3541fc5689edcc24b86ba7392c750fd7468bb02
|
refs/heads/master
| 2023-03-09T22:04:11.925105
| 2023-02-26T22:18:16
| 2023-02-26T23:12:07
| 69,086,175
| 340
| 119
|
MIT
| 2022-03-17T13:46:59
| 2016-09-24T06:58:44
|
Python
|
UTF-8
|
Python
| false
| false
| 178
|
py
|
__init__.py
|
'''
Tests module.
Meant for use with py.test.
Organize tests into files, each named xxx_test.py
Read more here: http://pytest.org/
Author: Konstantin Tretyakov
License: MIT
'''
|
4589abe21350af736d1324d825f16566b64527ef
|
dee9d197c6adfbdb49cd9e33bd3f8614b7d98f06
|
/mt-bluebert/mt_bluebert/blue_train.py
|
615d8e9e3377adc2196e95789b5932b1102c9e72
|
[
"LicenseRef-scancode-us-govt-public-domain"
] |
permissive
|
ncbi-nlp/bluebert
|
2ccd72a19283f8a206e30426375322ec6831398d
|
f4b8af9db9f8c4503d62d0c205de7256f38c5890
|
refs/heads/master
| 2023-05-30T11:13:51.757657
| 2022-04-11T01:44:30
| 2022-04-11T01:44:30
| 190,591,634
| 422
| 73
|
NOASSERTION
| 2023-03-25T01:21:44
| 2019-06-06T14:02:54
|
Python
|
UTF-8
|
Python
| false
| false
| 19,152
|
py
|
blue_train.py
|
# Copyright (c) Microsoft. All rights reserved.
# Modified by Yifan Peng
import argparse
import copy
import json
import os
import random
from datetime import datetime
import numpy as np
import torch
from pytorch_pretrained_bert.modeling import BertConfig
from tensorboardX import SummaryWriter
# from experiments.glue.glue_utils import submit, eval_model
from mt_bluebert.blue_exp_def import BlueTaskDefs
from mt_bluebert.blue_inference import eval_model
from mt_bluebert.data_utils.log_wrapper import create_logger
from mt_bluebert.data_utils.task_def import EncoderModelType
from mt_bluebert.data_utils.utils import set_environment
# from torch.utils.tensorboard import SummaryWriter
from mt_bluebert.mt_dnn.batcher import BatchGen
from mt_bluebert.mt_dnn.model import MTDNNModel
def model_config(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
parser.add_argument('--update_bert_opt', default=0, type=int)
parser.add_argument('--multi_gpu_on', action='store_true')
parser.add_argument('--mem_cum_type', type=str, default='simple',
                        help='bilinear/simple/default')
parser.add_argument('--answer_num_turn', type=int, default=5)
parser.add_argument('--answer_mem_drop_p', type=float, default=0.1)
parser.add_argument('--answer_att_hidden_size', type=int, default=128)
parser.add_argument('--answer_att_type', type=str, default='bilinear',
                        help='bilinear/simple/default')
parser.add_argument('--answer_rnn_type', type=str, default='gru',
help='rnn/gru/lstm')
parser.add_argument('--answer_sum_att_type', type=str, default='bilinear',
                        help='bilinear/simple/default')
parser.add_argument('--answer_merge_opt', type=int, default=1)
parser.add_argument('--answer_mem_type', type=int, default=1)
parser.add_argument('--answer_dropout_p', type=float, default=0.1)
parser.add_argument('--answer_weight_norm_on', action='store_true')
parser.add_argument('--dump_state_on', action='store_true')
parser.add_argument('--answer_opt', type=int, default=0, help='0,1')
parser.add_argument('--label_size', type=str, default='3')
parser.add_argument('--mtl_opt', type=int, default=0)
parser.add_argument('--ratio', type=float, default=0)
parser.add_argument('--mix_opt', type=int, default=0)
parser.add_argument('--max_seq_len', type=int, default=512)
parser.add_argument('--init_ratio', type=float, default=1)
parser.add_argument('--encoder_type', type=int, default=EncoderModelType.BERT)
return parser
def data_config(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
parser.add_argument('--log_file', default='mt-dnn-train.log', help='path for log file.')
parser.add_argument('--tensorboard', action='store_true')
parser.add_argument('--tensorboard_logdir', default='tensorboard_logdir')
parser.add_argument("--init_checkpoint", default='mt_dnn_models/bert_model_base.pt', type=str)
parser.add_argument('--data_dir', default='blue_data/canonical_data/bert_uncased_lower')
parser.add_argument('--data_sort_on', action='store_true')
parser.add_argument('--name', default='farmer')
parser.add_argument('--task_def', type=str, default="experiments/blue/blue_task_def.yml")
parser.add_argument('--train_datasets', default='mnli')
parser.add_argument('--test_datasets', default='mnli_mismatched,mnli_matched')
return parser
def train_config(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available(),
help='whether to use GPU acceleration.')
parser.add_argument('--log_per_updates', type=int, default=500)
parser.add_argument('--save_per_updates', type=int, default=10000)
parser.add_argument('--save_per_updates_on', action='store_true')
parser.add_argument('--epochs', type=int, default=5)
parser.add_argument('--batch_size', type=int, default=8)
parser.add_argument('--batch_size_eval', type=int, default=8)
parser.add_argument('--optimizer', default='adamax',
help='supported optimizer: adamax, sgd, adadelta, adam')
parser.add_argument('--grad_clipping', type=float, default=0)
parser.add_argument('--global_grad_clipping', type=float, default=1.0)
parser.add_argument('--weight_decay', type=float, default=0)
parser.add_argument('--learning_rate', type=float, default=5e-5)
parser.add_argument('--momentum', type=float, default=0)
parser.add_argument('--warmup', type=float, default=0.1)
parser.add_argument('--warmup_schedule', type=str, default='warmup_linear')
parser.add_argument('--adam_eps', type=float, default=1e-6)
parser.add_argument('--vb_dropout', action='store_false')
parser.add_argument('--dropout_p', type=float, default=0.1)
parser.add_argument('--dropout_w', type=float, default=0.000,
                        help='Randomly drop a fraction dropout_w of training instances.')
parser.add_argument('--bert_dropout_p', type=float, default=0.1)
# loading
parser.add_argument("--model_ckpt", default='checkpoints/model_0.pt', type=str)
parser.add_argument("--resume", action='store_true')
# EMA
parser.add_argument('--ema_opt', type=int, default=0)
parser.add_argument('--ema_gamma', type=float, default=0.995)
# scheduler
parser.add_argument('--have_lr_scheduler', dest='have_lr_scheduler', action='store_false')
parser.add_argument('--multi_step_lr', type=str, default='10,20,30')
parser.add_argument('--freeze_layers', type=int, default=-1)
parser.add_argument('--embedding_opt', type=int, default=0)
parser.add_argument('--lr_gamma', type=float, default=0.5)
parser.add_argument('--bert_l2norm', type=float, default=0.0)
parser.add_argument('--scheduler_type', type=str, default='ms', help='ms/rop/exp')
parser.add_argument('--output_dir', default='checkpoint')
parser.add_argument('--seed', type=int, default=2018,
help='random seed for data shuffling, embedding init, etc.')
parser.add_argument('--grad_accumulation_step', type=int, default=1)
# fp 16
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
# save
parser.add_argument('--not_save', action='store_true', help="Don't save the model")
return parser
def dump(path, data):
with open(path, 'w') as f:
json.dump(data, f)
def dump2(path, uids, scores, predictions):
with open(path, 'w') as f:
for uid, score, pred in zip(uids, scores, predictions):
s = json.dumps({'uid': uid, 'score': score, 'prediction': pred})
f.write(s + '\n')
def generate_decoder_opt(enable_san, max_opt):
opt_v = 0
if enable_san and max_opt < 3:
opt_v = max_opt
return opt_v
def get_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser = data_config(parser)
parser = model_config(parser)
parser = train_config(parser)
args = parser.parse_args()
return args
def main():
args = get_args()
args.train_datasets = args.train_datasets.split(',')
args.test_datasets = args.test_datasets.split(',')
args.output_dir = os.path.abspath(args.output_dir)
os.makedirs(args.output_dir, exist_ok=True)
# log_path = args.log_file
logger = create_logger(__name__, to_disk=True, log_file=args.log_file)
logger.info('args: %s', json.dumps(vars(args), indent=2))
set_environment(args.seed, args.cuda)
task_defs = BlueTaskDefs(args.task_def)
encoder_type = task_defs.encoder_type
assert encoder_type == EncoderModelType.BERT, '%s: only support BERT' % encoder_type
args.encoder_type = encoder_type
logger.info('Launching the MT-DNN training')
# update data dir
train_data_list = []
tasks = {}
tasks_class = {}
nclass_list = []
decoder_opts = []
task_types = []
dropout_list = []
for dataset in args.train_datasets:
task = dataset.split('_')[0]
if task in tasks:
logger.warning('Skipping: %s in %s', task, tasks)
continue
assert task in task_defs.n_class_map, \
'%s not in n_class_map: %s' % (task, task_defs.n_class_map)
assert task in task_defs.data_format_map, \
'%s not in data_format_map: %s' % (task, task_defs.data_format_map)
data_type = task_defs.data_format_map[task]
nclass = task_defs.n_class_map[task]
task_id = len(tasks)
if args.mtl_opt > 0:
task_id = tasks_class[nclass] if nclass in tasks_class else len(tasks_class)
task_type = task_defs.task_type_map[task]
dopt = generate_decoder_opt(task_defs.enable_san_map[task], args.answer_opt)
if task_id < len(decoder_opts):
decoder_opts[task_id] = min(decoder_opts[task_id], dopt)
else:
decoder_opts.append(dopt)
task_types.append(task_type)
if task not in tasks:
tasks[task] = len(tasks)
if args.mtl_opt < 1:
nclass_list.append(nclass)
if nclass not in tasks_class:
tasks_class[nclass] = len(tasks_class)
if args.mtl_opt > 0:
nclass_list.append(nclass)
dropout_p = task_defs.dropout_p_map.get(task, args.dropout_p)
dropout_list.append(dropout_p)
# use train and dev
train_path = os.path.join(args.data_dir, f'{dataset}_train+dev.json')
logger.info('Loading %s as task %s', task, task_id)
train_data = BatchGen(
BatchGen.load(train_path, True, task_type=task_type, maxlen=args.max_seq_len),
batch_size=args.batch_size,
dropout_w=args.dropout_w,
gpu=args.cuda,
task_id=task_id,
maxlen=args.max_seq_len,
data_type=data_type,
task_type=task_type,
encoder_type=encoder_type)
train_data_list.append(train_data)
dev_data_list = []
test_data_list = []
for dataset in args.test_datasets:
task = dataset.split('_')[0]
task_id = tasks_class[task_defs.n_class_map[task]] if args.mtl_opt > 0 else tasks[task]
task_type = task_defs.task_type_map[task]
data_type = task_defs.data_format_map[task]
dev_path = os.path.join(args.data_dir, f'{dataset}_dev.json')
dev_data = BatchGen(
BatchGen.load(dev_path, False, task_type=task_type, maxlen=args.max_seq_len),
batch_size=args.batch_size_eval,
gpu=args.cuda,
is_train=False,
task_id=task_id,
maxlen=args.max_seq_len,
data_type=data_type,
task_type=task_type,
encoder_type=encoder_type)
dev_data_list.append(dev_data)
test_path = os.path.join(args.data_dir, f'{dataset}_test.json')
test_data = BatchGen(
BatchGen.load(test_path, False, task_type=task_type, maxlen=args.max_seq_len),
batch_size=args.batch_size_eval,
gpu=args.cuda,
is_train=False,
task_id=task_id,
maxlen=args.max_seq_len,
data_type=data_type,
task_type=task_type,
encoder_type=encoder_type)
test_data_list.append(test_data)
opt = copy.deepcopy(vars(args))
opt['answer_opt'] = decoder_opts
opt['task_types'] = task_types
opt['tasks_dropout_p'] = dropout_list
label_size = ','.join([str(l) for l in nclass_list])
opt['label_size'] = label_size
logger.info('#' * 20)
logger.info('opt: %s', json.dumps(opt, indent=2))
logger.info('#' * 20)
bert_model_path = args.init_checkpoint
state_dict = None
if os.path.exists(bert_model_path):
state_dict = torch.load(bert_model_path)
config = state_dict['config']
config['attention_probs_dropout_prob'] = args.bert_dropout_p
config['hidden_dropout_prob'] = args.bert_dropout_p
opt.update(config)
else:
logger.error('#' * 20)
logger.error('Could not find the init model!\n'
'The parameters will be initialized randomly!')
logger.error('#' * 20)
config = BertConfig(vocab_size_or_config_json_file=30522).to_dict()
opt.update(config)
all_iters = [iter(item) for item in train_data_list]
all_lens = [len(bg) for bg in train_data_list]
# div number of grad accumulation.
num_all_batches = args.epochs * sum(all_lens) // args.grad_accumulation_step
logger.info('############# Gradient Accumulation Info #############')
logger.info('number of step: %s', args.epochs * sum(all_lens))
logger.info('number of grad grad_accumulation step: %s', args.grad_accumulation_step)
logger.info('adjusted number of step: %s', num_all_batches)
logger.info('############# Gradient Accumulation Info #############')
if len(train_data_list) > 1 and args.ratio > 0:
num_all_batches = int(args.epochs * (len(train_data_list[0]) * (1 + args.ratio)))
model = MTDNNModel(opt, state_dict=state_dict, num_train_step=num_all_batches)
if args.resume and args.model_ckpt:
logger.info('loading model from %s', args.model_ckpt)
model.load(args.model_ckpt)
# model meta str
headline = '############# Model Arch of MT-DNN #############'
# print network
logger.debug('\n{}\n{}\n'.format(headline, model.network))
# dump config
config_file = os.path.join(args.output_dir, 'config.json')
with open(config_file, 'a', encoding='utf-8') as writer:
writer.write('{}\n'.format(json.dumps(opt)))
writer.write('\n{}\n{}\n'.format(headline, model.network))
logger.info("Total number of params: %s", model.total_param)
# tensorboard
if args.tensorboard:
args.tensorboard_logdir = os.path.join(args.output_dir, args.tensorboard_logdir)
tensorboard = SummaryWriter(log_dir=args.tensorboard_logdir)
for epoch in range(0, args.epochs):
logger.warning('At epoch %s', epoch)
for train_data in train_data_list:
train_data.reset()
start = datetime.now()
all_indices = []
if len(train_data_list) > 1 and args.ratio > 0:
main_indices = [0] * len(train_data_list[0])
extra_indices = []
for i in range(1, len(train_data_list)):
extra_indices += [i] * len(train_data_list[i])
random_picks = int(min(len(train_data_list[0]) * args.ratio, len(extra_indices)))
extra_indices = np.random.choice(extra_indices, random_picks, replace=False)
if args.mix_opt > 0:
extra_indices = extra_indices.tolist()
random.shuffle(extra_indices)
all_indices = extra_indices + main_indices
else:
all_indices = main_indices + extra_indices.tolist()
else:
for i in range(1, len(train_data_list)):
all_indices += [i] * len(train_data_list[i])
if args.mix_opt > 0:
random.shuffle(all_indices)
all_indices += [0] * len(train_data_list[0])
if args.mix_opt < 1:
random.shuffle(all_indices)
for i in range(len(all_indices)):
task_id = all_indices[i]
batch_meta, batch_data = next(all_iters[task_id])
model.update(batch_meta, batch_data)
if model.local_updates % (args.log_per_updates * args.grad_accumulation_step) == 0 \
or model.local_updates == 1:
remaining_time = str(
(datetime.now() - start) / (i + 1) * (len(all_indices) - i - 1)
).split('.')[0]
logger.info('Task [%2d] updates[%6d] train loss[%.5f] remaining[%s]',
task_id, model.updates, model.train_loss.avg, remaining_time)
if args.tensorboard:
tensorboard.add_scalar('train/loss', model.train_loss.avg,
global_step=model.updates)
if args.save_per_updates_on \
and (model.local_updates % (
args.save_per_updates * args.grad_accumulation_step) == 0):
model_file = os.path.join(args.output_dir, f'model_{epoch}_{model.updates}.pt')
logger.info('Saving mt-dnn model to %s', model_file)
model.save(model_file)
for idx, dataset in enumerate(args.test_datasets):
task = dataset.split('_')[0]
label_mapper = task_defs.label_mapper_map[task]
metric_meta = task_defs.metric_meta_map[task]
# dev
data = dev_data_list[idx]
with torch.no_grad():
metrics, predictions, scores, golds, ids = eval_model(
model, data, metric_meta, args.cuda, True, label_mapper)
for key, val in metrics.items():
if args.tensorboard:
tensorboard.add_scalar(f'dev/{dataset}/{key}', val, global_step=epoch)
logger.warning('Task %s - epoch %s - Dev %s: %s', dataset, epoch, key, val)
path = os.path.join(args.output_dir, f'{dataset}_dev_scores_{epoch}.json')
result = {'metrics': metrics, 'predictions': predictions, 'uids': ids, 'scores': scores}
dump(path, result)
path = os.path.join(args.output_dir, f'{dataset}_dev_scores_{epoch}_2.json')
dump2(path, ids, scores, predictions)
# test
data = test_data_list[idx]
with torch.no_grad():
metrics, predictions, scores, golds, ids = eval_model(
model, data, metric_meta, args.cuda, True, label_mapper)
for key, val in metrics.items():
if args.tensorboard:
tensorboard.add_scalar(f'test/{dataset}/{key}', val, global_step=epoch)
logger.warning('Task %s - epoch %s - Test %s: %s', dataset, epoch, key, val)
path = os.path.join(args.output_dir, f'{dataset}_test_scores_{epoch}.json')
result = {'metrics': metrics, 'predictions': predictions, 'uids': ids, 'scores': scores}
dump(path, result)
path = os.path.join(args.output_dir, f'{dataset}_test_scores_{epoch}_2.json')
dump2(path, ids, scores, predictions)
logger.info('[new test scores saved.]')
if not args.not_save:
model_file = os.path.join(args.output_dir, f'model_{epoch}.pt')
model.save(model_file)
if args.tensorboard:
tensorboard.close()
if __name__ == '__main__':
main()
|
3df0a8ad1de8a7bb931d9e6cfb08bbb412e9d728
|
99d79ada2d3b7746573f071823ec61f5f853d7a3
|
/magma/mantle/queue.py
|
bfefffcacc918a7be16d60f7978cb8e753eb816f
|
[
"MIT"
] |
permissive
|
phanrahan/magma
|
d8062c6163e2c2c2cedef82317dc8cc40038220a
|
b05fe5303ed17e668c6ec2ec3558cd5a52eff787
|
refs/heads/master
| 2023-08-23T18:08:22.494869
| 2023-08-08T18:53:05
| 2023-08-17T16:16:44
| 84,332,281
| 227
| 21
|
NOASSERTION
| 2023-09-14T21:32:19
| 2017-03-08T14:57:09
|
Python
|
UTF-8
|
Python
| false
| false
| 2,501
|
py
|
queue.py
|
"""
Based on
https://github.com/chipsalliance/chisel3/blob/master/src/main/scala/chisel3/util/Decoupled.scala
(missing support for `flow` and `pipe` parameters).
"""
from magma.clock_io import ClockIO
from magma.conversions import enable
from magma.generator import Generator2
from magma.interface import IO
from magma.mantle.counter import Counter
from magma.primitives.memory import Memory
from magma.primitives.register import Register
from magma.types.handshake import DeqIO, EnqIO
from magma.t import Kind
class Queue(Generator2):
"""
Queue implementation using a magma Memory primitive as a register file.
    Write (enqueue) and read (dequeue) counters point to the appropriate
    addresses in memory.
    Since enq_ptr == deq_ptr could mean the queue is full or empty, this
    implementation uses an extra bit to track when the queue may be full.
"""
def __init__(self, entries: int, T: Kind):
assert entries >= 0
self.io = IO(
# NOTE(leonardt): Note that the direction of enq/deq are flipped
# since this is a client (consumer)
enq=DeqIO[T],
deq=EnqIO[T]
) + ClockIO()
# Data storage
ram = Memory(entries, T)()
# Read/write pointers
enq_ptr = Counter(entries, has_enable=True)()
deq_ptr = Counter(entries, has_enable=True)()
# Since the pointers can match when it's empty or full, we use an extra
# bit to track when it may be full (there's been a write without a
# read)
maybe_full = Register(init=False, has_enable=True)()
ptr_match = enq_ptr.O == deq_ptr.O
# Empty/full determined by pointers matching and maybe_full bit
empty = ptr_match & ~maybe_full.O
full = ptr_match & maybe_full.O
# deq data is valid when not empty
self.io.deq.valid @= ~empty
# enq is ready when not full
self.io.enq.ready @= ~full
# do enq/deq when ready/valid are high
do_enq = self.io.enq.fired()
do_deq = self.io.deq.fired()
# write enq data when do_enq
ram.write(self.io.enq.data, enq_ptr.O, enable(do_enq))
# Increment pointers on read/write
enq_ptr.CE @= enable(do_enq)
deq_ptr.CE @= enable(do_deq)
# Set maybe full when enq without deq
maybe_full.I @= enable(do_enq)
maybe_full.CE @= enable(do_enq != do_deq)
# Deq data from storage
self.io.deq.data @= ram[deq_ptr.O]
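# A hypothetical usage sketch (the surrounding circuit, its port names, and the
# exact wiring are assumptions, not part of this module): expose the queue's
# enq/deq handshakes through a top-level circuit.
#
#     import magma as m
#
#     class Top(m.Circuit):
#         io = m.IO(
#             in_data=m.In(m.Bits[8]), in_valid=m.In(m.Bit), in_ready=m.Out(m.Bit),
#             out_data=m.Out(m.Bits[8]), out_valid=m.Out(m.Bit), out_ready=m.In(m.Bit),
#         ) + m.ClockIO()
#         fifo = Queue(4, m.Bits[8])()
#         fifo.enq.data @= io.in_data
#         fifo.enq.valid @= io.in_valid
#         io.in_ready @= fifo.enq.ready
#         io.out_data @= fifo.deq.data
#         io.out_valid @= fifo.deq.valid
#         fifo.deq.ready @= io.out_ready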
|
78b9af8dd716b0eb6af069c8ef0ab9997fdd4c9f
|
b347bc4b850dee4a8a9a171b563a3f31230ce1c7
|
/sktime/forecasting/adapters/__init__.py
|
522ab5d6cb15078171f5e874767207ce3ef4ed99
|
[
"BSD-3-Clause"
] |
permissive
|
sktime/sktime
|
5963962df338c5931a2f9f1794d1203c50ddc27e
|
70b2bfaaa597eb31bc3a1032366dcc0e1f4c8a9f
|
refs/heads/main
| 2023-08-22T18:20:08.022950
| 2023-08-22T15:24:39
| 2023-08-22T15:24:39
| 156,401,841
| 1,117
| 268
|
BSD-3-Clause
| 2023-09-14T20:44:21
| 2018-11-06T15:08:24
|
Python
|
UTF-8
|
Python
| false
| false
| 256
|
py
|
__init__.py
|
"""Module containing adapters to other forecasting framework packages."""
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
__all__ = ["HCrystalBallAdapter"]
from sktime.forecasting.adapters._hcrystalball import HCrystalBallAdapter
|
f24879467b065ea5486dee1ffd9a76201cd85317
|
cb35df97989fcc46831a8adb8de3434b94fd2ecd
|
/tests/test_materials.py
|
4a37fbebb6c10fdf3c48fcc30ba069f64d2bc63c
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
facebookresearch/pytorch3d
|
6d93b28c0f36a4b7efa0a8143726200c252d3502
|
a3d99cab6bf5eb69be8d5eb48895da6edd859565
|
refs/heads/main
| 2023-09-01T16:26:58.756831
| 2023-08-26T20:55:56
| 2023-08-26T20:55:56
| 217,433,767
| 7,964
| 1,342
|
NOASSERTION
| 2023-08-25T10:00:26
| 2019-10-25T02:23:45
|
Python
|
UTF-8
|
Python
| false
| false
| 4,069
|
py
|
test_materials.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from pytorch3d.renderer.materials import Materials
from .common_testing import TestCaseMixin
class TestMaterials(TestCaseMixin, unittest.TestCase):
def test_init(self):
"""
Initialize Materials class with the default values.
"""
device = torch.device("cuda:0")
mat = Materials(device=device)
self.assertTrue(torch.is_tensor(mat.ambient_color))
self.assertTrue(torch.is_tensor(mat.diffuse_color))
self.assertTrue(torch.is_tensor(mat.specular_color))
self.assertTrue(torch.is_tensor(mat.shininess))
self.assertTrue(mat.ambient_color.device == device)
self.assertTrue(mat.diffuse_color.device == device)
self.assertTrue(mat.specular_color.device == device)
self.assertTrue(mat.shininess.device == device)
self.assertTrue(mat.ambient_color.shape == (1, 3))
self.assertTrue(mat.diffuse_color.shape == (1, 3))
self.assertTrue(mat.specular_color.shape == (1, 3))
self.assertTrue(mat.shininess.shape == (1,))
def test_materials_clone_to(self):
device = torch.device("cuda:0")
cpu = torch.device("cpu")
mat = Materials()
new_mat = mat.clone().to(device)
self.assertTrue(mat.ambient_color.device == cpu)
self.assertTrue(mat.diffuse_color.device == cpu)
self.assertTrue(mat.specular_color.device == cpu)
self.assertTrue(mat.shininess.device == cpu)
self.assertTrue(new_mat.ambient_color.device == device)
self.assertTrue(new_mat.diffuse_color.device == device)
self.assertTrue(new_mat.specular_color.device == device)
self.assertTrue(new_mat.shininess.device == device)
self.assertSeparate(new_mat.ambient_color, mat.ambient_color)
self.assertSeparate(new_mat.diffuse_color, mat.diffuse_color)
self.assertSeparate(new_mat.specular_color, mat.specular_color)
self.assertSeparate(new_mat.shininess, mat.shininess)
def test_initialize_materials_broadcast(self):
materials = Materials(
ambient_color=torch.randn(10, 3),
diffuse_color=torch.randn(1, 3),
specular_color=torch.randn(1, 3),
shininess=torch.randn(1),
)
self.assertTrue(materials.ambient_color.shape == (10, 3))
self.assertTrue(materials.diffuse_color.shape == (10, 3))
self.assertTrue(materials.specular_color.shape == (10, 3))
self.assertTrue(materials.shininess.shape == (10,))
def test_initialize_materials_broadcast_fail(self):
"""
Batch dims have to be the same or 1.
"""
with self.assertRaises(ValueError):
Materials(
ambient_color=torch.randn(10, 3), diffuse_color=torch.randn(15, 3)
)
def test_initialize_materials_dimensions_fail(self):
"""
Color should have shape (N, 3) or (1, 3), Shininess should have shape
(1), (1, 1), (N) or (N, 1)
"""
with self.assertRaises(ValueError):
Materials(ambient_color=torch.randn(10, 4))
with self.assertRaises(ValueError):
Materials(shininess=torch.randn(10, 2))
def test_initialize_materials_mixed_inputs(self):
mat = Materials(ambient_color=torch.randn(1, 3), diffuse_color=((1, 1, 1),))
self.assertTrue(mat.ambient_color.shape == (1, 3))
self.assertTrue(mat.diffuse_color.shape == (1, 3))
def test_initialize_materials_mixed_inputs_broadcast(self):
mat = Materials(ambient_color=torch.randn(10, 3), diffuse_color=((1, 1, 1),))
self.assertTrue(mat.ambient_color.shape == (10, 3))
self.assertTrue(mat.diffuse_color.shape == (10, 3))
self.assertTrue(mat.specular_color.shape == (10, 3))
self.assertTrue(mat.shininess.shape == (10,))
|
c420f3871bf0fe8730beb6d41ccefc394a8e5cf1
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/HLTrigger/Configuration/python/HLT_75e33/eventsetup/hltESPKFFittingSmootherForL2Muon_cfi.py
|
cb1dad8e625a5be194199a515f0da68882c0a11e
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 768
|
py
|
hltESPKFFittingSmootherForL2Muon_cfi.py
|
import FWCore.ParameterSet.Config as cms
hltESPKFFittingSmootherForL2Muon = cms.ESProducer("KFFittingSmootherESProducer",
BreakTrajWith2ConsecutiveMissing = cms.bool(False),
ComponentName = cms.string('hltESPKFFittingSmootherForL2Muon'),
EstimateCut = cms.double(-1.0),
Fitter = cms.string('hltESPKFTrajectoryFitterForL2Muon'),
LogPixelProbabilityCut = cms.double(-16.0),
MaxFractionOutliers = cms.double(0.3),
MaxNumberOfOutliers = cms.int32(3),
MinDof = cms.int32(2),
MinNumberOfHits = cms.int32(5),
NoInvalidHitsBeginEnd = cms.bool(False),
NoOutliersBeginEnd = cms.bool(False),
RejectTracks = cms.bool(True),
Smoother = cms.string('hltESPKFTrajectorySmootherForL2Muon'),
appendToDataLabel = cms.string('')
)
|
f6c174729b2093bec96299dcc59e7b3d56c5508d
|
53940f2aaf1537bb0c701f3963225aae5edc56e0
|
/src/deepqmc/wf/nn_wave_function/env.py
|
65bedb40d6b50fb2fd03bbd57c0ed3d695fd8177
|
[
"MIT"
] |
permissive
|
deepqmc/deepqmc
|
d44d32c7ed528bc2c0a32e0c8a7f1d3ce70ad007
|
bf297a34c0304f9deb3a5ad704ddd3a8a3d7eea0
|
refs/heads/master
| 2023-08-16T16:30:54.227839
| 2023-08-02T13:24:22
| 2023-08-08T09:18:02
| 226,350,919
| 313
| 64
|
MIT
| 2023-09-14T18:06:13
| 2019-12-06T14:50:59
|
Python
|
UTF-8
|
Python
| false
| false
| 3,631
|
py
|
env.py
|
import haiku as hk
import jax.numpy as jnp
from ...physics import pairwise_diffs
from ...utils import norm, unflatten
class ExponentialEnvelopes(hk.Module):
r"""Create exponential envelopes centered on the nuclei."""
def __init__(
self,
mol,
n_determinants,
*,
isotropic,
per_shell,
per_orbital_exponent,
spin_restricted,
init_to_ones,
):
super().__init__()
shells = []
for i, (z, n_shell, n_pp_shell) in enumerate(
zip(mol.charges, mol.n_shells, mol.n_pp_shells)
):
for k in range(n_pp_shell, n_shell if per_shell else n_pp_shell + 1):
shells.append((i, z / (k + 1)))
self.center_idx, zetas = map(jnp.array, zip(*shells)) # [n_env]
self.init_to_ones = init_to_ones
self.pi = [
self.get_pi_for_one_spin(
name, n_determinants, mol.n_up, mol.n_down, len(zetas)
)
for name in (['pi'] if spin_restricted else ['pi_up', 'pi_down'])
] # [n_orb, n_env]
if per_orbital_exponent:
zetas = jnp.tile(
zetas[None], (n_determinants * (mol.n_up + mol.n_down), 1)
) # [n_orb, n_env]
if not isotropic:
zetas = zetas[..., None, None] * jnp.eye(3)
self.zetas = [
self.get_zeta_for_one_spin(name, zetas)
for name in (['zetas'] if spin_restricted else ['zetas_up', 'zetas_down'])
] # [n_env] or [n_orb, n_env] or [n_env, 3, 3] or [n_orb, n_env, 3, 3]
self.isotropic = isotropic
self.per_orbital_exponent = per_orbital_exponent
self.spin_restricted = spin_restricted
self.n_up = mol.n_up
self.n_det = n_determinants
def _call_for_one_spin(self, zeta, pi, diffs):
d = diffs[..., self.center_idx, :-1]
if self.isotropic:
d = norm(d, safe=True) # [n_el, n_env]
if self.per_orbital_exponent:
d = d[:, None] # [n_el, 1, n_env]
exponent = jnp.abs(zeta * d) # [n_el, n_env] or [n_el, n_orb, n_env]
else:
exponent = norm(
jnp.einsum('...ers,ies->i...er', zeta, d), safe=True
) # [n_el, n_env] or [n_el, n_orb, n_env]
if not self.per_orbital_exponent:
exponent = exponent[:, None] # [n_el, 1, n_env]
orbs = (pi * jnp.exp(-exponent)).sum(axis=-1) # [n_el, n_orb]
return unflatten(orbs, -1, (self.n_det, -1)).swapaxes(-2, -3)
def get_pi_for_one_spin(self, name, n_determinants, n_up, n_down, n_env):
return hk.get_parameter(
name,
(n_determinants * (n_up + n_down), n_env),
init=lambda s, d: jnp.ones(s)
+ (0 if self.init_to_ones else hk.initializers.VarianceScaling(1.0)(s, d)),
)
def get_zeta_for_one_spin(self, name, zeta):
return hk.get_parameter(
name,
zeta.shape,
init=lambda shape, dtype: (
jnp.ones(shape) if self.init_to_ones else jnp.copy(zeta)
),
)
def __call__(self, phys_conf):
diffs = pairwise_diffs(phys_conf.r, phys_conf.R)
if self.spin_restricted:
return self._call_for_one_spin(self.zetas[0], self.pi[0], diffs)
else:
orbs = [
self._call_for_one_spin(zeta, pi, diff)
for zeta, pi, diff in zip(
self.zetas, self.pi, jnp.split(diffs, (self.n_up,))
)
]
return jnp.concatenate(orbs, axis=-2)
|
b2620023b28c55ed7d5f34bb79313e1012a2aae7
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/7_graph/带权图最短路和最小生成树/最短路扩展应用/AcWing 341. 最优贸易.py
|
b2ed106e964e00f263e776f966f539124beca2cd
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
AcWing 341. 最优贸易.py
|
# Enumerate the intermediate city
# dmin[i]: the lowest price at which a crystal ball can be bought on the way from city 1 to city i;
# dmax[i]: the highest price at which a crystal ball can be sold on the way from city i to city n;
# Then enumerate every city as the intermediate buy/sell city and take the maximum of dmax[i] - dmin[i].
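# A minimal sketch of the approach above, under assumed inputs: `adj` and `radj`
# are forward/reverse adjacency lists indexed 1..n and `price[i]` is the crystal
# ball price in city i (these names are illustrative, not from the original file).
from collections import deque
def max_profit(n, adj, radj, price):
    INF = float('inf')
    dmin = [INF] * (n + 1)   # cheapest buy price reachable on some path 1 -> i
    dmax = [-INF] * (n + 1)  # highest sell price reachable on some path i -> n
    def spfa(start, dist, graph, better, combine):
        # SPFA-style relaxation; dist only improves monotonically, so the queue
        # drains even if the graph contains cycles.
        dist[start] = price[start]
        q, inq = deque([start]), [False] * (n + 1)
        inq[start] = True
        while q:
            u = q.popleft()
            inq[u] = False
            for v in graph[u]:
                cand = combine(dist[u], price[v])
                if better(cand, dist[v]):
                    dist[v] = cand
                    if not inq[v]:
                        q.append(v)
                        inq[v] = True
    spfa(1, dmin, adj, lambda a, b: a < b, min)   # forward pass from city 1
    spfa(n, dmax, radj, lambda a, b: a > b, max)  # backward pass from city n
    # Enumerate every city as the intermediate buy/sell point.
    return max(dmax[i] - dmin[i] for i in range(1, n + 1)
               if dmin[i] < INF and dmax[i] > -INF)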
|
023d7391a983facc8e38fd61eeca62c6ad0d294a
|
e17660bcf07fe3221a18dc3da68f06c85c40cbf9
|
/src/py2app/recipes/multiprocessing.py
|
b3b6e5870d4828065e6bfa6693dd0ef814529d2f
|
[
"MIT",
"Python-2.0"
] |
permissive
|
ronaldoussoren/py2app
|
bca832cab41b9a365342d400aed4ebbbe80bed0c
|
e9c7a88f34d79c41a3a344ccc14cd97c24904b9f
|
refs/heads/master
| 2023-09-01T05:14:53.388393
| 2023-04-16T08:34:06
| 2023-04-16T08:34:06
| 233,826,136
| 292
| 34
|
NOASSERTION
| 2023-08-30T17:54:44
| 2020-01-14T11:27:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,323
|
py
|
multiprocessing.py
|
import textwrap
import typing
from io import StringIO
from modulegraph.modulegraph import ModuleGraph
from .. import build_app
from ._types import RecipeInfo
def check(cmd: "build_app.py2app", mf: ModuleGraph) -> typing.Optional[RecipeInfo]:
m = mf.findNode("multiprocessing")
if m is None:
return None
# This is a fairly crude hack to get multiprocessing to do the
# right thing without user visible changes in py2app.
# In particular: multiprocessing assumes that a special command-line
# should be used when "sys.frozen" is set. That is not necessary
# with py2app even though it does set "sys.frozen".
prescript = textwrap.dedent(
"""\
def _boot_multiprocessing():
import sys
import multiprocessing.spawn
orig_get_command_line = multiprocessing.spawn.get_command_line
def wrapped_get_command_line(**kwargs):
orig_frozen = sys.frozen
del sys.frozen
try:
return orig_get_command_line(**kwargs)
finally:
sys.frozen = orig_frozen
multiprocessing.spawn.get_command_line = wrapped_get_command_line
_boot_multiprocessing()
"""
)
return {"prescripts": [StringIO(prescript)]}
|
523d839b9d2bd858b06ca5a9b2f18b565ff25faf
|
af101b467134e10270bb72d02f41f07daa7f57d8
|
/tests/test_models/test_utils/test_sampling_utils.py
|
4da730ffff207057d5699655b9ba883b90dc30e7
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmagic
|
4d864853417db300de4dfe7e83ce380fd1557a23
|
a382f143c0fd20d227e1e5524831ba26a568190d
|
refs/heads/main
| 2023-08-31T14:40:24.936423
| 2023-08-30T05:05:56
| 2023-08-30T05:05:56
| 203,999,962
| 1,370
| 192
|
Apache-2.0
| 2023-09-14T11:39:18
| 2019-08-23T13:04:29
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,130
|
py
|
test_sampling_utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmagic.models.utils.sampling_utils import label_sample_fn, noise_sample_fn
def test_noise_sample_fn():
# test noise is a callable function
noise_callable = torch.randn
noise = noise_sample_fn(noise_callable, noise_size=(2, 3), device='cpu')
assert noise.shape == (1, 2, 3)
def test_label_sample_fn():
label = label_sample_fn(None, num_classes=-1)
assert label is None
label = label_sample_fn(None)
assert label is None
label_inp = torch.randint(0, 10, (1, ))
assert (label_sample_fn(label_inp, num_classes=10) == label_inp).all()
# np.ndarray input
label_inp = np.array([3, 2, 1])
tar_label = torch.LongTensor([3, 2, 1])
assert (label_sample_fn(label_inp, num_classes=10,
device='cpu') == tar_label).all()
# list input
label_inp = [0, 1, 1]
tar_label = torch.LongTensor([0, 1, 1])
assert (label_sample_fn(label_inp, num_classes=10) == tar_label).all()
label_inp = [np.array([0]), np.array([1]), np.array([0])]
tar_label = torch.LongTensor([0, 1, 0])
assert (label_sample_fn(label_inp, num_classes=10) == tar_label).all()
label_inp = [
torch.LongTensor([0]),
torch.LongTensor([1]),
torch.LongTensor([0])
]
tar_label = torch.LongTensor([0, 1, 0])
assert (label_sample_fn(label_inp, num_classes=10) == tar_label).all()
# list input --> raise error
label_inp = ['1', '2']
with pytest.raises(AssertionError):
label_sample_fn(label_inp, num_classes=10)
# callable input
def label_function(num_batches):
return torch.randint(0, 3, size=(num_batches, ))
assert label_sample_fn(label_function, num_batches=3).shape == (3, )
# test raise error
with pytest.raises(AssertionError):
label_sample_fn(torch.randn(3, 3), num_classes=10)
with pytest.raises(AssertionError):
label_sample_fn(torch.randint(0, 3, (2, 2)))
with pytest.raises(AssertionError):
label_sample_fn([0, 10, 2, 3], num_classes=5)
|
29fb2d5a0eabaf4fe512d4e18978b07a04eaac0d
|
c5f7ea2f03ce6e0ff398521031cadc83c023a4bf
|
/pythonping/__init__.py
|
2b3c29e2f07264c917a49c0ddf634d23274eef7d
|
[
"MIT"
] |
permissive
|
alessandromaggio/pythonping
|
aec00a44f9386834919ed1e7c30efc88485e7235
|
afa1e9588002bb2cade23451326bdbea06e7496e
|
refs/heads/master
| 2023-04-09T17:12:36.659125
| 2022-10-25T10:31:35
| 2022-10-25T10:31:35
| 126,620,308
| 193
| 88
|
MIT
| 2023-08-19T23:13:36
| 2018-03-24T16:51:53
|
Python
|
UTF-8
|
Python
| false
| false
| 3,355
|
py
|
__init__.py
|
import sys
from random import randint
from . import network, executor, payload_provider
from .utils import random_text
# this needs to be available across all thread usages and will hold ints
SEED_IDs = []
def ping(target,
timeout=2,
count=4,
size=1,
interval=0,
payload=None,
sweep_start=None,
sweep_end=None,
df=False,
verbose=False,
out=sys.stdout,
match=False,
source=None,
out_format='legacy'):
"""Pings a remote host and handles the responses
:param target: The remote hostname or IP address to ping
:type target: str
:param timeout: Time in seconds before considering each non-arrived reply permanently lost.
:type timeout: Union[int, float]
:param count: How many times to attempt the ping
:type count: int
:param size: Size of the entire packet to send
:type size: int
:param interval: Interval to wait between pings
:type interval: int
:param payload: Payload content, leave None if size is set to use random text
:type payload: Union[str, bytes]
:param sweep_start: If size is not set, initial size in a sweep of sizes
:type sweep_start: int
:param sweep_end: If size is not set, final size in a sweep of sizes
:type sweep_end: int
:param df: Don't Fragment flag value for IP Header
:type df: bool
:param verbose: Print output while performing operations
:type verbose: bool
    :param out: Stream to which the verbose output is redirected
:type out: stream
    :param match: Match the payload of each reply against its request (default behaviour follows that of Windows and
    matches by packet identifier only; Linux behaviour counts a reply with a non-equivalent payload as a failure,
    e.g. when pinging 8.8.8.8 with 1000 bytes the reply is truncated to the first 74 bytes of the request payload
    while the packet identifiers of request and reply still match)
:type match: bool
    :param out_format: How to __repr__ the response. Allowed: legacy, None
    :type out_format: str
:return: List with the result of each ping
:rtype: executor.ResponseList"""
provider = payload_provider.Repeat(b'', 0)
if sweep_start and sweep_end and sweep_end >= sweep_start:
if not payload:
payload = random_text(sweep_start)
provider = payload_provider.Sweep(payload, sweep_start, sweep_end)
elif size and size > 0:
if not payload:
payload = random_text(size)
provider = payload_provider.Repeat(payload, count)
options = ()
if df:
options = network.Socket.DONT_FRAGMENT
# Fix to allow for pythonping multithreaded usage;
    # no need to protect this loop as no one will ever surpass 0xFFFF threads
while True:
# seed_id needs to be less than or equal to 65535 (as original code was seed_id = getpid() & 0xFFFF)
seed_id = randint(0x1, 0xFFFF)
if seed_id not in SEED_IDs:
SEED_IDs.append(seed_id)
break
comm = executor.Communicator(target, provider, timeout, interval, socket_options=options, verbose=verbose, output=out,
seed_id=seed_id, source=source, repr_format=out_format)
comm.run(match_payloads=match)
SEED_IDs.remove(seed_id)
return comm.responses
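# A brief usage sketch of the function above; the target host, the option values
# and the `rtt_avg_ms` attribute are assumptions about the library's public
# surface rather than something taken from this file:
#
#     from pythonping import ping
#
#     responses = ping('8.8.8.8', count=3, size=56, timeout=1)
#     for reply in responses:
#         print(reply)
#     print(responses.rtt_avg_ms)  # ResponseList aggregates round-trip statistics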
|
2a47150545ee0d8ca07af97c3e8438d921273b00
|
1b94c7cfd66804fe8d40b5def35e4b9b18d69ba2
|
/old_py2/controllers/apidocs_controller.py
|
c0405bf66b4a45e8c983751f378d00bab9c55b13
|
[
"MIT"
] |
permissive
|
the-blue-alliance/the-blue-alliance
|
3dc210a9611ce9b240907ffd420f78040318dcdc
|
6d42f3cdb2f785d192f2871419e58aaae3445029
|
refs/heads/py3
| 2023-08-22T21:02:36.398100
| 2023-08-22T19:14:01
| 2023-08-22T19:14:01
| 888,427
| 344
| 263
|
MIT
| 2023-09-14T18:35:20
| 2010-09-04T20:34:11
|
HTML
|
UTF-8
|
Python
| false
| false
| 583
|
py
|
apidocs_controller.py
|
import os
from google.appengine.ext.webapp import template
from consts.notification_type import NotificationType
from controllers.base_controller import CacheableHandler
from template_engine import jinja2_engine
class AddDataHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "add_data_instructions"
def __init__(self, *args, **kw):
super(AddDataHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
return jinja2_engine.render('add_data.html', self.template_values)
|
3d4b9ab67747479069a5b70f0f5e0d6c3e218c59
|
aee26a4c731a84481a499679c3d4cef9ec954aed
|
/tacker/tests/functional/base.py
|
3b4bd560f9c71deab183db0d998c55072efe5a42
|
[
"Apache-2.0"
] |
permissive
|
openstack/tacker
|
6976cbee3afadfd9390849b56da2837feb93e912
|
9c7918f0b501cdeaffae40f585b76fc92b8e196e
|
refs/heads/master
| 2023-09-04T01:22:43.106241
| 2023-08-31T00:06:42
| 2023-08-31T00:42:20
| 21,259,951
| 125
| 172
|
Apache-2.0
| 2021-05-09T06:13:08
| 2014-06-27T01:11:56
|
Python
|
UTF-8
|
Python
| false
| false
| 20,311
|
py
|
base.py
|
# Copyright 2015 Brocade Communications System, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import time
import yaml
from blazarclient import client as blazar_client
from cinderclient import client as cinder_client
from glanceclient.v2 import client as glance_client
from keystoneauth1.identity import v3
from keystoneauth1 import session
from keystoneclient import adapter
from neutronclient.v2_0 import client as neutron_client
from novaclient import client as nova_client
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from tempest.lib import base
from tacker.common import clients
from tacker.common import utils
from tacker.plugins.common import constants as evt_constants
from tacker.tests import constants
from tacker.tests.utils import read_file
from tacker import version
from tackerclient.common import exceptions
from tackerclient.v1_0 import client as tacker_client
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class SessionClient(adapter.Adapter):
def request(self, *args, **kwargs):
kwargs.setdefault('authenticated', False)
kwargs.setdefault('raise_exc', False)
content_type = kwargs.pop('content_type', None) or 'application/json'
headers = kwargs.setdefault('headers', {})
headers.setdefault('Accept', content_type)
try:
kwargs.setdefault('data', kwargs.pop('body'))
except KeyError:
pass
if kwargs.get('data'):
headers.setdefault('Content-Type', content_type)
return super(SessionClient, self).request(*args, **kwargs)
def _decode_json(self, response):
body = response.text
if body and response.headers['Content-Type'] == 'text/plain':
return body
elif body and response.headers['Content-Type'] == 'text/x-sh':
return body
elif body:
return jsonutils.loads(body)
else:
return ""
def do_request(self, url, method, **kwargs):
kwargs.setdefault('authenticated', True)
resp = self.request(url, method, **kwargs)
if ('Content-Type' not in resp.headers or
resp.headers['Content-Type'] == 'application/zip'):
return resp, resp.content
body = self._decode_json(resp)
return resp, body
class BaseTackerTest(base.BaseTestCase):
"""Base test case class for all Tacker API tests."""
@classmethod
def setUpClass(cls):
super(BaseTackerTest, cls).setUpClass()
kwargs = {}
cfg.CONF(args=['--config-file', '/etc/tacker/tacker.conf'],
project='tacker',
version='%%prog %s' % version.version_info.release_string(),
**kwargs)
cls.client = cls.tackerclient()
cls.http_client = cls.tacker_http_client()
cls.h_client = cls.heatclient()
cls.glance_client = cls.glanceclient()
cls.cinder_client = cls.cinderclient()
cls.nova_client = cls.novaclient()
@classmethod
def get_credentials(cls, vim_conf_file=None):
if vim_conf_file is None:
vim_conf_file = 'local-vim.yaml'
vim_params = yaml.safe_load(read_file(vim_conf_file))
vim_params['auth_url'] += '/v3'
return vim_params
@classmethod
def get_auth_session(cls, vim_conf_file=None):
vim_params = cls.get_credentials(vim_conf_file)
auth = v3.Password(
auth_url=vim_params['auth_url'],
username=vim_params['username'],
password=vim_params['password'],
project_name=vim_params['project_name'],
user_domain_name=vim_params['user_domain_name'],
project_domain_name=vim_params['project_domain_name'])
verify = utils.str_to_bool(vim_params.pop('cert_verify', 'False'))
auth_ses = session.Session(auth=auth, verify=verify)
return auth_ses
@classmethod
def tacker_http_client(cls, vim_conf_file=None):
auth_session = cls.get_auth_session(vim_conf_file)
return SessionClient(session=auth_session,
service_type='nfv-orchestration',
region_name='RegionOne')
@classmethod
def tackerclient(cls, vim_conf_file=None):
auth_session = cls.get_auth_session(vim_conf_file)
return tacker_client.Client(session=auth_session, retries=5)
@classmethod
def novaclient(cls, vim_conf_file=None):
vim_params = cls.get_credentials(vim_conf_file)
auth = v3.Password(auth_url=vim_params['auth_url'],
username=vim_params['username'],
password=vim_params['password'],
project_name=vim_params['project_name'],
user_domain_name=vim_params['user_domain_name'],
project_domain_name=vim_params['project_domain_name'])
verify = utils.str_to_bool(vim_params.pop('cert_verify', 'False'))
auth_ses = session.Session(auth=auth, verify=verify)
return nova_client.Client(constants.NOVA_CLIENT_VERSION,
session=auth_ses)
@classmethod
def neutronclient(cls, vim_conf_file=None):
vim_params = cls.get_credentials(vim_conf_file)
auth = v3.Password(auth_url=vim_params['auth_url'],
username=vim_params['username'],
password=vim_params['password'],
project_name=vim_params['project_name'],
user_domain_name=vim_params['user_domain_name'],
project_domain_name=vim_params['project_domain_name'])
verify = utils.str_to_bool(vim_params.pop('cert_verify', 'False'))
auth_ses = session.Session(auth=auth, verify=verify)
return neutron_client.Client(session=auth_ses)
@classmethod
def heatclient(cls, vim_conf_file=None):
if vim_conf_file is None:
vim_conf_file = 'local-vim.yaml'
data = yaml.safe_load(read_file(vim_conf_file))
data['auth_url'] = data['auth_url'] + '/v3'
domain_name = data.pop('domain_name')
data['user_domain_name'] = domain_name
data['project_domain_name'] = domain_name
return clients.OpenstackClients(auth_attr=data).heat
@classmethod
def blazarclient(cls, vim_conf_file=None):
data = cls.get_credentials(vim_conf_file)
domain_name = data.pop('domain_name')
data['user_domain_name'] = domain_name
data['project_domain_name'] = domain_name
auth_ses = (clients.OpenstackClients(auth_attr=data)
.keystone_session.session)
return blazar_client.Client(session=auth_ses,
service_type='reservation',
interface='public',
region_name='RegionOne')
@classmethod
def glanceclient(cls, vim_conf_file=None):
vim_params = cls.get_credentials(vim_conf_file)
auth = v3.Password(auth_url=vim_params['auth_url'],
username=vim_params['username'],
password=vim_params['password'],
project_name=vim_params['project_name'],
user_domain_name=vim_params['user_domain_name'],
project_domain_name=vim_params['project_domain_name'])
verify = utils.str_to_bool(vim_params.pop('cert_verify', 'False'))
auth_ses = session.Session(auth=auth, verify=verify)
return glance_client.Client(session=auth_ses)
@classmethod
def aodh_http_client(cls, vim_conf_file=None):
auth_session = cls.get_auth_session(vim_conf_file)
return SessionClient(session=auth_session,
service_type='alarming',
region_name='RegionOne')
@classmethod
def cinderclient(cls, vim_conf_file=None):
vim_params = cls.get_credentials(vim_conf_file)
auth = v3.Password(auth_url=vim_params['auth_url'],
username=vim_params['username'],
password=vim_params['password'],
project_name=vim_params['project_name'],
user_domain_name=vim_params['user_domain_name'],
project_domain_name=vim_params['project_domain_name'])
verify = utils.str_to_bool(vim_params.pop('cert_verify', 'False'))
auth_ses = session.Session(auth=auth, verify=verify)
return cinder_client.Client(constants.CINDER_CLIENT_VERSION,
session=auth_ses)
def get_vdu_resource(self, stack_id, res_name):
return self.h_client.resources.get(stack_id, res_name)
def wait_until_vnf_status(self, vnf_id, target_status, timeout,
sleep_interval):
start_time = int(time.time())
status = None
while True:
try:
vnf_result = self.client.show_vnf(vnf_id)
status = vnf_result['vnf']['status']
if status == target_status:
break
except exceptions.InternalServerError:
pass
if int(time.time()) - start_time > timeout:
break
time.sleep(sleep_interval)
self.assertEqual(status, target_status,
"vnf %(vnf_id)s with status %(status)s is"
" expected to be %(target)s" %
{"vnf_id": vnf_id, "status": status,
"target": target_status})
def wait_until_vnf_active(self, vnf_id, timeout, sleep_interval):
self.wait_until_vnf_status(vnf_id, 'ACTIVE', timeout,
sleep_interval)
def verify_vnf_update(self, vnf_id):
self.wait_until_vnf_status(vnf_id, 'ACTIVE',
constants.VNF_CIRROS_CREATE_TIMEOUT,
constants.ACTIVE_SLEEP_TIME)
self.wait_until_vnf_status(vnf_id, 'PENDING_HEAL',
constants.VNF_CIRROS_PENDING_HEAL_TIMEOUT,
constants.PENDING_SLEEP_TIME)
self.wait_until_vnf_status(vnf_id, 'ACTIVE',
constants.VNF_CIRROS_CREATE_TIMEOUT,
constants.ACTIVE_SLEEP_TIME)
def wait_until_vnf_delete(self, vnf_id, timeout, sleep_interval=1):
start_time = int(time.time())
while True:
status = None
try:
vnf_result = self.client.show_vnf(vnf_id)
status = vnf_result['vnf']['status']
except exceptions.NotFound:
return
except Exception as e:
LOG.error("Failed to get vnf status: %s", str(e))
if status is not None and status != 'PENDING_DELETE':
raise Exception("Failed with status: %s" % status)
if int(time.time()) - start_time > timeout:
raise Exception("Timeout for deleting vnf %s.",
vnf_id)
time.sleep(sleep_interval)
def wait_until_vnf_dead(self, vnf_id, timeout, sleep_interval):
self.wait_until_vnf_status(vnf_id, 'DEAD', timeout,
sleep_interval)
def validate_vnf_instance(self, vnfd_instance, vnf_instance):
self.assertIsNotNone(vnf_instance)
self.assertIsNotNone(vnf_instance['vnf']['id'])
self.assertIsNotNone(vnf_instance['vnf']['instance_id'])
if vnfd_instance:
self.assertEqual(vnf_instance['vnf']['vnfd_id'], vnfd_instance[
'vnfd']['id'])
def verify_vnf_restart(self, vnfd_instance, vnf_instance):
vnf_id = vnf_instance['vnf']['id']
self.wait_until_vnf_active(
vnf_id,
constants.VNF_CIRROS_CREATE_TIMEOUT,
constants.ACTIVE_SLEEP_TIME)
self.validate_vnf_instance(vnfd_instance, vnf_instance)
self.assertIsNotNone(self.client.show_vnf(vnf_id)['vnf'][
'mgmt_ip_address'])
self.wait_until_vnf_dead(
vnf_id,
constants.VNF_CIRROS_DEAD_TIMEOUT,
constants.DEAD_SLEEP_TIME)
self.wait_until_vnf_active(
vnf_id,
constants.VNF_CIRROS_CREATE_TIMEOUT,
constants.ACTIVE_SLEEP_TIME)
self.validate_vnf_instance(vnfd_instance, vnf_instance)
def verify_vnf_monitor_events(self, vnf_id, vnf_state_list):
for state in vnf_state_list:
params = {'resource_id': vnf_id, 'resource_state': state,
'event_type': evt_constants.RES_EVT_MONITOR}
vnf_evt_list = self.client.list_vnf_events(**params)
mesg = ("%s - state transition expected." % state)
self.assertIsNotNone(vnf_evt_list['vnf_events'], mesg)
def verify_vnf_crud_events(self, vnf_id, evt_type, res_state,
tstamp=None, cnt=1):
params = {'resource_id': vnf_id,
'resource_state': res_state,
'resource_type': evt_constants.RES_TYPE_VNF,
'event_type': evt_type}
if tstamp:
params['timestamp'] = tstamp
vnf_evt_list = self.client.list_vnf_events(**params)
self.assertIsNotNone(vnf_evt_list['vnf_events'],
"List of VNF events are Empty")
self.assertEqual(cnt, len(vnf_evt_list['vnf_events']))
def verify_vnfd_events(self, vnfd_id, evt_type, res_state,
tstamp=None, cnt=1):
params = {'resource_id': vnfd_id,
'resource_state': res_state,
'resource_type': evt_constants.RES_TYPE_VNFD,
'event_type': evt_type}
if tstamp:
params['timestamp'] = tstamp
vnfd_evt_list = self.client.list_vnfd_events(**params)
self.assertIsNotNone(vnfd_evt_list['vnfd_events'],
"List of VNFD events are Empty")
self.assertEqual(cnt, len(vnfd_evt_list['vnfd_events']))
def get_vim(self, vim_list, vim_name):
if len(vim_list.values()) == 0:
assert False, "vim_list is Empty: Default VIM is missing"
        for vims in vim_list.values():
            for vim in vims:
if vim['name'] == vim_name:
return vim
return None
def verify_antispoofing_in_stack(self, stack_id, resource_name):
resource_types = self.h_client.resources
resource_details = resource_types.get(stack_id=stack_id,
resource_name=resource_name)
resource_dict = resource_details.to_dict()
self.assertTrue(resource_dict['attributes']['port_security_enabled'])
def trigger_vnf(self, vnf, policy_name, policy_action):
credential = 'g0jtsxu9'
body = {"trigger": {'policy_name': policy_name,
'action_name': policy_action,
'params': {
'data': {'alarm_id': '35a80852-e24f-46ed-bd34-e2f831d00172', 'current': 'alarm'}, # noqa
'credential': credential}
}
}
self.client.post('/vnfs/%s/triggers' % vnf, body)
def assertDictSupersetOf(self, expected_subset, actual_superset):
"""Checks that actual dict contains the expected dict.
After checking that the arguments are of the right type, this checks
that each item in expected_subset is in, and matches, what is in
actual_superset. Separate tests are done, so that detailed info can
be reported upon failure.
"""
if not isinstance(expected_subset, dict):
self.fail("expected_subset (%s) is not an instance of dict" %
type(expected_subset))
if not isinstance(actual_superset, dict):
self.fail("actual_superset (%s) is not an instance of dict" %
type(actual_superset))
for k, v in expected_subset.items():
self.assertIn(k, actual_superset)
self.assertEqual(v, actual_superset[k],
"Key %(key)s expected: %(exp)r, actual %(act)r" %
{'key': k, 'exp': v, 'act': actual_superset[k]})
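    # Illustrative (hypothetical) use of the helper above: the first call
    # passes because every expected key/value pair is present in the
    # superset, the second fails on the mismatched value.
    #   self.assertDictSupersetOf({'status': 'ACTIVE'},
    #                             {'status': 'ACTIVE', 'name': 'vnf1'})
    #   self.assertDictSupersetOf({'status': 'ACTIVE'}, {'status': 'ERROR'})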
def create_cinder_volume(cls, vol_size, vol_name):
try:
cinder_volume = cls.cinder_client.volumes.create(vol_size,
name=vol_name)
except Exception as e:
LOG.error("Failed to create cinder volume: %s", str(e))
return None
return cinder_volume.id
def delete_cinder_volume(cls, vol_id):
try:
cls.cinder_client.volumes.delete(vol_id)
except Exception as e:
LOG.error("Failed to delete cinder volume: %s", str(e))
def vnfd_and_vnf_create(self, vnfd_file, vnf_name, volume_id=None,
volume_name=None):
input_yaml = read_file(vnfd_file)
tosca_dict = yaml.safe_load(input_yaml)
if volume_id is not None:
volume_detail = tosca_dict['topology_template']['inputs']
volume_detail[volume_name]['default'] = volume_id
tosca_arg = {'vnfd': {'name': vnf_name,
'attributes': {'vnfd': tosca_dict}}}
# Create vnfd with tosca template
vnfd_instance = self.client.create_vnfd(body=tosca_arg)
self.assertIsNotNone(vnfd_instance)
# Create vnf with vnfd_id
vnfd_id = vnfd_instance['vnfd']['id']
self.addCleanup(self.client.delete_vnfd, vnfd_id)
vnf_arg = {'vnf': {'vnfd_id': vnfd_id, 'name': vnf_name}}
vnf_instance = self.client.create_vnf(body=vnf_arg)
self.validate_vnf_instance(vnfd_instance, vnf_instance)
return vnfd_instance, vnf_instance, tosca_dict
def vnfd_and_vnf_create_inline(self, vnfd_file, vnf_name):
vnfd_instance = {}
input_yaml = read_file(vnfd_file)
tosca_dict = yaml.safe_load(input_yaml)
# create vnf directly from template
vnf_arg = {'vnf': {'vnfd_template': tosca_dict, 'name': vnf_name}}
vnf_instance = self.client.create_vnf(body=vnf_arg)
self.validate_vnf_instance(vnfd_instance, vnf_instance)
return vnf_instance, tosca_dict
def _list_op_occs(self, filter_string='', http_client=None):
if http_client is None:
http_client = self.http_client
show_url = os.path.join(
self.base_vnf_lcm_op_occs_url)
resp, response_body = http_client.do_request(
show_url + filter_string, "GET")
return resp, response_body
def _assert_occ_list(self, resp, op_occs_list):
self.assertEqual(200, resp.status_code)
# Only check required parameters.
for op_occs_info in op_occs_list:
self.assertIsNotNone(op_occs_info.get('id'))
self.assertIsNotNone(op_occs_info.get('operationState'))
self.assertIsNotNone(op_occs_info.get('stateEnteredTime'))
self.assertIsNotNone(op_occs_info.get('vnfInstanceId'))
self.assertIsNotNone(op_occs_info.get('operation'))
self.assertIsNotNone(op_occs_info.get('isAutomaticInvocation'))
self.assertIsNotNone(op_occs_info.get('isCancelPending'))
_links = op_occs_info.get('_links')
self.assertIsNotNone(_links.get('self'))
self.assertIsNotNone(_links.get('self').get('href'))
self.assertIsNotNone(_links.get('vnfInstance'))
self.assertIsNotNone(_links.get('vnfInstance').get('href'))
self.assertIsNotNone(_links.get('grant'))
self.assertIsNotNone(_links.get('grant').get('href'))
|
ca72c056f3aa482e742cdab86c31a80e53cdf206
|
e9ef3cd143478660d098668a10e67544a42b5878
|
/Lib/corpuscrawler/crawl_tpi.py
|
54f5ca425d9457c2b7ab2ca68b8e2527ccf27e49
|
[
"Apache-2.0"
] |
permissive
|
google/corpuscrawler
|
a5c790c19b26e6397b768ce26cf12bbcb641eb90
|
10adaecf4ed5a7d0557c8e692c186023746eb001
|
refs/heads/master
| 2023-08-26T04:15:59.036883
| 2022-04-20T08:18:11
| 2022-04-20T08:18:11
| 102,909,145
| 119
| 40
|
NOASSERTION
| 2022-04-20T08:18:12
| 2017-09-08T22:21:03
|
Python
|
UTF-8
|
Python
| false
| false
| 3,553
|
py
|
crawl_tpi.py
|
# coding: utf-8
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, unicode_literals
import re
from corpuscrawler.util import cleantext, crawl_udhr, extract
def crawl(crawler):
out = crawler.get_output(language='tpi')
crawl_udhr(crawler, out, filename='udhr_tpi.txt')
crawler.crawl_pngscriptures_org(out, language='tpi')
crawler.crawl_abc_net_au(out, program_id='tok-pisin')
crawl_wantokniuspepa_com(crawler, out)
def crawl_wantokniuspepa_com(crawler, out):
sections = {'abc-pasifik-nius', 'bisnis-nius', 'helt-nius', 'komentri',
'laip-stail', 'meri-nius', 'nius', 'wantok'}
seeds = set()
for section in sorted(sections):
section_url = 'http://wantokniuspepa.com/index.php/%s' % section
seeds.add(section_url)
section_index = crawler.fetch(section_url)
assert section_index.status == 200, (section_index.status, section_url)
last_page = re.search(
'"End" href=".+?start=(\d+)" class="pagenav"',
section_index.content.decode('utf-8'))
if last_page is not None:
for page in range(1, int(last_page.group(1)) + 1):
seeds.add('http://wantokniuspepa.com/index.php/%s?start=%d' %
(section, page))
urls = set()
for seed in sorted(seeds):
doc = crawler.fetch(seed)
        assert doc.status == 200, (doc.status, seed)
content = doc.content.decode('utf-8')
for u in re.findall(r'(/index\.php/[^"]+?)"', content):
p = u.split('/')
if len(p) > 3 and p[1] == 'index.php' and p[2] in sections:
if re.search(r'/\d{4,}', u) is not None:
urls.add('http://wantokniuspepa.com' + u.split('?')[0])
for url in sorted(urls):
doc = crawler.fetch(url)
assert doc.status == 200, (doc.status, url)
content = doc.content.decode('utf-8')
title = extract('<title>', '</title>', content)
pubdate = re.search(r'<time datetime="([^T]+?)T([^"]+?)" '
'itemprop="datePublished">', content)
pubdate = cleantext(pubdate.group(1)) if pubdate else None
body = extract('<div itemprop="articleBody">', '<ul class="pager',
content)
if not body:
continue
body = body.split('<div class="clearfix"')[0]
text = body.replace('\n', ' ')
text = text.replace(' ,', ',').replace('“ ', '“')
text = re.sub(r'</(?:div|DIV|p|P|[hH][1-6]|table|TABLE)>', '\n', text)
text = re.sub(r'<(?:br|BR)\s*/?>', '\n', text)
paras = [cleantext(p) for p in [title] + text.splitlines()]
        paras = list(filter(None, paras))
if not paras:
continue
out.write('# Location: %s\n' % url)
out.write('# Genre: News\n')
if pubdate:
out.write('# Publication-Date: %s\n' % pubdate)
out.write('\n'.join(paras) + '\n')
|
33f14264376721a29717051bc7af53cf27b36bdd
|
c3e0a6919caf85c35239ef23084df9bbf8dd61c3
|
/pypeit/deprecated/waveio_old.py
|
ad5b79136015ceb16a2d83fe49910730439330c9
|
[
"BSD-3-Clause"
] |
permissive
|
pypeit/PypeIt
|
6eb9e5afd62acc9d363e497cd9e367d620f86ea4
|
0d2e2196afc6904050b1af4d572f5c643bb07e38
|
refs/heads/release
| 2023-08-25T21:15:59.113114
| 2023-06-04T15:23:39
| 2023-06-04T15:23:39
| 36,958,428
| 136
| 98
|
BSD-3-Clause
| 2023-09-12T17:42:15
| 2015-06-05T22:25:37
|
Python
|
UTF-8
|
Python
| false
| false
| 9,725
|
py
|
waveio_old.py
|
"""
Module containing unused routines from ``pypeit.core.wavecal.waveio``
Created: 05-19-2022, TEB
"""
import glob
import os
import datetime
import numpy as np
from astropy.table import Table, Column, vstack
from pypeit import msgs
from pypeit.core.wavecal import defs, waveio
from pypeit import data
from IPython import embed
def load_by_hand():
"""
By-hand line list
Parameters
----------
line_file
add_path
Returns
-------
byhand : Table
"""
str_len_dict = defs.str_len()
src_file = os.path.join(data.Paths.arclines, 'sources', 'by_hand_list.ascii')
# Read
line_list = Table.read(src_file, format='ascii.fixed_width', comment='#')
# Add
line_list['NIST'] = 1
# Deal with Instr and Source
ilist, slist = [], []
for row in line_list:
ilist.append(defs.instruments()[row['sInstr']]) # May need to split
slist.append(row['sSource'])
line_list['Instr'] = ilist
line_list['Source'] = np.array(slist, dtype='S{:d}'.format(str_len_dict['Source']))
# Trim
return line_list[['ion', 'wave', 'NIST', 'Instr', 'amplitude', 'Source']]
def load_nist(ion):
"""
Parse a NIST ASCII table. Note that the long ---- should have been
commented out and also the few lines at the start.
Parameters
----------
ion : str
Name of ion
Returns
-------
tbl : Table
Table of lines
"""
import glob
# Find file
srch_file = os.path.join(data.Paths.nist, f'{ion}_vacuum.ascii')
nist_file = glob.glob(srch_file)
if len(nist_file) == 0:
raise IOError(f"Cannot find NIST file {srch_file}")
# Read
nist_tbl = Table.read(nist_file[0], format='ascii.fixed_width')
gdrow = nist_tbl['Observed'] > 0. # Eliminate dummy lines
nist_tbl = nist_tbl[gdrow]
# Now unique values only (no duplicates)
uniq, indices = np.unique(nist_tbl['Observed'],return_index=True)
nist_tbl = nist_tbl[indices]
# Deal with Rel
agdrel = []
for row in nist_tbl:
try:
gdrel = int(row['Rel.'])
except:
try:
gdrel = int(row['Rel.'][:-1])
except:
gdrel = 0
agdrel.append(gdrel)
agdrel = np.array(agdrel)
# Remove and add
nist_tbl.remove_column('Rel.')
nist_tbl.remove_column('Ritz')
nist_tbl['RelInt'] = agdrel
#nist_tbl.add_column(Column([ion]*len(nist_tbl), name='Ion', dtype='S5'))
nist_tbl.add_column(Column([ion]*len(nist_tbl), name='Ion', dtype='U5'))
nist_tbl.rename_column('Observed','wave')
# Return
return nist_tbl
def load_source_table():
"""
Load table of arcline sources
Returns
-------
sources : Table
"""
src_file = os.path.join(data.Paths.arclines, 'sources', 'arcline_sources.ascii')
# Load
sources = Table.read(src_file, format='ascii.fixed_width', comment='#')
# Return
return sources
#=============================================================================#
# These two routines include reference to the NIST files; the versions in
# `pypeit.core.wavecal.waveio` no longer contain the NIST sections.
# TEB: 05-19-2022
def load_line_list(line_file, add_path=False, use_ion=False, NIST=False):
"""
Parameters
----------
line_file : str
Full path to line_list or name of ion
add_path : bool, optional
Not yet implemented
NIST : bool, optional
NIST formatted table?
use_ion : bool, optional
Interpret line_file as an ion, e.g. CuI
Returns
-------
line_list : Table
"""
if NIST:
if use_ion:
line_file = os.path.join(data.Paths.nist, f'{line_file}_vacuum.ascii')
line_list = Table.read(line_file, format='ascii.fixed_width', comment='#')
# Remove unwanted columns
tkeys = line_list.keys()
for badkey in ['Ritz','Acc.','Type','Ei','Lower','Upper','TP','Line']:
for tkey in tkeys:
if badkey in tkey:
line_list.remove_column(tkey)
# Relative intensity -- Strip junk off the end
reli = []
for imsk, idat in zip(line_list['Rel.'].mask, line_list['Rel.'].data):
if imsk:
reli.append(0.)
else:
try:
reli.append(float(idat))
except ValueError:
try:
reli.append(float(idat[:-1]))
except ValueError:
reli.append(0.)
line_list.remove_column('Rel.')
line_list['RelInt'] = reli
#
gdrows = line_list['Observed'] > 0. # Eliminate dummy lines
line_list = line_list[gdrows]
line_list.rename_column('Observed','wave')
# Others
# Grab ion name
i0 = line_file.rfind('/')
i1 = line_file.rfind('_')
ion = line_file[i0+1:i1]
line_list.add_column(Column([ion]*len(line_list), name='Ion', dtype='U5'))
line_list.add_column(Column([1]*len(line_list), name='NIST'))
else:
line_file = data.get_linelist_filepath(f'{line_file}_lines.dat') if use_ion else \
data.get_linelist_filepath(line_file)
line_list = Table.read(line_file, format='ascii.fixed_width', comment='#')
# Return
return line_list
def load_line_lists(lamps, unknown=False, skip=False, all=False, NIST=False,
restrict_on_instr=None):
"""
Loads a series of line list files
Parameters
----------
lamps : list
List of arc lamps to be used for wavelength calibration.
E.g., ['ArI','NeI','KrI','XeI']
unknown : bool, optional
skip : bool, optional
Skip missing line lists (mainly for building)
NIST : bool, optional
Load the full NIST linelists
restrict_on_instr : str, optional
Restrict according to the input spectrograph
Returns
-------
line_list : Table
"""
# All?
if all:
### Also search the cache for linelists
line_files = glob.glob(os.path.join(data.Paths.linelist, '*_lines.dat'))
lamps = []
for line_file in line_files:
i0 = line_file.rfind('/')
i1 = line_file.rfind('_')
lamps.append(line_file[i0+1:i1])
msgs.info(f"Arc lamps used: {', '.join(lamps)}")
# Read standard files
lists = []
for lamp in lamps:
if NIST:
line_file = os.path.join(data.Paths.nist, f'{lamp}_vacuum.ascii')
else:
line_file = os.path.join(data.Paths.linelist, f'{lamp}_lines.dat')
if not os.path.isfile(line_file):
if not skip:
line_files = glob.glob(os.path.join(data.Paths.linelist, '*_lines.dat'))
all_list = [os.path.split(ll)[1].replace("_lines.dat", "") for ll in line_files]
msgs.warn("Input line {:s} is not included in arclines".format(lamp))
msgs.info("Please choose from the following list:" + msgs.newline() +
",".join(all_list))
import pdb; pdb.set_trace()
raise IOError("Cannot continue without list")
else:
lists.append(load_line_list(line_file, NIST=NIST))
# Stack
if len(lists) == 0:
return None
line_lists = vstack(lists, join_type='exact')
# Restrict on the spectrograph?
if restrict_on_instr is not None:
instr_dict = defs.instruments()
gdI = (line_lists['Instr'] & instr_dict[restrict_on_instr]) > 0
line_lists = line_lists[gdI]
# Unknown
if unknown:
unkn_lines = waveio.load_unknown_list(lamps)
unkn_lines.remove_column('line_flag') # may wish to have this info
# Stack
line_lists = vstack([line_lists, unkn_lines])
# Return
return line_lists
#def load_spectrum(spec_file, index=0):
# """
# Load a simple spectrum from input file
#
# Parameters
# ----------
# spec_file : str
# Possible formats are:
#
# - .fits -- Assumes simple ndarray in 0 extension
# - .ascii -- Assumes Table.read(format='ascii') will work with single column
#
# Returns
# -------
#
# """
# import h5py
# iext = spec_file.rfind('.')
# if 'ascii' in spec_file[iext:]:
# tbl = Table.read(spec_file, format='ascii')
# key = tbl.keys()[0]
# spec = tbl[key].data
# elif 'fits' in spec_file[iext:]:
# spec = fits.open(spec_file)[0].data
# elif 'hdf5' in spec_file[iext:]:
# hdf = h5py.File(spec_file, 'r')
# if 'arcs' in hdf.keys():
# print("Taking arc={:d} in this file".format(index))
# spec = hdf['arcs/'+str(index)+'/spec'].value
# else:
# raise IOError("Not ready for this hdf5 file")
# elif 'json' in spec_file[iext:]:
# jdict = linetools.utils.loadjson(spec_file)
# try:
# spec = np.array(jdict['spec'])
# except KeyError:
# raise IOError("spec not in your JSON dict")
# # Return
# return spec
#=============================================================================#
# This function exists in `pypeit.data.arc_lines.convert_NIST_to_lists`
def write_line_list(tbl, outfile, overwrite=True):
"""
Parameters
----------
tbl
outfile
overwrite (optional), default=True
"""
# Format
tbl['wave'].format = '10.4f'
# Write
with open(outfile,'w') as f:
f.write('# Creation Date: {:s}\n'.format(str(datetime.date.today().strftime('%Y-%m-%d'))))
tbl.write(f, format='ascii.fixed_width', overwrite=overwrite)
|
47a1cf6598597ae8bc32b60fb80bc0c10d8e99eb
|
35e52549f743bc6e4949a8d4ead4326e39c3e52b
|
/tests/test_fallback_or.py
|
692eb57dcafc0a16c104284304110e2f667e9b10
|
[
"MIT"
] |
permissive
|
sspipe/sspipe
|
a825f8ae11c18085c08c49969f1afd01e2edb8e9
|
14930683631639f5821cde4881d7e0b4187681c1
|
refs/heads/master
| 2022-06-02T09:00:37.182086
| 2022-05-28T18:29:16
| 2022-05-28T18:30:12
| 137,625,559
| 149
| 4
|
MIT
| 2021-11-11T04:29:30
| 2018-06-17T03:14:45
|
Python
|
UTF-8
|
Python
| false
| false
| 190
|
py
|
test_fallback_or.py
|
from sspipe import p, px
def test_divide_fallback():
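    # dict views define ``|`` themselves (set union), so sspipe's ``/`` fallback
    # operator is used here to pipe them into ``p(list)`` instead of ``|``.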
assert (dict(x=2, y=3).keys() / p(list) | p(set)) == {'x', 'y'}
assert (dict(x=2, y=3).values() / p(list) | p(set)) == {2, 3}
|
b99064fabe7f158ff50f1a486b31589c4d3137a8
|
77c4f4dd27b8d7497e66a7a5a87ad7ea83f2c4be
|
/python/benchmarks/microbenchmarks.py
|
f8ba383c70b1a8ff1683b194e8fe31ba2801e020
|
[
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"BSD-2-Clause",
"ZPL-2.1",
"BSL-1.0",
"LicenseRef-scancode-public-domain",
"NTP",
"OpenSSL",
"CC-BY-4.0",
"LLVM-exception",
"Python-2.0",
"CC0-1.0",
"LicenseRef-scancode-protobuf",
"JSON",
"Zlib",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
apache/arrow
|
0714bfbf6fd491e1f4ed4acf838845ce4b94ec3e
|
59954225d4615f9b3bd7a3c266fb68761794229a
|
refs/heads/main
| 2023-08-24T09:04:22.253199
| 2023-08-24T07:21:51
| 2023-08-24T07:21:51
| 51,905,353
| 12,955
| 3,585
|
Apache-2.0
| 2023-09-14T20:45:56
| 2016-02-17T08:00:23
|
C++
|
UTF-8
|
Python
| false
| false
| 1,588
|
py
|
microbenchmarks.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pyarrow.benchmark as pb
from . import common
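# asv-style microbenchmark: ``params``/``param_names`` parametrise the cases,
# ``setup`` builds the input list for each value type, and each ``time_*``
# method is what actually gets timed.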
class PandasObjectIsNull(object):
size = 10 ** 5
types = ('int', 'float', 'object', 'decimal')
param_names = ['type']
params = [types]
def setup(self, type_name):
gen = common.BuiltinsGenerator()
if type_name == 'int':
lst = gen.generate_int_list(self.size)
elif type_name == 'float':
lst = gen.generate_float_list(self.size, use_nan=True)
elif type_name == 'object':
lst = gen.generate_object_list(self.size)
elif type_name == 'decimal':
lst = gen.generate_decimal_list(self.size)
else:
assert 0
self.lst = lst
def time_PandasObjectIsNull(self, *args):
pb.benchmark_PandasObjectIsNull(self.lst)
|
33b1980d8f5b8d484b7ed333989f2551f6e13206
|
6e344abceebdd982f3609c4332af1408c0ab64ba
|
/src/logbook/__init__.py
|
abccb3311b7ce958d716314738ddc83b706a823b
|
[
"BSD-3-Clause"
] |
permissive
|
getlogbook/logbook
|
c3d9393eb7552c0aa71c42162627cf3647aeac6f
|
2f9974daf984c4661941782e8df507c20c894688
|
refs/heads/develop
| 2023-08-18T14:18:41.212587
| 2023-08-13T13:30:58
| 2023-08-13T13:30:58
| 790,576
| 817
| 115
|
NOASSERTION
| 2023-09-12T11:33:30
| 2010-07-22T10:13:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,895
|
py
|
__init__.py
|
"""
logbook
~~~~~~~
Simple logging library that aims to support desktop, command line
and web applications alike.
:copyright: (c) 2010 by Armin Ronacher, Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import os
from .base import (
CRITICAL,
DEBUG,
ERROR,
INFO,
NOTICE,
NOTSET,
TRACE,
WARNING,
Flags,
Logger,
LoggerGroup,
LogRecord,
NestedSetup,
Processor,
dispatch_record,
get_level_name,
lookup_level,
set_datetime_format,
)
from .handlers import (
BrotliCompressionHandler,
FileHandler,
FingersCrossedHandler,
GMailHandler,
GroupHandler,
GZIPCompressionHandler,
Handler,
HashingHandlerMixin,
LimitingHandlerMixin,
MailHandler,
MonitoringFileHandler,
NTEventLogHandler,
NullHandler,
RotatingFileHandler,
StderrHandler,
StreamHandler,
StringFormatter,
StringFormatterHandlerMixin,
SyslogHandler,
TestHandler,
TimedRotatingFileHandler,
WrapperHandler,
create_syshandler,
)
from . import compat # isort: skip
# create an anonymous default logger and provide all important
# methods of that logger as global functions
_default_logger = Logger("Generic")
_default_logger.suppress_dispatcher = True
trace = _default_logger.trace
debug = _default_logger.debug
info = _default_logger.info
warn = _default_logger.warn
warning = _default_logger.warning
notice = _default_logger.notice
error = _default_logger.error
exception = _default_logger.exception
catch_exceptions = _default_logger.catch_exceptions
critical = _default_logger.critical
log = _default_logger.log
del _default_logger
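# Hypothetical usage of the module-level convenience functions exported above:
#   import logbook
#   logbook.StderrHandler().push_application()
#   logbook.warning("something unexpected happened")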
# install a default global handler
if os.environ.get("LOGBOOK_INSTALL_DEFAULT_HANDLER"):
default_handler = StderrHandler()
default_handler.push_application()
from .__version__ import __version__
|
9f235e5aa6d5da525040a8e47e8caf334d9ca944
|
529e713a78e82de2ae5d44cfb8ef209e0894d72a
|
/python-argparse/abbreviate.py
|
480e3413e6438848e095929a8f6edd2981b70695
|
[
"MIT"
] |
permissive
|
realpython/materials
|
cd2f548276be2c82f134ca03eadb1cd279e0f26e
|
d2d62756d3854f54a12a767f2bf9470486c0ceef
|
refs/heads/master
| 2023-09-05T22:12:29.806738
| 2023-08-31T20:56:28
| 2023-08-31T20:56:28
| 132,374,697
| 4,678
| 6,482
|
MIT
| 2023-09-12T22:22:06
| 2018-05-06T20:46:18
|
HTML
|
UTF-8
|
Python
| false
| false
| 189
|
py
|
abbreviate.py
|
import argparse
parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument("--argument-with-a-long-name")
args = parser.parse_args()
print(args.argument_with_a_long_name)
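# Because allow_abbrev=False, an abbreviated option such as
#   python abbreviate.py --argument 42
# is rejected as unrecognized, while the full flag
#   python abbreviate.py --argument-with-a-long-name 42
# still parses and prints "42".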
|
3030505331a8fae7ca2524dccd5f77eec11f4362
|
977f7a7386899a5d0152b29b57ec26682b430437
|
/data_managers/data_manager_pangolearn/data_manager/pangolearn_dm.py
|
c7ea5357b441537c591043bd3b61ffe6d6e1bfba
|
[
"MIT"
] |
permissive
|
galaxyproject/tools-iuc
|
0b87e21e1cb075ca6dc6b12622bc4e538a7c6507
|
96f8a533278b4b6394aebd7a8f537513b0d29b1a
|
refs/heads/main
| 2023-08-31T16:14:34.563541
| 2023-08-31T04:31:22
| 2023-08-31T04:31:22
| 23,992,530
| 164
| 508
|
MIT
| 2023-09-13T19:41:14
| 2014-09-13T11:18:49
|
HTML
|
UTF-8
|
Python
| false
| false
| 7,202
|
py
|
pangolearn_dm.py
|
#!/usr/bin/env python
import argparse
import datetime
import json
import operator
import os
import shutil
import sys
import tarfile
import requests
def get_model_list(
existing_release_tags,
url="https://api.github.com/repos/cov-lineages/pangoLEARN/releases"
):
page_num = 0
while True:
page_num += 1
response = requests.get(url + f'?page={page_num}')
if response.status_code == 200:
release_list_chunk = json.loads(response.text)
if not release_list_chunk:
# past the last page of results
return
for e in release_list_chunk:
if e["tag_name"] in existing_release_tags:
continue
if e["prerelease"]:
continue
yield dict(
tag_name=e["tag_name"],
name=e["name"],
date=parse_date(e["tag_name"]),
tarball_url=e["tarball_url"],
)
else:
response.raise_for_status()
def filter_by_date(existing_release_tags, start_date=None, end_date=None):
ret = []
for release in get_model_list(existing_release_tags):
if start_date and release["date"] < start_date:
break
if not end_date or release["date"] <= end_date:
ret.append(release)
return ret
def download_and_unpack(url, output_directory, v3datatree=True):
response = requests.get(url)
if response.status_code == 200:
tmp_filename = url.split("/")[-1]
tmpfile = open(tmp_filename, "wb")
tmpfile.write(response.content)
tmpfile.close()
shutil.copy(tmp_filename, "/tmp")
tf = tarfile.open(tmp_filename)
pl_path = os.path.join(output_directory, tf.next().name)
tf.extractall(output_directory)
os.unlink(tmp_filename)
pangolearn_unpacked_dir = os.path.join(pl_path, "pangoLEARN")
if v3datatree:
# pangolin v3 expects a datadir with the entire pangoLEARN
# subfolder in it.
# In addition, it will only use the data if the __init__.py file
# contained within that subfolder declares a __version__ that is
# newer than the version installed with pangolin.
pangolearn_dir = os.path.join(
output_directory, tmp_filename, "pangoLEARN"
)
os.mkdir(os.path.dirname(pangolearn_dir))
# rewrite the __init__.py file and make the __version__ string
# appear newer than anything that might come with pangolin by
# prepending a "v" to it.
pangolearn_init = open(
os.path.join(pangolearn_unpacked_dir, "__init__.py")
).readlines()
with open(
os.path.join(pangolearn_unpacked_dir, "__init__.py"), "w"
) as o:
for line in pangolearn_init:
if line.startswith('__version__ = "'):
line = line.replace(
'__version__ = "', '__version__ = "v'
)
o.write(line)
else:
# Earlier versions of pangolin expect a datadir with just the
# contents of the downloaded pangoLEARN subfolder in it and don't
# care about the declared version in __init__.py.
pangolearn_dir = os.path.join(
output_directory, tmp_filename
)
os.rename(
pangolearn_unpacked_dir,
pangolearn_dir
)
shutil.rmtree(pl_path)
return tmp_filename
else:
response.raise_for_status()
def parse_date(d):
# Tries to parse the first 10 chars of d as a date, which currently
# succeeds for all pangolearn model releases.
return datetime.datetime.strptime(d[:10], "%Y-%m-%d")
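# e.g. parse_date("2021-05-27") -> datetime.datetime(2021, 5, 27, 0, 0); longer
# tag names also work because only the first 10 characters are parsed.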
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--testmode", default=False, action="store_true")
parser.add_argument("--latest", default=False, action="store_true")
parser.add_argument("--start_date", type=parse_date)
parser.add_argument("--end_date", type=parse_date)
parser.add_argument("--overwrite", default=False, action="store_true")
parser.add_argument('--pangolearn_format_version')
parser.add_argument("datatable_name")
parser.add_argument("galaxy_datamanager_filename")
args = parser.parse_args()
if args.testmode:
releases = filter_by_date([], start_date=args.start_date, end_date=args.end_date)
for release in releases:
print(release["tag_name"], release["tarball_url"].split("/")[-1], release["date"])
sys.exit(0)
with open(args.galaxy_datamanager_filename) as fh:
config = json.load(fh)
output_directory = config.get("output_data", [{}])[0].get("extra_files_path", None)
data_manager_dict = {}
data_manager_dict["data_tables"] = config.get("data_tables", {})
data_manager_dict["data_tables"][args.datatable_name] = data_manager_dict[
"data_tables"
].get(args.datatable_name, [])
# NOTE: the data_manager_dict["data_tables"][args.datatable_name] is not actually populated with the
# contents of the existing data table, so the "no-overwrite" logic and the
# only-download-what-we-don't-have logic does not in fact work. It is left but unused for now.
if not args.overwrite:
existing_release_tags = set(
[
el["value"]
for el in data_manager_dict["data_tables"][args.datatable_name]
]
)
else:
existing_release_tags = set()
if args.latest:
releases = [next(get_model_list(existing_release_tags))]
else:
releases = filter_by_date(
existing_release_tags, start_date=args.start_date, end_date=args.end_date
)
releases_to_download = [
release
for release in releases
if release["tag_name"] not in existing_release_tags
]
for release in releases_to_download:
if args.pangolearn_format_version is not None:
version = args.pangolearn_format_version
else:
# 2021-05-27 was the first release of pangoLEARN for pangolin 3, which changed DB format
if release["date"] >= datetime.datetime(2021, 5, 27):
version = '3.0'
else:
version = '1.0'
fname = download_and_unpack(
release["tarball_url"],
output_directory,
v3datatree=version == '3.0'
)
data_manager_dict["data_tables"][args.datatable_name].append(
dict(
value=release["tag_name"],
description=release["name"],
format_version=version,
path=output_directory + "/" + fname,
)
)
data_manager_dict["data_tables"][args.datatable_name].sort(
key=operator.itemgetter("value"), reverse=True
)
with open(args.galaxy_datamanager_filename, "w") as fh:
json.dump(data_manager_dict, fh, indent=2, sort_keys=True)
|
22b42dc103ba6b99610b3004a013934741a7bff4
|
ae28de03d8a17c1e43215d9f5396f6dab46aa7f1
|
/tests/sampletest/paramtest.py
|
848a8d158e1ef9ba50f68a765758c11756cf8a5f
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
Tencent/QTAF
|
0b1ee8a5e08f2dd03552d9d982d180495b0aa634
|
70a76bb78b9d9a603f7b679bac8e38ad5b1db2f0
|
refs/heads/master
| 2023-09-01T07:43:58.939781
| 2023-07-25T10:50:06
| 2023-07-25T10:50:06
| 68,911,733
| 518
| 156
|
NOASSERTION
| 2023-07-27T08:26:24
| 2016-09-22T10:43:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,789
|
py
|
paramtest.py
|
# -*- coding: utf-8 -*-
"""
Parameter test cases
"""
import testbase
class ParamTestWithoutAddParams(testbase.TestCase):
"""参数测试用例"""
owner = "foo"
status = testbase.TestCase.EnumStatus.Ready
timeout = 0.1
priority = testbase.TestCase.EnumPriority.Normal
def runTest(self): # pylint: disable=invalid-name
self.assert_("不存在test", "test" not in self.__dict__)
self.assert_("不存在test1", "test1" not in self.__dict__)
class ParamTest(testbase.TestCase):
"""参数测试用例"""
owner = "foo"
status = testbase.TestCase.EnumStatus.Ready
timeout = 0.1
priority = testbase.TestCase.EnumPriority.Normal
def add_params(self):
self.add_param("test", int, default=100)
self.add_param("test1", int, default=100)
def runTest(self): # pylint: disable=invalid-name
self.assert_("存在test", "test" in self.__dict__)
self.assert_("存在test1", "test1" in self.__dict__)
self.assert_equal("断言test", self.test, 100)
self.assert_equal("断言test1", self.test1, 100)
class ParamOverWriteTest(testbase.TestCase):
"""参数测试用例"""
owner = "foo"
status = testbase.TestCase.EnumStatus.Ready
timeout = 0.1
priority = testbase.TestCase.EnumPriority.Normal
def add_params(self):
self.add_param("test", int, default=100)
self.add_param("test1", int, default=100)
def runTest(self): # pylint: disable=invalid-name
self.assert_("存在test", "test" in self.__dict__)
self.assert_("存在test1", "test1" in self.__dict__)
self.assert_equal("断言test", self.test, 200)
self.assert_equal("断言test1", self.test1, 400)
if __name__ == "__main__":
ParamTest().run()
|
4e11c7876d46d392971c9501acbd725d20aed885
|
704976ea552111c6a5af9cd7cb62b9d9abaf3996
|
/rpython/rtyper/exceptiondata.py
|
e0086cb0f2ba210f47e71b331821202db123e030
|
[
"BSD-3-Clause"
] |
permissive
|
mesalock-linux/mesapy
|
4f02c5819ce7f2f6e249d34840f1aa097577645d
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
refs/heads/mesapy2.7
| 2023-08-16T21:33:02.239581
| 2019-08-13T10:29:43
| 2019-08-13T18:06:45
| 136,080,721
| 396
| 33
|
NOASSERTION
| 2020-04-01T03:05:18
| 2018-06-04T20:45:17
|
Python
|
UTF-8
|
Python
| false
| false
| 2,513
|
py
|
exceptiondata.py
|
from rpython.annotator import model as annmodel
from rpython.annotator.exception import standardexceptions
from rpython.rtyper.llannotation import SomePtr
from rpython.rtyper.rclass import (
ll_issubclass, ll_type, ll_cast_to_object, getclassrepr, getinstancerepr)
class UnknownException(Exception):
pass
class ExceptionData(object):
"""Public information for the code generators to help with exceptions."""
standardexceptions = standardexceptions
def __init__(self, rtyper):
# (NB. rclass identifies 'Exception' and 'object')
r_type = rtyper.rootclass_repr
r_instance = getinstancerepr(rtyper, None)
r_type.setup()
r_instance.setup()
self.r_exception_type = r_type
self.r_exception_value = r_instance
self.lltype_of_exception_type = r_type.lowleveltype
self.lltype_of_exception_value = r_instance.lowleveltype
self.rtyper = rtyper
def finish(self, rtyper):
bk = rtyper.annotator.bookkeeper
for cls in self.standardexceptions:
classdef = bk.getuniqueclassdef(cls)
getclassrepr(rtyper, classdef).setup()
def get_standard_ll_exc_instance(self, rtyper, clsdef):
r_inst = getinstancerepr(rtyper, clsdef)
example = r_inst.get_reusable_prebuilt_instance()
example = ll_cast_to_object(example)
return example
def get_standard_ll_exc_instance_by_class(self, exceptionclass):
if exceptionclass not in self.standardexceptions:
raise UnknownException(exceptionclass)
clsdef = self.rtyper.annotator.bookkeeper.getuniqueclassdef(
exceptionclass)
return self.get_standard_ll_exc_instance(self.rtyper, clsdef)
def make_helpers(self, rtyper):
# create helper functionptrs
self.fn_exception_match = self.make_exception_matcher(rtyper)
self.fn_type_of_exc_inst = self.make_type_of_exc_inst(rtyper)
def make_exception_matcher(self, rtyper):
# ll_exception_matcher(real_exception_vtable, match_exception_vtable)
s_typeptr = SomePtr(self.lltype_of_exception_type)
helper_fn = rtyper.annotate_helper_fn(ll_issubclass, [s_typeptr, s_typeptr])
return helper_fn
def make_type_of_exc_inst(self, rtyper):
# ll_type_of_exc_inst(exception_instance) -> exception_vtable
s_excinst = SomePtr(self.lltype_of_exception_value)
helper_fn = rtyper.annotate_helper_fn(ll_type, [s_excinst])
return helper_fn
|
5acb975f4247e512ea8e5bc1126e6d2f71a9bcd3
|
83b8b30ebb633eecd29ca0a7a20cc43a293c9333
|
/tests/cpydiff/types_str_keywords.py
|
77a4eac1c1db2aec33e270d98a9e42dda611ffa5
|
[
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
adafruit/circuitpython
|
430ec895149d1eb814b505db39b4977a35ee88a7
|
506dca71b0cbb7af749bb51f86b01021db5483b3
|
refs/heads/main
| 2023-08-21T16:30:46.781068
| 2023-08-20T00:39:44
| 2023-08-20T00:39:44
| 66,166,069
| 3,806
| 1,560
|
MIT
| 2023-09-14T19:23:51
| 2016-08-20T20:10:40
|
C
|
UTF-8
|
Python
| false
| false
| 217
|
py
|
types_str_keywords.py
|
"""
categories: Types,str
description: str(...) with keywords not implemented
cause: Unknown
workaround: Input the encoding format directly. eg ``print(bytes('abc', 'utf-8'))``
"""
print(str(b"abc", encoding="utf8"))
|
f723322810b5794b343ba617702fe91b3d18f51e
|
1e148aada79cb648872bb8ecc740a6a798b2e236
|
/audiomentations/augmentations/clipping_distortion.py
|
466268b6ee6683c357f68c834df8900c362add30
|
[
"MIT"
] |
permissive
|
iver56/audiomentations
|
a40ae457ca03ab8c927ad804f489cef783dae8d4
|
498a7d4f149d8917813aa35ff18e748cff49cd09
|
refs/heads/main
| 2023-09-05T05:53:05.369792
| 2023-08-30T13:12:51
| 2023-08-30T13:12:51
| 170,352,817
| 1,520
| 182
|
MIT
| 2023-09-07T14:35:26
| 2019-02-12T16:36:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,161
|
py
|
clipping_distortion.py
|
import random
import numpy as np
from numpy.typing import NDArray
from audiomentations.core.transforms_interface import BaseWaveformTransform
class ClippingDistortion(BaseWaveformTransform):
"""Distort signal by clipping a random percentage of points
The percentage of points that will be clipped is drawn from a uniform distribution between
the two input parameters min_percentile_threshold and max_percentile_threshold. If for instance
30% is drawn, the samples are clipped if they're below the 15th or above the 85th percentile.
"""
supports_multichannel = True
def __init__(
self,
min_percentile_threshold: int = 0,
max_percentile_threshold: int = 40,
p: float = 0.5,
):
"""
:param min_percentile_threshold: int, A lower bound on the total percent of samples that
will be clipped
:param max_percentile_threshold: int, An upper bound on the total percent of samples that
will be clipped
:param p: The probability of applying this transform
"""
super().__init__(p)
assert min_percentile_threshold <= max_percentile_threshold
assert 0 <= min_percentile_threshold <= 100
assert 0 <= max_percentile_threshold <= 100
self.min_percentile_threshold = min_percentile_threshold
self.max_percentile_threshold = max_percentile_threshold
def randomize_parameters(self, samples: NDArray[np.float32], sample_rate: int):
super().randomize_parameters(samples, sample_rate)
if self.parameters["should_apply"]:
self.parameters["percentile_threshold"] = random.randint(
self.min_percentile_threshold, self.max_percentile_threshold
)
def apply(self, samples: NDArray[np.float32], sample_rate: int):
lower_percentile_threshold = int(self.parameters["percentile_threshold"] / 2)
lower_threshold, upper_threshold = np.percentile(
samples, [lower_percentile_threshold, 100 - lower_percentile_threshold]
)
samples = np.clip(samples, lower_threshold, upper_threshold)
return samples
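# A minimal numpy-only sketch (not part of the audiomentations API) of the
# percentile clipping described in the class docstring: with a hypothetical
# 30% threshold, everything below the 15th or above the 85th percentile of
# the signal is clipped.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    demo_samples = rng.standard_normal(1000).astype(np.float32)
    percentile_threshold = 30  # hypothetical draw between the min/max thresholds
    lower = percentile_threshold / 2
    lo, hi = np.percentile(demo_samples, [lower, 100 - lower])
    clipped = np.clip(demo_samples, lo, hi)
    print("clipped to [%.3f, %.3f]" % (lo, hi))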
|
dd430785cedb789c8e30fa4745ec543d0b314f94
|
0760fb4901a75766921a205b55686d6d6f049b30
|
/rllib/utils/exploration/ornstein_uhlenbeck_noise.py
|
441f6914e6565f5a5784d43c8c8f72d7ce5595ab
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ray-project/ray
|
a4bb6940b08b59a61ef0b8e755a52d8563a2f867
|
edba68c3e7cf255d1d6479329f305adb7fa4c3ed
|
refs/heads/master
| 2023-08-31T03:36:48.164405
| 2023-08-31T03:20:38
| 2023-08-31T03:20:38
| 71,932,349
| 29,482
| 5,669
|
Apache-2.0
| 2023-09-14T21:48:14
| 2016-10-25T19:38:30
|
Python
|
UTF-8
|
Python
| false
| false
| 10,448
|
py
|
ornstein_uhlenbeck_noise.py
|
import numpy as np
from typing import Optional, Union
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.utils.annotations import override, PublicAPI
from ray.rllib.utils.exploration.gaussian_noise import GaussianNoise
from ray.rllib.utils.framework import (
try_import_tf,
try_import_torch,
get_variable,
TensorType,
)
from ray.rllib.utils.numpy import convert_to_numpy
from ray.rllib.utils.schedules import Schedule
from ray.rllib.utils.tf_utils import zero_logps_from_actions
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
@PublicAPI
class OrnsteinUhlenbeckNoise(GaussianNoise):
"""An exploration that adds Ornstein-Uhlenbeck noise to continuous actions.
If explore=True, returns sampled actions plus a noise term X,
which changes according to this formula:
    Xt+1 = Xt - theta*Xt + sigma*N[0,stddev], where theta, sigma and stddev are
constants. Also, some completely random period is possible at the
beginning.
If explore=False, returns the deterministic action.
"""
def __init__(
self,
action_space,
*,
framework: str,
ou_theta: float = 0.15,
ou_sigma: float = 0.2,
ou_base_scale: float = 0.1,
random_timesteps: int = 1000,
initial_scale: float = 1.0,
final_scale: float = 0.02,
scale_timesteps: int = 10000,
scale_schedule: Optional[Schedule] = None,
**kwargs
):
"""Initializes an Ornstein-Uhlenbeck Exploration object.
Args:
action_space: The gym action space used by the environment.
ou_theta: The theta parameter of the Ornstein-Uhlenbeck process.
ou_sigma: The sigma parameter of the Ornstein-Uhlenbeck process.
ou_base_scale: A fixed scaling factor, by which all OU-
noise is multiplied. NOTE: This is on top of the parent
GaussianNoise's scaling.
random_timesteps: The number of timesteps for which to act
completely randomly. Only after this number of timesteps, the
`self.scale` annealing process will start (see below).
initial_scale: The initial scaling weight to multiply the
noise with.
final_scale: The final scaling weight to multiply the noise with.
scale_timesteps: The timesteps over which to linearly anneal the
scaling factor (after(!) having used random actions for
                `random_timesteps` steps).
scale_schedule: An optional Schedule object to use (instead
of constructing one from the given parameters).
framework: One of None, "tf", "torch".
"""
        # The current OU-state value (gets updated each time an exploration
# action is computed).
self.ou_state = get_variable(
np.array(action_space.low.size * [0.0], dtype=np.float32),
framework=framework,
tf_name="ou_state",
torch_tensor=True,
device=None,
)
super().__init__(
action_space,
framework=framework,
random_timesteps=random_timesteps,
initial_scale=initial_scale,
final_scale=final_scale,
scale_timesteps=scale_timesteps,
scale_schedule=scale_schedule,
stddev=1.0, # Force `self.stddev` to 1.0.
**kwargs
)
self.ou_theta = ou_theta
self.ou_sigma = ou_sigma
self.ou_base_scale = ou_base_scale
# Now that we know the device, move ou_state there, in case of PyTorch.
if self.framework == "torch" and self.device is not None:
self.ou_state = self.ou_state.to(self.device)
@override(GaussianNoise)
def _get_tf_exploration_action_op(
self,
action_dist: ActionDistribution,
explore: Union[bool, TensorType],
timestep: Union[int, TensorType],
):
ts = timestep if timestep is not None else self.last_timestep
scale = self.scale_schedule(ts)
# The deterministic actions (if explore=False).
deterministic_actions = action_dist.deterministic_sample()
# Apply base-scaled and time-annealed scaled OU-noise to
# deterministic actions.
gaussian_sample = tf.random.normal(
shape=[self.action_space.low.size], stddev=self.stddev
)
ou_new = self.ou_theta * -self.ou_state + self.ou_sigma * gaussian_sample
if self.framework == "tf2":
self.ou_state.assign_add(ou_new)
ou_state_new = self.ou_state
else:
ou_state_new = tf1.assign_add(self.ou_state, ou_new)
high_m_low = self.action_space.high - self.action_space.low
high_m_low = tf.where(
tf.math.is_inf(high_m_low), tf.ones_like(high_m_low), high_m_low
)
noise = scale * self.ou_base_scale * ou_state_new * high_m_low
stochastic_actions = tf.clip_by_value(
deterministic_actions + noise,
self.action_space.low * tf.ones_like(deterministic_actions),
self.action_space.high * tf.ones_like(deterministic_actions),
)
# Stochastic actions could either be: random OR action + noise.
random_actions, _ = self.random_exploration.get_tf_exploration_action_op(
action_dist, explore
)
exploration_actions = tf.cond(
pred=tf.convert_to_tensor(ts < self.random_timesteps),
true_fn=lambda: random_actions,
false_fn=lambda: stochastic_actions,
)
        # Choose by `explore` (main exploration switch).
action = tf.cond(
pred=tf.constant(explore, dtype=tf.bool)
if isinstance(explore, bool)
else explore,
true_fn=lambda: exploration_actions,
false_fn=lambda: deterministic_actions,
)
# Logp=always zero.
logp = zero_logps_from_actions(deterministic_actions)
# Increment `last_timestep` by 1 (or set to `timestep`).
if self.framework == "tf2":
if timestep is None:
self.last_timestep.assign_add(1)
else:
self.last_timestep.assign(tf.cast(timestep, tf.int64))
else:
assign_op = (
tf1.assign_add(self.last_timestep, 1)
if timestep is None
else tf1.assign(self.last_timestep, timestep)
)
with tf1.control_dependencies([assign_op, ou_state_new]):
action = tf.identity(action)
logp = tf.identity(logp)
return action, logp
@override(GaussianNoise)
def _get_torch_exploration_action(
self,
action_dist: ActionDistribution,
explore: bool,
timestep: Union[int, TensorType],
):
# Set last timestep or (if not given) increase by one.
self.last_timestep = (
timestep if timestep is not None else self.last_timestep + 1
)
# Apply exploration.
if explore:
# Random exploration phase.
if self.last_timestep < self.random_timesteps:
action, _ = self.random_exploration.get_torch_exploration_action(
action_dist, explore=True
)
# Apply base-scaled and time-annealed scaled OU-noise to
# deterministic actions.
else:
det_actions = action_dist.deterministic_sample()
scale = self.scale_schedule(self.last_timestep)
gaussian_sample = scale * torch.normal(
mean=torch.zeros(self.ou_state.size()), std=1.0
).to(self.device)
ou_new = (
self.ou_theta * -self.ou_state + self.ou_sigma * gaussian_sample
)
self.ou_state += ou_new
high_m_low = torch.from_numpy(
self.action_space.high - self.action_space.low
).to(self.device)
high_m_low = torch.where(
torch.isinf(high_m_low),
torch.ones_like(high_m_low).to(self.device),
high_m_low,
)
noise = scale * self.ou_base_scale * self.ou_state * high_m_low
action = torch.min(
torch.max(
det_actions + noise,
torch.tensor(
self.action_space.low,
dtype=torch.float32,
device=self.device,
),
),
torch.tensor(
self.action_space.high, dtype=torch.float32, device=self.device
),
)
# No exploration -> Return deterministic actions.
else:
action = action_dist.deterministic_sample()
# Logp=always zero.
logp = torch.zeros((action.size()[0],), dtype=torch.float32, device=self.device)
return action, logp
@override(GaussianNoise)
def get_state(self, sess: Optional["tf.Session"] = None):
"""Returns the current scale value.
Returns:
Union[float,tf.Tensor[float]]: The current scale value.
"""
if sess:
return sess.run(
dict(
self._tf_state_op,
**{
"ou_state": self.ou_state,
}
)
)
state = super().get_state()
return dict(
state,
**{
"ou_state": convert_to_numpy(self.ou_state)
if self.framework != "tf"
else self.ou_state,
}
)
@override(GaussianNoise)
def set_state(self, state: dict, sess: Optional["tf.Session"] = None) -> None:
if self.framework == "tf":
self.ou_state.load(state["ou_state"], session=sess)
elif isinstance(self.ou_state, np.ndarray):
self.ou_state = state["ou_state"]
elif torch and torch.is_tensor(self.ou_state):
self.ou_state = torch.from_numpy(state["ou_state"])
else:
self.ou_state.assign(state["ou_state"])
super().set_state(state, sess=sess)
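# A minimal numpy-only sketch (not part of the RLlib API) of the OU state
# update used above, x <- x + (-theta * x + sigma * N(0, stddev)), with
# hypothetical demo constants matching this class's defaults.
if __name__ == "__main__":
    theta, sigma, stddev = 0.15, 0.2, 1.0  # hypothetical demo values
    ou_state = np.zeros(3, dtype=np.float32)
    for _ in range(5):
        gaussian_sample = np.random.normal(0.0, stddev, size=ou_state.shape)
        ou_state = ou_state + (theta * -ou_state + sigma * gaussian_sample)
        print(ou_state)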
|
dc6669626029b3e8c5866b7f51ca24bce7d8c2aa
|
66f383fec502102bfec58ed8cb9c43a71e599c55
|
/apps/changelog/classes/release_version.py
|
75f585b1a81d7ead6812ceb833c6ada8b36d33df
|
[
"MIT"
] |
permissive
|
hacktoolkit/django-htk
|
0a984a28f7fbc7eed8e2b1975d210792ddbee829
|
935c4913e33d959f8c29583825f72b238f85b380
|
refs/heads/master
| 2023-08-08T11:52:54.298160
| 2023-07-21T19:08:37
| 2023-07-21T19:08:37
| 15,924,904
| 210
| 65
|
MIT
| 2023-09-08T23:59:28
| 2014-01-15T04:23:40
|
Python
|
UTF-8
|
Python
| false
| false
| 861
|
py
|
release_version.py
|
# Python Standard Library Imports
import datetime
from collections import namedtuple
class ReleaseVersion(
namedtuple(
'ReleaseVersion',
'origin_url,ref,date,sha,branch',
)
):
# origin_url: str
# ref: str
# date: str
# sha: str
# branch: str
@property
def readable_date(self):
dt = datetime.datetime.strptime(self.date, '%Y%m%d%H%M%S')
# fmt = '%A, %B %-d, %Y %H:%M%S'
fmt = '%c'
value = dt.strftime(fmt) if dt else self.date
return value
@property
def tag(self):
value = (
self.ref[len('tag: ') :] # noqa: E203
if self.ref.startswith('tag: ')
else self.ref
)
return value
@property
def url(self):
url = '{}/releases/tag/{}'.format(self.origin_url, self.tag)
return url
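# A hypothetical usage sketch (all values below are placeholders):
if __name__ == "__main__":
    release = ReleaseVersion(
        origin_url='https://github.com/example/repo',
        ref='tag: v1.2.3',
        date='20230115120000',
        sha='abc1234',
        branch='master',
    )
    print(release.tag)            # v1.2.3
    print(release.url)            # https://github.com/example/repo/releases/tag/v1.2.3
    print(release.readable_date)  # e.g. Sun Jan 15 12:00:00 2023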
|
e138fa181db0d618c5fd56e73164a0935a500445
|
aa2dd0720ac3cf261c7e2d2cdf3d88dee68360d5
|
/tests/core/test_views_heartbeat.py
|
bee4a71eeda7e8c1c7a73c2ea35445f32c05b37e
|
[
"Apache-2.0"
] |
permissive
|
Kinto/kinto
|
3025e269a5f2ecc8077fd44fbb1e6c38ae6a4a8b
|
6edf6453033e0106410fe1f8c70323b6fea2f2fe
|
refs/heads/master
| 2023-08-31T13:36:10.987472
| 2023-08-22T09:37:52
| 2023-08-22T09:37:52
| 31,315,021
| 4,764
| 575
|
NOASSERTION
| 2023-09-13T14:41:08
| 2015-02-25T13:34:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,543
|
py
|
test_views_heartbeat.py
|
from unittest import mock
from kinto.core.testing import unittest
from .support import BaseWebTest
class SuccessTest(BaseWebTest, unittest.TestCase):
def test_returns_storage_true_if_ok(self):
response = self.app.get("/__heartbeat__")
self.assertEqual(response.json["storage"], True)
def test_returns_cache_true_if_ok(self):
response = self.app.get("/__heartbeat__")
self.assertEqual(response.json["cache"], True)
def test_successful_if_one_heartbeat_is_none(self):
self.app.app.registry.heartbeats["probe"] = lambda r: None
response = self.app.get("/__heartbeat__", status=200)
self.assertEqual(response.json["probe"], None)
class FailureTest(BaseWebTest, unittest.TestCase):
def setUp(self):
self._heartbeats = {**self.app.app.registry.heartbeats}
super().setUp()
def tearDown(self):
self.app.app.registry.heartbeats = self._heartbeats
super().tearDown()
def test_returns_storage_false_if_ko(self):
self.app.app.registry.heartbeats["storage"] = lambda r: False
response = self.app.get("/__heartbeat__", status=503)
self.assertEqual(response.json["storage"], False)
self.assertEqual(response.json["cache"], True)
def test_returns_cache_false_if_ko(self):
self.app.app.registry.heartbeats["cache"] = lambda r: False
response = self.app.get("/__heartbeat__", status=503)
self.assertEqual(response.json["cache"], False)
self.assertEqual(response.json["storage"], True)
def test_returns_false_if_heartbeat_times_out(self):
def sleepy(request):
import time
time.sleep(1)
self.app.app.registry.heartbeats["cache"] = sleepy
with mock.patch.dict(self.app.app.registry.settings, [("heartbeat_timeout_seconds", 0.1)]):
response = self.app.get("/__heartbeat__", status=503)
self.assertEqual(response.json["cache"], False)
self.assertEqual(response.json["storage"], True)
def test_returns_false_if_heartbeat_fails(self):
self.app.app.registry.heartbeats["cache"] = lambda r: 1 / 0
response = self.app.get("/__heartbeat__", status=503)
self.assertEqual(response.json["cache"], False)
self.assertEqual(response.json["storage"], True)
class LoadBalancerHeartbeat(BaseWebTest, unittest.TestCase):
def test_returns_200_with_empty_body(self):
resp = self.app.get("/__lbheartbeat__", status=200)
self.assertEqual(resp.json, {})
|
b275f0d8407a5c435b88684a52b7c8c95ce175e9
|
b156aaaed17aa2b36e8f23c6cf92e732fb931525
|
/hpOneView/resources/servers/migratable_vc_domains.py
|
78d8cdb6f4cfda564e1cf11cc6ebdb0bc8823de2
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
HewlettPackard/python-hpOneView
|
48dba564bfeeddf5ca442c49a6eaa60638d4f3e5
|
678d53b338f6bc7af7adb63153d7d8d99dc94ac0
|
refs/heads/release/v4.8.0
| 2023-08-09T07:36:15.905572
| 2019-08-28T09:53:45
| 2019-08-28T09:53:45
| 14,697,221
| 108
| 80
|
MIT
| 2020-02-10T09:00:45
| 2013-11-25T20:18:55
|
Python
|
UTF-8
|
Python
| false
| false
| 5,291
|
py
|
migratable_vc_domains.py
|
# -*- coding: utf-8 -*-
###
# (C) Copyright (2016-2017) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from hpOneView.resources.resource import ResourceClient
class MigratableVcDomains(object):
"""
The migratable VC domains resource provides methods for migrating Virtual Connect (VC)
enclosures into the appliance. The supported operations are: testing compatibility of a
VC-managed enclosure, retrieving a compatibility report, deleting a compatibility
report, and migrating a VC-managed enclosure into the appliance.
"""
URI = '/rest/migratable-vc-domains'
def __init__(self, connection):
self._connection = connection
self._client = ResourceClient(connection, self.URI)
@staticmethod
def make_migration_information(oaIpAddress, oaUsername, oaPassword, vcmUsername, vcmPassword,
iloLicenseType='OneView', enclosureGroupUri=None):
return {
'credentials': {
'oaIpAddress': oaIpAddress,
'oaUsername': oaUsername,
'oaPassword': oaPassword,
'vcmUsername': vcmUsername,
'vcmPassword': vcmPassword,
'type': 'EnclosureCredentials'
},
'iloLicenseType': iloLicenseType,
'enclosureGroupUri': enclosureGroupUri,
'type': 'migratable-vc-domains',
'category': 'migratable-vc-domains'
}
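# A minimal usage sketch (all credential values are placeholders; assumes an
# existing instance named migratable_vc_domains):
#
#   info = MigratableVcDomains.make_migration_information(
#       '192.0.2.10', 'oa-admin', 'oa-secret', 'vcm-admin', 'vcm-secret')
#   report = migratable_vc_domains.test_compatibility(info)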
def test_compatibility(self, migrationInformation, timeout=-1):
"""
Creates a migration report for an enclosure with a Virtual Connect domain.
Args:
migrationInformation: A dict specifying the enclosure, OA username, OA password, VCM username, and VCM
password among other things. Use make_migration_information to easily create this dict.
timeout: Timeout in seconds. Waits for task completion by default. The timeout does not abort the task in
OneView; just stops waiting for its completion.
Returns: dict: a migration report.
"""
return self._client.create(migrationInformation, timeout=timeout)
def get_migration_report(self, id_or_uri):
"""
Returns a migration report that has previously been generated.
Args:
id_or_uri: ID or URI of the migration report.
Returns: dict: a migration report.
"""
return self._client.get(id_or_uri)
def migrate(self, id_or_uri, timeout=-1):
"""
Initiates a migration of an enclosure specified by the ID or URI of a migration report.
Args:
id_or_uri: ID or URI of the migration report.
timeout: Timeout in seconds. Waits for task completion by default. The timeout does not abort the task in
OneView; just stops waiting for its completion.
Returns: dict: a migration report.
"""
# create the special payload to tell the VC Migration Manager to migrate the VC domain
migrationInformation = {
'migrationState': 'Migrated',
'type': 'migratable-vc-domains',
'category': 'migratable-vc-domains'
}
# call build_uri manually since .update(...) doesn't do it and the URI is not to be included in the body when
# requesting a migration
complete_uri = self._client.build_uri(id_or_uri)
return self._client.update(migrationInformation, uri=complete_uri, timeout=timeout)
def delete(self, id_or_uri, timeout=-1):
"""
Deletes a migration report.
Args:
id_or_uri: ID or URI of the migration report.
timeout: Timeout in seconds. Waits for task completion by default. The timeout does not abort the task in
OneView; just stops waiting for its completion.
Returns: bool: Indicates if the migration report was successfully deleted.
"""
return self._client.delete(id_or_uri, timeout=timeout)
|
6680ae80673196d5027d1011059e965ca041bbaf
|
cfa35dc2ea93ee0eceb2399a9e6112e987579c09
|
/stonesoup/writer/yaml.py
|
ae3f85ba99558184b9ad50346f576c39c0d8dc2e
|
[
"LicenseRef-scancode-proprietary-license",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-secret-labs-2011"
] |
permissive
|
dstl/Stone-Soup
|
227e6a9e6fbdceca14af3f0259f311ec74095597
|
f24090cc919b3b590b84f965a3884ed1293d181d
|
refs/heads/main
| 2023-09-01T14:33:14.626428
| 2023-09-01T11:35:46
| 2023-09-01T11:35:46
| 98,420,803
| 315
| 126
|
MIT
| 2023-09-14T14:55:34
| 2017-07-26T12:34:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,394
|
py
|
yaml.py
|
from pathlib import Path
from ..base import Property
from ..serialise import YAML
from ..reader import DetectionReader, GroundTruthReader, SensorDataReader
from ..tracker import Tracker
from .base import Writer
class YAMLWriter(Writer):
"""YAML Writer"""
path: Path = Property(doc="File to save data to. Str will be converted to Path")
groundtruth_source: GroundTruthReader = Property(default=None)
sensor_data_source: SensorDataReader = Property(default=None)
detections_source: DetectionReader = Property(default=None)
tracks_source: Tracker = Property(default=None)
def __init__(self, path, *args, **kwargs):
if not isinstance(path, Path):
path = Path(path) # Ensure Path
super().__init__(path, *args, **kwargs)
if not any((self.groundtruth_source, self.sensor_data_source,
self.detections_source, self.tracks_source)):
raise ValueError("At least one source required")
self._file = self.path.open('w')
yaml = YAML()
# Required as will be writing multiple documents to file
yaml.explicit_start = True
yaml.explicit_end = True
self._yaml = yaml
def write(self):
if self.tracks_source:
gen = self.tracks_source
elif self.detections_source:
gen = self.detections_source
elif self.sensor_data_source:
gen = self.sensor_data_source
elif self.groundtruth_source:
gen = self.groundtruth_source
else: # pragma: no cover
raise RuntimeError("At least one source required")
for time, _ in gen:
data = {'time': time}
if self.tracks_source:
data['tracks'] = self.tracks_source.tracks
if self.detections_source:
data['detections'] = self.detections_source.detections
if self.sensor_data_source:
data['sensor_data'] = self.sensor_data_source.sensor_data
if self.groundtruth_source:
data['groundtruth_paths'] = \
self.groundtruth_source.groundtruth_paths
self._yaml.dump(data, self._file)
def __enter__(self):
return self
def __exit__(self, *args):
if getattr(self, '_file', None):
self._file.close()
def __del__(self):
self.__exit__()
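# A minimal usage sketch (assumes `tracker` is a configured Tracker that
# yields (time, tracks) pairs when iterated; the file name is illustrative):
#
#   with YAMLWriter('output.yaml', tracks_source=tracker) as writer:
#       writer.write()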
|
b6e3889bc2bdb6e89596113c50f3050ebee10e24
|
d21e88c04d42ea34160768b9fa9f4bb51542f8b3
|
/chapter2-deep-networks/densenet-cifar10-2.4.1.py
|
50c26b146b3df4fc225b8f5a0b96460a0f9f9c14
|
[
"MIT"
] |
permissive
|
PacktPublishing/Advanced-Deep-Learning-with-Keras
|
f8ecc5e7d4e352ebc49f18c0021dd29500c1461a
|
7f447a07eb2f3dc41c83d468ae102ab8fa9dff05
|
refs/heads/master
| 2023-07-04T19:54:29.548004
| 2023-01-30T09:52:34
| 2023-01-30T09:52:34
| 125,326,602
| 1,672
| 961
|
MIT
| 2023-04-12T21:42:17
| 2018-03-15T07:08:43
|
Python
|
UTF-8
|
Python
| false
| false
| 8,397
|
py
|
densenet-cifar10-2.4.1.py
|
"""Trains a 100-Layer DenseNet on the CIFAR10 dataset.
With data augmentation:
Greater than 93.55% test accuracy in 200 epochs
225sec per epoch on GTX 1080Ti
Densely Connected Convolutional Networks
https://arxiv.org/pdf/1608.06993.pdf
http://openaccess.thecvf.com/content_cvpr_2017/papers/
Huang_Densely_Connected_Convolutional_CVPR_2017_paper.pdf
Network below is similar to 100-Layer DenseNet-BC (k=12)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.keras.layers import Dense, Conv2D, BatchNormalization
from tensorflow.keras.layers import MaxPooling2D, AveragePooling2D
from tensorflow.keras.layers import Input, Flatten, Dropout
from tensorflow.keras.layers import concatenate, Activation
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Model
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import plot_model
from tensorflow.keras.utils import to_categorical
import os
import numpy as np
import math
# training parameters
batch_size = 32
epochs = 200
data_augmentation = True
# network parameters
num_classes = 10
num_dense_blocks = 3
use_max_pool = False
# DenseNet-BC with dataset augmentation
# Growth rate | Depth | Accuracy (paper) | Accuracy (this)      |
# 12          | 100   | 95.49%           | 93.74%               |
# 24          | 250   | 96.38%           | requires big mem GPU |
# 40          | 190   | 96.54%           | requires big mem GPU |
growth_rate = 12
depth = 100
num_bottleneck_layers = (depth - 4) // (2 * num_dense_blocks)
num_filters_bef_dense_block = 2 * growth_rate
compression_factor = 0.5
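# with depth=100 and 3 dense blocks this gives (100 - 4) // (2 * 3) = 16
# bottleneck layers per dense block, starting from 2 * 12 = 24 filters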
# load the CIFAR10 data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# input image dimensions
input_shape = x_train.shape[1:]
# normalize data
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# convert class vectors to binary class matrices.
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
def lr_schedule(epoch):
"""Learning Rate Schedule
Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
Called automatically every epoch as part of callbacks during training.
# Arguments
epoch (int): The number of epochs
# Returns
lr (float32): learning rate
"""
lr = 1e-3
if epoch > 180:
lr *= 0.5e-3
elif epoch > 160:
lr *= 1e-3
elif epoch > 120:
lr *= 1e-2
elif epoch > 80:
lr *= 1e-1
print('Learning rate: ', lr)
return lr
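# worked schedule values implied by the thresholds above:
# epochs 0-80: 1e-3, 81-120: 1e-4, 121-160: 1e-5, 161-180: 1e-6, 181+: 5e-7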
# start model definition
# densenet CNNs (composite function) are made of BN-ReLU-Conv2D
inputs = Input(shape=input_shape)
x = BatchNormalization()(inputs)
x = Activation('relu')(x)
x = Conv2D(num_filters_bef_dense_block,
kernel_size=3,
padding='same',
kernel_initializer='he_normal')(x)
x = concatenate([inputs, x])
# stack of dense blocks bridged by transition layers
for i in range(num_dense_blocks):
# a dense block is a stack of bottleneck layers
for j in range(num_bottleneck_layers):
y = BatchNormalization()(x)
y = Activation('relu')(y)
y = Conv2D(4 * growth_rate,
kernel_size=1,
padding='same',
kernel_initializer='he_normal')(y)
if not data_augmentation:
y = Dropout(0.2)(y)
y = BatchNormalization()(y)
y = Activation('relu')(y)
y = Conv2D(growth_rate,
kernel_size=3,
padding='same',
kernel_initializer='he_normal')(y)
if not data_augmentation:
y = Dropout(0.2)(y)
x = concatenate([x, y])
# no transition layer after the last dense block
if i == num_dense_blocks - 1:
continue
# transition layer compresses the number of feature maps and halves the feature map size
num_filters_bef_dense_block += num_bottleneck_layers * growth_rate
num_filters_bef_dense_block = int(num_filters_bef_dense_block * compression_factor)
y = BatchNormalization()(x)
y = Conv2D(num_filters_bef_dense_block,
kernel_size=1,
padding='same',
kernel_initializer='he_normal')(y)
if not data_augmentation:
y = Dropout(0.2)(y)
x = AveragePooling2D()(y)
# add classifier on top
# after average pooling, size of feature map is 1 x 1
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
outputs = Dense(num_classes,
kernel_initializer='he_normal',
activation='softmax')(y)
# instantiate and compile model
# orig paper uses SGD but RMSprop works better for DenseNet
model = Model(inputs=inputs, outputs=outputs)
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(1e-3),
metrics=['acc'])
model.summary()
# enable this if pydot can be installed
# pip install pydot
#plot_model(model, to_file="cifar10-densenet.png", show_shapes=True)
# prepare model saving directory
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'cifar10_densenet_model.{epoch:02d}.h5'
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
filepath = os.path.join(save_dir, model_name)
# prepare callbacks for model saving and for learning rate reducer
checkpoint = ModelCheckpoint(filepath=filepath,
monitor='val_acc',
verbose=1,
save_best_only=True)
lr_scheduler = LearningRateScheduler(lr_schedule)
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
cooldown=0,
patience=5,
min_lr=0.5e-6)
callbacks = [checkpoint, lr_reducer, lr_scheduler]
# run training, with or without data augmentation
if not data_augmentation:
print('Not using data augmentation.')
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks)
else:
print('Using real-time data augmentation.')
# preprocessing and realtime data augmentation
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=0, # randomly rotate images in the range (deg 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally
height_shift_range=0.1, # randomly shift images vertically
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
steps_per_epoch = math.ceil(len(x_train) / batch_size)
# fit the model on the batches generated by datagen.flow().
model.fit(x=datagen.flow(x_train, y_train, batch_size=batch_size),
verbose=1,
epochs=epochs,
validation_data=(x_test, y_test),
steps_per_epoch=steps_per_epoch,
callbacks=callbacks)
# fit the model on the batches generated by datagen.flow()
#model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
## steps_per_epoch=x_train.shape[0] // batch_size,
# validation_data=(x_test, y_test),
# epochs=epochs, verbose=1,
# callbacks=callbacks)
# score trained model
scores = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
|
2df6761e433ca703405a77d7ae0abaf9b786625e
|
55a973178ab56ae2a99c7f30bb65dd11270f4acb
|
/capstone/capdb/migrations/0113_auto_20210414_1532.py
|
1da6308e147e4281b72d0ee23c6ed90f5e3c80a9
|
[
"MIT"
] |
permissive
|
harvard-lil/capstone
|
fb8e72bedfe5d902293acb566c864e153da3298e
|
bec56eaa4bfb62a44260e85cf76b421172de10e0
|
refs/heads/develop
| 2023-08-25T11:15:54.572758
| 2023-08-23T13:04:38
| 2023-08-23T13:04:38
| 82,964,836
| 153
| 47
|
MIT
| 2023-09-13T15:07:30
| 2017-02-23T19:44:44
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,599
|
py
|
0113_auto_20210414_1532.py
|
# Generated by Django 2.2.20 on 2021-04-14 15:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('capdb', '0112_auto_20210401_1632'),
]
operations = [
migrations.AlterField(
model_name='citation',
name='cite',
field=models.CharField(max_length=10000),
),
migrations.AlterField(
model_name='citation',
name='normalized_cite',
field=models.CharField(max_length=10000, null=True),
),
migrations.AlterField(
model_name='historicalcitation',
name='cite',
field=models.CharField(max_length=10000),
),
migrations.AlterField(
model_name='historicalcitation',
name='normalized_cite',
field=models.CharField(max_length=10000, null=True),
),
migrations.AddIndex(
model_name='citation',
index=models.Index(fields=['cite'], name='capdb_citat_cite_14ab80_idx'),
),
migrations.AddIndex(
model_name='citation',
index=models.Index(fields=['normalized_cite'], name='capdb_citat_normali_eedcab_idx'),
),
migrations.AddIndex(
model_name='citation',
index=models.Index(fields=['rdb_cite'], name='capdb_citat_rdb_cit_430bfb_idx'),
),
migrations.AddIndex(
model_name='citation',
index=models.Index(fields=['rdb_normalized_cite'], name='capdb_citat_rdb_nor_cb632c_idx'),
),
]
|
f073d0e94a1ff2724c941f20ae8666afdb4e8964
|
e8b38b8dfa348ff006eb197a7906ca8e491a23dc
|
/tests/codegen/ccode/scripts/loops.py
|
927a52b85ce7e3784a761456539836ed507c145c
|
[
"MIT"
] |
permissive
|
pyccel/pyccel
|
d79a81dbdff1172839a6a1227abfcc1f97e6c97b
|
1896b761ba662c90b14c195bbb6eb5cddc57cbfc
|
refs/heads/devel
| 2023-08-30T12:15:25.244401
| 2023-08-28T09:31:32
| 2023-08-28T09:31:32
| 100,463,736
| 307
| 39
|
MIT
| 2023-09-14T19:29:26
| 2017-08-16T07:59:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,184
|
py
|
loops.py
|
# pylint: disable=missing-function-docstring, missing-module-docstring
#==============================================================================
def sum_natural_numbers(n : int):
x = 0
for i in range( 1, n+1 ):
x += i
return x
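# e.g. sum_natural_numbers(10) == 55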
# ...
def factorial(n : int):
x = 1
for i in range( 2, n+1 ):
x *= i
return x
# ...
def fibonacci(n : int):
x = 0
y = 1
for i in range( n ):
z = x+y
x = y
y = z
return x
# ...
def double_loop(n : int):
x = 0
for i in range( 3, 10 ):
x += 1
y = n*x
for j in range( 4, 15 ):
z = x-y
return z
# ...
def sum_natural_numbers_2(n : int):
x = 0.
for i in range( 1, n+1 ):
x= x+i
return x
# ...
def factorial_2(n : int):
x = 1.
for i in range( 2, n+1 ):
x = x*i
return x
# ...
def fibonacci_2(n : int):
x = 0.
y = 1.
for i in range( n ):
z = x+y
x = y
y = z
return x
# ...
def double_loop_2(n : int):
x = 0.
for i in range( 3, 10 ):
x = x+1
y = n*x
for j in range( 4, 15 ):
z = x-y
return z
|
97ec1aa0285530abbf8bae9033f018e46a5d22a2
|
f20112f340ac7a867a1534776b2635643f1538cc
|
/auth0/rest.py
|
41282b74772ac1dec9b6b23f313b31f23378d415
|
[
"MIT"
] |
permissive
|
auth0/auth0-python
|
213fe75f8a064ee505066e1343ad997bbcb229ff
|
8e3e51bc9ff0cb2f5f68eededf5c4d5b9f5ebb00
|
refs/heads/master
| 2023-09-04T12:40:21.796991
| 2023-09-04T09:58:44
| 2023-09-04T09:58:44
| 22,433,990
| 451
| 180
|
MIT
| 2023-09-12T15:39:20
| 2014-07-30T15:38:33
|
Python
|
UTF-8
|
Python
| false
| false
| 11,953
|
py
|
rest.py
|
from __future__ import annotations
import base64
import json
import platform
import sys
from random import randint
from time import sleep
from typing import TYPE_CHECKING, Any, Mapping
import requests
from auth0.exceptions import Auth0Error, RateLimitError
from auth0.types import RequestData, TimeoutType
if TYPE_CHECKING:
from auth0.rest_async import RequestsResponse
UNKNOWN_ERROR = "a0.sdk.internal.unknown"
class RestClientOptions:
"""Configuration object for RestClient. Used for configuring
additional RestClient options, such as rate-limit
retries.
Args:
telemetry (bool, optional): Enable or disable Telemetry
(defaults to True)
timeout (float or tuple, optional): Change the requests
connect and read timeout. Pass a tuple to specify
both values separately or a float to set both to it.
(defaults to 5.0 for both)
retries (integer): In the event an API request returns a
429 response header (indicating rate-limit has been
hit), the RestClient will retry the request this many
times using an exponential backoff strategy, before
raising a RateLimitError exception. 10 retries max.
(defaults to 3)
"""
def __init__(
self,
telemetry: bool = True,
timeout: TimeoutType = 5.0,
retries: int = 3,
) -> None:
self.telemetry = telemetry
self.timeout = timeout
self.retries = retries
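# A minimal configuration sketch (values are illustrative; `my_jwt` is a
# placeholder token):
#
#   options = RestClientOptions(telemetry=False, timeout=(3.0, 27.0), retries=5)
#   client = RestClient(jwt=my_jwt, options=options)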
class RestClient:
"""Provides simple methods for handling all RESTful api endpoints.
Args:
jwt (str, optional): The JWT to be used with the RestClient.
telemetry (bool, optional): Enable or disable Telemetry
(defaults to True)
timeout (float or tuple, optional): Change the requests
connect and read timeout. Pass a tuple to specify
both values separately or a float to set both to it.
(defaults to 5.0 for both)
options (RestClientOptions): Pass an instance of
RestClientOptions to configure additional RestClient
options, such as rate-limit retries. Overrides matching
options passed to the constructor.
(defaults to None)
"""
def __init__(
self,
jwt: str | None,
telemetry: bool = True,
timeout: TimeoutType = 5.0,
options: RestClientOptions | None = None,
) -> None:
if options is None:
options = RestClientOptions(telemetry=telemetry, timeout=timeout)
self.options = options
self.jwt = jwt
self._metrics = {"retries": 0, "backoff": []}
self._skip_sleep = False
self.base_headers = {
"Content-Type": "application/json",
}
if jwt is not None:
self.base_headers["Authorization"] = f"Bearer {self.jwt}"
if options.telemetry:
py_version = platform.python_version()
version = sys.modules["auth0"].__version__
auth0_client = json.dumps(
{
"name": "auth0-python",
"version": version,
"env": {
"python": py_version,
},
}
).encode("utf-8")
self.base_headers.update(
{
"User-Agent": f"Python/{py_version}",
"Auth0-Client": base64.b64encode(auth0_client).decode(),
}
)
# Cap the maximum number of retries to 10 or fewer. Floor the retries at 0.
self._retries = min(self.MAX_REQUEST_RETRIES(), max(0, options.retries))
# For backwards compatibility reasons only
# TODO: Deprecate in the next major so we can prune these arguments. Guidance should be to use RestClient.options.*
self.telemetry = options.telemetry
self.timeout = options.timeout
# Returns a hard cap for the maximum number of retries allowed (10)
def MAX_REQUEST_RETRIES(self) -> int:
return 10
# Returns the maximum amount of jitter to introduce in milliseconds (100ms)
def MAX_REQUEST_RETRY_JITTER(self) -> int:
return 100
# Returns the maximum delay window allowed (1000ms)
def MAX_REQUEST_RETRY_DELAY(self) -> int:
return 1000
# Returns the minimum delay window allowed (100ms)
def MIN_REQUEST_RETRY_DELAY(self) -> int:
return 100
def get(
self,
url: str,
params: dict[str, Any] | None = None,
headers: dict[str, str] | None = None,
) -> Any:
request_headers = self.base_headers.copy()
request_headers.update(headers or {})
# Track the API request attempt number
attempt = 0
# Reset the metrics tracker
self._metrics = {"retries": 0, "backoff": []}
while True:
# Increment attempt number
attempt += 1
# Issue the request
response = requests.get(
url,
params=params,
headers=request_headers,
timeout=self.options.timeout,
)
# If the response did not have a 429 header, or the attempt number is greater than the configured retries, break
if response.status_code != 429 or attempt > self._retries:
break
wait = self._calculate_wait(attempt)
# Skip calling sleep() when running unit tests
if self._skip_sleep is False:
# sleep() functions in seconds, so convert the milliseconds formula above accordingly
sleep(wait / 1000)
# Return the final Response
return self._process_response(response)
def post(
self,
url: str,
data: RequestData | None = None,
headers: dict[str, str] | None = None,
) -> Any:
request_headers = self.base_headers.copy()
request_headers.update(headers or {})
response = requests.post(
url, json=data, headers=request_headers, timeout=self.options.timeout
)
return self._process_response(response)
def file_post(
self,
url: str,
data: RequestData | None = None,
files: dict[str, Any] | None = None,
) -> Any:
headers = self.base_headers.copy()
headers.pop("Content-Type", None)
response = requests.post(
url, data=data, files=files, headers=headers, timeout=self.options.timeout
)
return self._process_response(response)
def patch(self, url: str, data: RequestData | None = None) -> Any:
headers = self.base_headers.copy()
response = requests.patch(
url, json=data, headers=headers, timeout=self.options.timeout
)
return self._process_response(response)
def put(self, url: str, data: RequestData | None = None) -> Any:
headers = self.base_headers.copy()
response = requests.put(
url, json=data, headers=headers, timeout=self.options.timeout
)
return self._process_response(response)
def delete(
self,
url: str,
params: dict[str, Any] | None = None,
data: RequestData | None = None,
) -> Any:
headers = self.base_headers.copy()
response = requests.delete(
url,
headers=headers,
params=params or {},
json=data,
timeout=self.options.timeout,
)
return self._process_response(response)
def _calculate_wait(self, attempt: int) -> int:
# Retry the request. Apply an exponential backoff for subsequent attempts, using this formula:
# max(MIN_REQUEST_RETRY_DELAY, min(MAX_REQUEST_RETRY_DELAY, (100ms * (2 ** attempt - 1)) + random_between(1, MAX_REQUEST_RETRY_JITTER)))
# Increases base delay by (100ms * (2 ** attempt - 1))
wait = 100 * 2 ** (attempt - 1)
# Introduces jitter to the base delay; increases delay between 1ms to MAX_REQUEST_RETRY_JITTER (100ms)
wait += randint(1, self.MAX_REQUEST_RETRY_JITTER())
# Is never more than MAX_REQUEST_RETRY_DELAY (1s)
wait = min(self.MAX_REQUEST_RETRY_DELAY(), wait)
# Is never less than MIN_REQUEST_RETRY_DELAY (100ms)
wait = max(self.MIN_REQUEST_RETRY_DELAY(), wait)
self._metrics["retries"] = attempt
self._metrics["backoff"].append(wait) # type: ignore[attr-defined]
return wait
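# Illustrative windows produced by _calculate_wait (jitter makes the exact
# value vary within each range):
#   attempt 1: 100ms base -> 101-200ms
#   attempt 2: 200ms base -> 201-300ms
#   attempt 3: 400ms base -> 401-500ms
#   attempt 4: 800ms base -> 801-900ms
#   attempt 5+: base exceeds the cap -> 1000ms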
def _process_response(self, response: requests.Response) -> Any:
return self._parse(response).content()
def _parse(self, response: requests.Response) -> Response:
if not response.text:
return EmptyResponse(response.status_code)
try:
return JsonResponse(response)
except ValueError:
return PlainResponse(response)
class Response:
def __init__(
self, status_code: int, content: Any, headers: Mapping[str, str]
) -> None:
self._status_code = status_code
self._content = content
self._headers = headers
def content(self) -> Any:
if self._is_error():
if self._status_code == 429:
reset_at = int(self._headers.get("x-ratelimit-reset", "-1"))
raise RateLimitError(
error_code=self._error_code(),
message=self._error_message(),
reset_at=reset_at,
)
if self._error_code() == "mfa_required":
raise Auth0Error(
status_code=self._status_code,
error_code=self._error_code(),
message=self._error_message(),
content=self._content,
)
raise Auth0Error(
status_code=self._status_code,
error_code=self._error_code(),
message=self._error_message(),
)
else:
return self._content
def _is_error(self) -> bool:
return self._status_code is None or self._status_code >= 400
# Adding these methods to force implementation in subclasses because they are referenced in this parent class
def _error_code(self):
raise NotImplementedError
def _error_message(self):
raise NotImplementedError
class JsonResponse(Response):
def __init__(self, response: requests.Response | RequestsResponse) -> None:
content = json.loads(response.text)
super().__init__(response.status_code, content, response.headers)
def _error_code(self) -> str:
if "errorCode" in self._content:
return self._content.get("errorCode")
elif "error" in self._content:
return self._content.get("error")
elif "code" in self._content:
return self._content.get("code")
else:
return UNKNOWN_ERROR
def _error_message(self) -> str:
if "error_description" in self._content:
return self._content.get("error_description")
message = self._content.get("message", "")
if message is not None and message != "":
return message
return self._content.get("error", "")
class PlainResponse(Response):
def __init__(self, response: requests.Response | RequestsResponse) -> None:
super().__init__(response.status_code, response.text, response.headers)
def _error_code(self) -> str:
return UNKNOWN_ERROR
def _error_message(self) -> str:
return self._content
class EmptyResponse(Response):
def __init__(self, status_code: int) -> None:
super().__init__(status_code, "", {})
def _error_code(self) -> str:
return UNKNOWN_ERROR
def _error_message(self) -> str:
return ""
|
19cc677a7d30ba74c49b55cab070dd941ba66c8b
|
069c2295076c482afadfe6351da5ae02be8e18e6
|
/tests/urlpatterns/more_urls.py
|
8fc3abd51867a235f9bc1fea1ecbaf9d4a07d5b3
|
[
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Python-2.0.1",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] |
permissive
|
django/django
|
5eb557f57053631cd4f566f451e43197309dbeeb
|
c74a6fad5475495756a5bdb18b2cab2b68d429bc
|
refs/heads/main
| 2023-09-01T03:43:44.033530
| 2023-08-31T08:27:32
| 2023-08-31T08:27:32
| 4,164,482
| 73,530
| 38,187
|
BSD-3-Clause
| 2023-09-14T20:03:48
| 2012-04-28T02:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 209
|
py
|
more_urls.py
|
from django.urls import re_path
from . import views
urlpatterns = [
re_path(
r"^more/(?P<extra>\w+)/$",
views.empty_view,
{"sub-extra": True},
name="inner-more",
),
]
|
2a7327aaf9807b1924f79e59d62d9d03846373fd
|
08fb252e3f70e245cdd0e5a8fed47f6fada8b6f8
|
/examples/BinaryTides/makeplot.py
|
b2d4dae9d0dee62b408e75cf23203db3adc53eef
|
[
"MIT"
] |
permissive
|
VirtualPlanetaryLaboratory/vplanet
|
3bba2126520cbe7bee1a512f87435064d3545517
|
cb683af69e80e07bc17c06e45678effdc98fc19a
|
refs/heads/main
| 2023-08-31T04:13:58.700282
| 2023-08-30T05:10:20
| 2023-08-30T05:10:20
| 138,067,409
| 128
| 54
|
MIT
| 2023-09-06T21:30:37
| 2018-06-20T17:53:00
|
C
|
UTF-8
|
Python
| false
| false
| 2,887
|
py
|
makeplot.py
|
"""
This script produces a reproduction of Figure 1 of Zahn & Bouchet (1989)
using a coupled EQTIDE and STELLAR VPLANET run.
David P. Fleming, University of Washington, 2018
"""
import pathlib
import sys
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import vplot
import vplanet
# Path hacks
path = pathlib.Path(__file__).parents[0].absolute()
sys.path.insert(1, str(path.parents[0]))
from get_args import get_args
# Typical plot parameters that make for a pretty plot
mpl.rcParams["figure.figsize"] = (10, 8)
mpl.rcParams["font.size"] = 18.0
# Run vplanet
output = vplanet.run(path / "vpl.in", units=False)
# Load data
time = output.primary.Time
# Extract important quantities
# saOutputOrder Time -TotEn -TotAngMom -Semim -Radius -RotPer Ecce -RotRate -MeanMotion -OrbPer -SurfEnFluxTotal
Omega = output.secondary.RotRate # Rotation Rate
omega = output.secondary.MeanMotion # Mean Motion
e = output.secondary.Eccentricity
period = 2.0 * np.pi / omega
om = Omega / omega
# Plot it!
fig, ax = plt.subplots()
# Twin the x-axis twice to make independent y-axes.
axes = [ax, ax.twinx(), ax.twinx()]
# Make some space on the right side for the extra y-axis.
fig.subplots_adjust(right=0.75)
# Move the last y-axis spine over to the right by 20% of the width of the axes
axes[-1].spines["right"].set_position(("axes", 1.2))
# To make the border of the right-most axis visible, we need to turn the frame
# on. This hides the other plots, however, so we need to turn its fill off.
axes[-1].set_frame_on(True)
axes[-1].patch.set_visible(False)
# And finally we get to plot things...
data = [e, period, om]
labels = [r"Eccentricity", r"Orbital Period [days]", r"$\Omega$/$n$"]
legend_labels = [r"Eccentricity", r"Orbital Period", r"$\Omega$/$n$"]
linestyle = ["-", "--", "-."]
colors = ["#C91111", "#642197", "#1321D8"]
# Plot each quantity and format its axes
for i in range(0, len(axes)):
axes[i].plot(time, data[i], lw=3, ls=linestyle[i], color=colors[i], label="")
axes[i].set_ylabel(labels[i], color=colors[i])
axes[i].tick_params(axis="y", colors=colors[i])
axes[i].set_xlim(time[1], time[-1])
axes[i].set_xscale("log")
axes[i].grid(False)
for ii in range(len(axes)):
ax.plot(
[100], [100], color=colors[ii], ls=linestyle[ii], label=legend_labels[ii], lw=3
)
ax.legend(loc="lower left")
# Additional formatting
axes[1].set_ylabel(labels[1], color=colors[1], labelpad=15)
axes[0].set_ylim(0.0, 0.35)
axes[0].set_xlabel("Time [yr]")
axes[0].tick_params(axis="x", pad=10)
# Add text to plot to mirror Zahn+1989 figure
plt.text(
0.77,
0.9,
r"1 M$_{\odot}$ + 1 M$_{\odot}$",
horizontalalignment="center",
verticalalignment="center",
transform=axes[0].transAxes,
)
# Save figure
fig.tight_layout()
ext = get_args().ext
fig.savefig(path / f"BinaryTides.{ext}", bbox_inches="tight", dpi=600)
|
731811a03b12daa0e273df9b0a0a3fa21ea95d6c
|
c491b5171775447a9ab33a036be1375f7b71ab2f
|
/tests/functional/order/test_emails.py
|
5136ba41651aab95f44227e1b6359d845053bdf7
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
django-oscar/django-oscar
|
e4abd486d51d6173dafbd9c10b59675858196e61
|
5edac196f41f8cc97f8a07f7579f1041db2a02af
|
refs/heads/master
| 2023-08-30T16:19:12.081909
| 2023-07-14T11:53:30
| 2023-07-14T11:53:30
| 1,151,051
| 5,320
| 2,524
|
BSD-3-Clause
| 2023-09-13T19:48:30
| 2010-12-08T21:30:32
|
Python
|
UTF-8
|
Python
| false
| false
| 3,759
|
py
|
test_emails.py
|
import os
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings
from oscar.core.loading import get_class
from oscar.test.factories import ProductImageFactory, create_order
from oscar.test.utils import EmailsMixin, remove_image_folders
OrderDispatcher = get_class("order.utils", "OrderDispatcher")
class TestConcreteEmailsSending(EmailsMixin, TestCase):
def setUp(self):
super().setUp()
self.dispatcher = OrderDispatcher()
def test_send_order_placed_email_for_user(
self, order_number=None, additional_context=None
):
order_number = order_number if order_number else "SOME-NUM00042"
order = create_order(number=order_number, user=self.user, request=self.request)
extra_context = {"order": order, "lines": order.lines.all()}
if additional_context:
extra_context.update(additional_context)
self.dispatcher.send_order_placed_email_for_user(order, extra_context)
self._test_common_part()
expected_subject = "Confirmation of order {}".format(order_number)
assert expected_subject == mail.outbox[0].subject
assert "Your order contains:" in mail.outbox[0].body
product_title = order.lines.first().title
assert product_title in mail.outbox[0].body
@override_settings(SITE_ID=None, ALLOWED_HOSTS=["example.com"])
def test_send_order_placed_email_for_user_multisite(self):
with self.assertRaises(
ImproperlyConfigured, msg=self.DJANGO_IMPROPERLY_CONFIGURED_MSG
):
self.test_send_order_placed_email_for_user()
additional_context = {"request": self.request}
self.test_send_order_placed_email_for_user(
order_number="SOME-NUM00043", additional_context=additional_context
)
def test_send_order_placed_email_with_attachments_for_user(
self, order_number=None, additional_context=None
):
remove_image_folders()
order_number = order_number if order_number else "SOME-NUM00042"
order = create_order(number=order_number, user=self.user, request=self.request)
extra_context = {"order": order, "lines": order.lines.all()}
if additional_context:
extra_context.update(additional_context)
line = order.lines.first()
product_image = ProductImageFactory(product=line.product)
attachments = [
["fake_file.html", b"file_content", "text/html"],
["fake_image.png", b"file_content", "image/png"],
product_image.original.path, # To test sending file from `FileField` based field
]
self.dispatcher.send_order_placed_email_for_user(
order, extra_context, attachments
)
# All attachments were sent with email
assert len(mail.outbox[0].attachments) == 3
expected_attachments = ["fake_file.html", "fake_image.png", "test_image.jpg"]
assert [
attachment[0] for attachment in mail.outbox[0].attachments
] == expected_attachments
# Remove test file
os.remove(product_image.original.path)
@override_settings(SITE_ID=None, ALLOWED_HOSTS=["example.com"])
def test_send_order_placed_email_with_attachments_for_user_multisite(self):
with self.assertRaises(
ImproperlyConfigured, msg=self.DJANGO_IMPROPERLY_CONFIGURED_MSG
):
self.test_send_order_placed_email_with_attachments_for_user()
additional_context = {"request": self.request}
self.test_send_order_placed_email_with_attachments_for_user(
order_number="SOME-NUM00043", additional_context=additional_context
)
|
f98568e816cf58ca8e090c8152d2d6a18f0414f7
|
091e97bcfe5acc0635bd601aa8497e377b74d41a
|
/ansible/roles/lib_openshift_3.2/build/ansible/oc_label.py
|
b20c70758b45a6e71dcdba552761e3844c0fa69c
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
openshift/openshift-tools
|
d59b63778f25cb8fb3c7a0253afe22a173e72f9d
|
e342f6659a4ef1a188ff403e2fc6b06ac6d119c7
|
refs/heads/prod
| 2023-08-30T01:52:04.108978
| 2022-03-23T21:07:28
| 2022-03-23T21:07:28
| 36,827,699
| 170
| 254
|
Apache-2.0
| 2022-06-16T12:11:51
| 2015-06-03T20:09:22
|
Python
|
UTF-8
|
Python
| false
| false
| 4,247
|
py
|
oc_label.py
|
# vim: expandtab:tabstop=4:shiftwidth=4
# pylint: skip-file
#pylint: disable=too-many-branches
def main():
'''
ansible oc module for labels
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present', 'absent', 'list', 'add']),
debug=dict(default=False, type='bool'),
kind=dict(default=None, type='str', required=True,
choices=['node', 'pod', 'namespace']),
name=dict(default=None, type='str'),
namespace=dict(default=None, type='str'),
labels=dict(default=None, type='list'),
selector=dict(default=None, type='str'),
host=dict(default=None, type='str'),
),
supports_check_mode=True,
mutually_exclusive=[['name', 'selector']],
)
oc_label = OCLabel(module.params['name'],
module.params['namespace'],
module.params['kind'],
module.params['kubeconfig'],
module.params['labels'],
module.params['selector'],
verbose=module.params['debug'])
state = module.params['state']
name = module.params['name']
selector = module.params['selector']
api_rval = oc_label.get()
#####
# Get
#####
if state == 'list':
module.exit_json(changed=False, results=api_rval['results'], state="list")
#######
# Add
#######
if state == 'add':
if not (name or selector):
module.fail_json( msg="Parameter 'name' or 'selector' is required if state == 'add'")
if not oc_label.all_user_labels_exist():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed an addition.')
api_rval = oc_label.add()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="add")
module.exit_json(changed=False, state="add")
########
# Delete
########
if state == 'absent':
if not (name or selector):
module.fail_json( msg="Parameter 'name' or 'selector' is required if state == 'absent'")
if oc_label.any_label_exists():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a delete.')
api_rval = oc_label.delete()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="absent")
module.exit_json(changed=False, state="absent")
if state == 'present':
########
# Update
########
if not (name or selector):
module.fail_json( msg="Parameter 'name' or 'selector' is required if state == 'present'")
# if all the labels passed in don't already exist
# or if there are currently stored labels that haven't
# been passed in
if not oc_label.all_user_labels_exist() or \
oc_label.extra_current_labels():
if module.check_mode:
module.exit_json(changed=False, msg='Would have made changes.')
api_rval = oc_label.replace()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
# return the created object
api_rval = oc_label.get()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
module.exit_json(changed=False, results=api_rval, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required
from ansible.module_utils.basic import *
main()
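# An illustrative playbook task for this module (values are placeholders; the
# key/value entry format for `labels` is an assumption based on the list
# parameter above):
#
#   - oc_label:
#       state: add
#       kind: node
#       name: node1.example.com
#       labels:
#         - key: region
#           value: infra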
|
8067130d7f6280452761dbe8d0c5d4c2b4a98d68
|
12f0bd77926127cdacc2452d6f9cfed91806b2fe
|
/idaes/models/properties/modular_properties/coolprop/coolprop_forms.py
|
c6ea1f79fa7906516273247d1eeecc27c1af5aed
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
IDAES/idaes-pse
|
e03d2583ae1ba968a7099f9f439fd8c3efa12904
|
deacf4c422bc9e50cb347e11a8cbfa0195bd4274
|
refs/heads/main
| 2023-08-16T19:13:00.355572
| 2023-08-04T04:19:29
| 2023-08-04T04:19:29
| 168,622,088
| 173
| 227
|
NOASSERTION
| 2023-09-11T16:04:55
| 2019-02-01T01:12:51
|
Python
|
UTF-8
|
Python
| false
| false
| 7,380
|
py
|
coolprop_forms.py
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES).
#
# Copyright (c) 2018-2023 by the software owners: The Regents of the
# University of California, through Lawrence Berkeley National Laboratory,
# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
# University, West Virginia University Research Corporation, et al.
# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
# for full copyright and license information.
#################################################################################
"""
This module contains functions for constructing CoolProp expressions.
"""
from pyomo.environ import exp, units as pyunits, Var
from pyomo.core.expr.calculus.derivatives import Modes, differentiate
from idaes.core.util.exceptions import ConfigurationError
# TODO : Only have temperature derivative expression for exponential_tau form
# TODO : Add other derivative forms as/if required
def parameters_nt_sum(cobj, prop, nlist, tlist):
"""
Create parameters for expression forms using n-t parameters
Args:
cobj: Component object that will contain the parameters
prop: name of property parameters are associated with
nlist: list of values for n-parameter
tlist: list of values for t-parameter
Returns:
None
"""
if len(nlist) != len(tlist):
raise ConfigurationError(
f"{cobj.name} mismatched length between n and t parameters "
f"for CoolProp exponential form for property {prop}. Please "
f"ensure the number of n and t parameters are equal."
)
# Use multiple Vars, instead of a single indexed Var, to keep the same
# structure as cases where each parameter value has different units
for i, nval in enumerate(nlist):
coeff = Var(
doc="Multiplying parameter for CoolProp exponential form",
units=pyunits.dimensionless,
)
cobj.add_component(prop + "_coeff_n" + str(i + 1), coeff)
coeff.fix(nval)
for i, tval in enumerate(tlist):
coeff = Var(
doc="Exponent parameter for CoolProp exponential form",
units=pyunits.dimensionless,
)
cobj.add_component(prop + "_coeff_t" + str(i + 1), coeff)
coeff.fix(tval)
def _nt_sum(cobj, prop, theta):
"""
Create sum expressions in n-t forms (sum(n[i]*theta**t[i]))
Args:
cobj: Component object that will contain the parameters
prop: name of property parameters are associated with
theta: expression or variable to use for theta in expression
Returns:
Pyomo expression of sum term
"""
# Build sum term
i = 1
s = 0
while True:
try:
ni = getattr(cobj, f"{prop}_coeff_n{i}")
ti = getattr(cobj, f"{prop}_coeff_t{i}")
s += ni * theta**ti
i += 1
except AttributeError:
break
return s
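# For reference, _nt_sum builds s = sum_i(n_i * theta**t_i), the n-t
# coefficient sum shared by the exponential and non-exponential forms below.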
def expression_exponential(cobj, prop, T, yc, tau=False):
"""
Create expressions for CoolProp exponential sum forms. This function
supports both exponential forms used by CoolProp:
Without tau: y = yc * exp(sum(ni*theta^ti))
With tau: y = yc * exp((Tc/T) * sum(ni*theta^ti))
Args:
cobj: Component object that will contain the parameters
prop: name of property parameters are associated with
T: temperature to use in expression
yc: value of property at critical point
tau: whether tau=Tc/T should be included in expression (default=False)
Returns:
Pyomo expression matching CoolProp exponential sum form
"""
Tc = cobj.temperature_crit
theta = 1 - T / Tc
s = _nt_sum(cobj, prop, theta)
if tau:
return yc * exp(Tc / T * s)
else:
return yc * exp(s)
def dT_expression_exponential(cobj, prop, T, yc, tau=False):
"""
Create expressions for temperature derivative of CoolProp exponential sum
forms with tau
Args:
cobj: Component object that will contain the parameters
prop: name of property parameters are associated with
T: temperature to use in expression
yc: value of property at critical point
tau: whether tau=Tc/T should be included in expression (default=False)
Returns:
Pyomo expression for temperature derivative of CoolProp exponential sum
form with tau
"""
# y = yc * exp(Tc/T * sum(ni*theta^ti))
# Need d(y)/dT
y = expression_exponential(cobj, prop, T, yc, tau)
return differentiate(expr=y, wrt=T, mode=Modes.reverse_symbolic)
def expression_nonexponential(cobj, prop, T, yc):
"""
Create expressions for CoolProp non-exponential sum forms
Args:
cobj: Component object that will contain the parameters
prop: name of property parameters are associated with
T: temperature to use in expression
yc: value of property at critical point
Returns:
Pyomo expression matching CoolProp non-exponential sum form
"""
# y = yc * (1 + sum(ni*theta^ti))
Tc = cobj.temperature_crit
theta = 1 - T / Tc
s = _nt_sum(cobj, prop, theta)
return yc * (1 + s)
def parameters_polynomial(cobj, prop, prop_units, alist, blist):
"""
Create parameters for expression forms using A-B parameters (rational
polynomial forms)
Args:
cobj: Component object that will contain the parameters
prop: name of property parameters are associated with
prop_units: units of measurement for property
alist: list of values for A-parameter
blist: list of values for B-parameter
Returns:
None
"""
for i, aval in enumerate(alist):
if i == 0:
param_units = prop_units
else:
param_units = prop_units / pyunits.K**i
coeff = Var(doc="A parameter for CoolProp polynomial form", units=param_units)
cobj.add_component(prop + "_coeff_A" + str(i), coeff)
coeff.fix(aval)
for i, bval in enumerate(blist):
if i == 0:
param_units = pyunits.dimensionless
else:
param_units = pyunits.K**-i
coeff = Var(doc="B parameter for CoolProp exponential form", units=param_units)
cobj.add_component(prop + "_coeff_B" + str(i), coeff)
coeff.fix(bval)
def expression_polynomial(cobj, prop, T):
"""
Create expressions for CoolProp rational polynomial forms
Args:
cobj: Component object that will contain the parameters
prop: name of property parameters are associated with
T: temperature to use in expression
Returns:
Pyomo expression matching CoolProp rational polynomial form
"""
i = 0
asum = 0
try:
while True:
Ai = getattr(cobj, f"{prop}_coeff_A{i}")
asum += Ai * T**i
i += 1
except AttributeError:
pass
i = 0
bsum = 0
try:
while True:
Bi = getattr(cobj, f"{prop}_coeff_B{i}")
bsum += Bi * T**i
i += 1
except AttributeError:
pass
return asum / bsum
|
292ecfa0fdadfcdc5c2dfcbd58357ec4c2000aab
|
9de0cec678bc4a3bec2b4adabef9f39ff5b4afac
|
/PWGJE/EMCALJetTasks/Tracks/analysis/plots/TriggeredSpectrumComparisonPlot.py
|
db520bccbaad025a880ed2ee312525394989f616
|
[] |
permissive
|
alisw/AliPhysics
|
91bf1bd01ab2af656a25ff10b25e618a63667d3e
|
5df28b2b415e78e81273b0d9bf5c1b99feda3348
|
refs/heads/master
| 2023-08-31T20:41:44.927176
| 2023-08-31T14:51:12
| 2023-08-31T14:51:12
| 61,661,378
| 129
| 1,150
|
BSD-3-Clause
| 2023-09-14T18:48:45
| 2016-06-21T19:31:29
|
C++
|
UTF-8
|
Python
| false
| false
| 3,279
|
py
|
TriggeredSpectrumComparisonPlot.py
|
#**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
from PWG.PWGJE.EMCALJetTasks.Tracks.analysis.base.ComparisonData import ComparisonData,ComparisonObject,ComparisonPlot
from PWG.PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics import Frame
class PtSpectrumFrame(Frame):
def __init__(self, name):
Frame.__init__(self, "ptframe%s" %(name), 0., 100., 1e-10, 100)
self.SetXtitle("p_{t} (GeV/c)")
self.SetYtitle("1/N_{event} dN/dp_{t} ((GeV/c)^{-1})")
class EnergySpectrumFrame(Frame):
def __init__(self, name):
Frame.__init__(self, "ptframe%s" %(name), 0., 100., 1e-10, 100)
self.SetXtitle("p_{t} (GeV/c)")
self.SetYtitle("1/N_{event} dN/dp_{t} ((GeV/c)^{-1})")
class SpectraComparisonObject(ComparisonObject):
def __init__(self, trigger, data, style):
ComparisonObject.__init__(self, data, style)
self.__triggername = trigger
def GetLegendTitle(self):
return self.__triggername
def GetObjectName(self):
return "Rawspectrum%s" %(self.__triggername)
class TriggeredSpectrumComparisonPlot(ComparisonPlot):
"""
Comparing raw spectra of different trigger classes
"""
def __init__(self, frame, name = "spectrumcomparison"):
"""
Constructor
"""
ComparisonPlot.__init__(self)
self._comparisonContainer = ComparisonData()
self.SetFrame(frame)
self.SetLegendAttributes(0.5, 0.65, 0.89, 0.89)
self.SetPadAttributes(True, True, False, False)
self.__name = name
def AddSpectrum(self, trigger, spectrum, style):
self._comparisonContainer.AddEntry(SpectraComparisonObject(trigger, spectrum, style))
def Create(self):
self._Create("canvas%s" %(self.__name), "Spectrum Comparison %s" %(self.__name))
class PtTriggeredSpectrumComparisonPlot(TriggeredSpectrumComparisonPlot):
def __init__(self,name):
TriggeredSpectrumComparisonPlot.__init__(self, PtSpectrumFrame(name), name)
class EnergyTriggeredSpectrumComparisonPlot(TriggeredSpectrumComparisonPlot):
def __init__(self, name):
TriggeredSpectrumComparisonPlot.__init__(self, EnergySpectrumFrame(name), name)
|
923a07a5405a31b39ce0a1f839b331783c7a9746
|
df1254b56f35b24644e00493c50d4b6eb3c15b7b
|
/colour/examples/appearance/examples_nayatani95.py
|
1ec159478d53aaa0a919c02bf51564358c9ad75c
|
[
"BSD-3-Clause"
] |
permissive
|
colour-science/colour
|
908400b227cf81668675e41099256ce50b23ae4b
|
1fdf3b3042922e8d4f86b989b00a06e7e5d81102
|
refs/heads/develop
| 2023-09-01T23:17:07.186869
| 2023-08-26T09:40:45
| 2023-08-26T09:40:45
| 17,114,363
| 1,756
| 301
|
BSD-3-Clause
| 2023-09-14T10:24:37
| 2014-02-23T18:55:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,321
|
py
|
examples_nayatani95.py
|
"""Showcases *Nayatani (1995)* colour appearance model computations."""
import numpy as np
import colour
from colour.appearance.nayatani95 import CAM_ReferenceSpecification_Nayatani95
from colour.utilities import message_box
message_box('"Nayatani (1995)" Colour Appearance Model Computations')
XYZ = np.array([19.01, 20.00, 21.78])
XYZ_n = np.array([95.05, 100.00, 108.88])
Y_o = 20.0
E_o = 5000.0
E_or = 1000.0
message_box(
f'Converting to the "Nayatani (1995)" colour appearance model '
f"specification using given parameters:\n\n"
f"\tXYZ: {XYZ}\n"
f"\tXYZ_n: {XYZ_n}\n"
f"\tY_o: {Y_o}\n"
f"\tE_o: {E_o}\n"
f"\tE_or: {E_or}"
)
specification = colour.XYZ_to_Nayatani95(XYZ, XYZ_n, Y_o, E_o, E_or)
print(specification)
print("\n")
message_box(
'Broadcasting the current output "Nayatani (1995)" colour appearance '
"model specification to the reference specification.\n"
"The intent of this reference specification is to provide names "
'as close as possible to the "Mark D. Fairchild" reference.\n'
"The current output specification is meant to be consistent with "
"the other colour appearance model specification by using same "
"argument names for consistency wherever possible."
)
print(CAM_ReferenceSpecification_Nayatani95(*specification.values))
|
05603932b9c677c80deb8c3a1bde6a4fe5056692
|
ff79c73d6c4d9f53099880a9ce5f614685268601
|
/fuzzers/machxo3/021-glb-entry/1300/fuzzer.py
|
35b9e7aaa10518808f44e31a6cbd950c151bf6a6
|
[
"ISC",
"MIT"
] |
permissive
|
YosysHQ/prjtrellis
|
79f88b5a398c67730601813330f77826902b7664
|
e830a28077e1a789d32e75841312120ae624c8d6
|
refs/heads/master
| 2023-08-06T17:00:57.091823
| 2023-07-03T15:20:30
| 2023-07-03T15:20:30
| 123,840,862
| 152
| 33
|
NOASSERTION
| 2023-09-08T23:34:29
| 2018-03-04T23:57:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,272
|
py
|
fuzzer.py
|
from fuzzconfig import FuzzConfig
import interconnect
import pytrellis
jobs = [
{
"cfg": FuzzConfig(job="GLB_ENTRY", family="MachXO3", device="LCMXO3LF-1300E", ncl="tap.ncl",
tiles=["CENTER6:CENTER_EBR_CIB"]),
"left_net": "R6C{}_HPSX{:02d}00",
"right_net": "R6C{}_HPSX{:02d}00"
},
]
def main():
# left_end and right_end are 1200HC-specific. However, the results
# also readily apply to 2000HC devices because they also have a
# CENTER_EBR_CIB tile (without qualifiers).
def left_end(x):
return 8 if x % 2 == 0 else 7
def right_end(x):
if x == 0 or x == 4:
return 18
elif x == 1 or x == 5:
return 19
else:
return 17
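# For reference, the column spans these helpers produce for x = 0..7:
#   left_end:  8, 7, 8, 7, 8, 7, 8, 7
#   right_end: 18, 19, 17, 17, 18, 19, 17, 17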
pytrellis.load_database("../../../../database")
for job in jobs:
cfg = job["cfg"]
cfg.setup()
netnames = []
netnames += [job["left_net"].format(left_end(x), x) for x in range(8)]
netnames += [job["right_net"].format(right_end(x), x) for x in range(8)]
interconnect.fuzz_interconnect_with_netnames(config=cfg, netnames=netnames,
netname_filter_union=False)
if __name__ == "__main__":
main()
|
30768f4bfa423d9174e52243536456c5710eb37f
|
93713f46f16f1e29b725f263da164fed24ebf8a8
|
/Library/lib/python3.7/site-packages/sympy/concrete/__init__.py
|
5ff320415b24b165c24060431c5125fd198e0b9a
|
[
"BSD-3-Clause"
] |
permissive
|
holzschu/Carnets
|
b83d15136d25db640cea023abb5c280b26a9620e
|
1ad7ec05fb1e3676ac879585296c513c3ee50ef9
|
refs/heads/master
| 2023-02-20T12:05:14.980685
| 2023-02-13T15:59:23
| 2023-02-13T15:59:23
| 167,671,526
| 541
| 36
|
BSD-3-Clause
| 2022-11-29T03:08:22
| 2019-01-26T09:26:46
|
Python
|
UTF-8
|
Python
| false
| false
| 78
|
py
|
__init__.py
|
from .products import product, Product
from .summations import summation, Sum
|
0062225066346b04fb1688d623bbd6f108bd3638
|
96c1f13473cf224113185902edd4c9c01091e106
|
/tests/theseus_tests/optimizer/linear/test_lu_cuda_sparse_solver.py
|
6a1db2aab7c0d6e180b1dd8767ccc3d6a0594a3b
|
[
"MIT"
] |
permissive
|
facebookresearch/theseus
|
f1e488eb5a25f5ba74a6995911bee958b5da4cf3
|
240e1206329d42fedd40399684d6e17e455c6645
|
refs/heads/main
| 2023-08-11T07:33:12.328520
| 2023-08-02T12:58:01
| 2023-08-02T12:58:01
| 429,570,359
| 1,410
| 105
|
MIT
| 2023-08-01T14:30:01
| 2021-11-18T20:28:27
|
Python
|
UTF-8
|
Python
| false
| false
| 6,476
|
py
|
test_lu_cuda_sparse_solver.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest # noqa: F401
import torch
import theseus as th
def _build_sparse_mat(batch_size, rng):
all_cols = list(range(10))
col_ind = []
row_ptr = [0]
for i in range(12):
start = max(0, i - 2)
end = min(i + 1, 10)
col_ind += all_cols[start:end]
row_ptr.append(len(col_ind))
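# the loop above yields a banded 12x10 CSR pattern: row i has nonzeros in
# columns max(0, i-2)..min(i, 9), i.e. at most three entries per row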
data = torch.randn((batch_size, len(col_ind)), dtype=torch.double, generator=rng)
return 12, 10, data, col_ind, row_ptr
@pytest.mark.cudaext
@pytest.mark.parametrize("batch_size", [1, 32])
@pytest.mark.parametrize("float_damping", [True, False])
def test_sparse_solver(batch_size: int, float_damping: bool):
rng = torch.Generator()
rng.manual_seed(0)
if not torch.cuda.is_available():
return
void_objective = th.Objective()
void_ordering = th.VariableOrdering(void_objective, default_order=False)
solver = th.LUCudaSparseSolver(
void_objective,
linearization_kwargs={"ordering": void_ordering},
)
linearization = solver.linearization
void_objective._batch_size = batch_size
num_rows, num_cols, data, col_ind, row_ptr = _build_sparse_mat(batch_size, rng)
linearization.num_rows = num_rows
linearization.num_cols = num_cols
linearization.A_val = data.cuda()
linearization.A_col_ind = col_ind
linearization.A_row_ptr = row_ptr
linearization.b = torch.randn(
(batch_size, num_rows), dtype=torch.double, generator=rng
).cuda()
# Only need this line for the test since the objective is a mock
solver.reset(batch_size=batch_size)
if float_damping:
damping = 1e-4
else:
damping = 0.01 * torch.rand(batch_size, generator=rng) # type: ignore
solved_x = solver.solve(damping=damping, ellipsoidal_damping=False)
for i in range(batch_size):
csrAi = linearization.structure().csr_straight(linearization.A_val[i, :].cpu())
Ai = torch.tensor(csrAi.todense(), dtype=torch.double)
ata = Ai.T @ Ai
b = linearization.b[i].cpu()
atb = torch.DoubleTensor(csrAi.transpose() @ b)
        # the linear system being solved is the damped normal equations:
        # (AtA + damping * I) x = Atb, which is what the check below verifies
solved_xi_cpu = solved_x[i].cpu()
damp = damping if float_damping else damping[i] # type: ignore
atb_check = ata @ solved_xi_cpu + damp * solved_xi_cpu
torch.testing.assert_close(atb, atb_check, atol=1e-2, rtol=1e-2)
def check_sparse_solver_multistep(batch_size: int, test_exception: bool):
rng = torch.Generator()
rng.manual_seed(37)
if not torch.cuda.is_available():
return
num_steps = 3
torch.manual_seed(37)
void_objective = th.Objective()
void_ordering = th.VariableOrdering(void_objective, default_order=False)
solver = th.LUCudaSparseSolver(
void_objective, linearization_kwargs={"ordering": void_ordering}
)
linearization = solver.linearization
void_objective._batch_size = batch_size
num_rows, num_cols, data, col_ind, row_ptr = _build_sparse_mat(batch_size, rng)
linearization.num_rows = num_rows
linearization.num_cols = num_cols
linearization.A_col_ind = col_ind
linearization.A_row_ptr = row_ptr
# Only need this line for the test since the objective is a mock
solver.reset(
batch_size=batch_size,
num_solver_contexts=(num_steps - 1) if test_exception else num_steps,
)
As = [
torch.randn(
(batch_size, len(col_ind)), dtype=torch.double, generator=rng
).cuda()
for _ in range(num_steps)
]
bs = [
torch.randn((batch_size, num_rows), dtype=torch.double, generator=rng).cuda()
for _ in range(num_steps)
]
c = torch.randn((batch_size, num_cols), dtype=torch.double, generator=rng).cuda()
# batched dot product
def batched_dot(a, b):
return torch.sum(a * b, dim=1)
# computes accum = sum(A_i \ b_i), returns dot(accum, c)
def iterate_solver(As, bs):
accum = None
for A, b in zip(As, bs):
linearization.A_val = A
linearization.b = b
res = solver.solve()
accum = res if accum is None else (accum + res)
return batched_dot(c, accum)
for A, b in zip(As, bs):
A.requires_grad = True
b.requires_grad = True
result = iterate_solver(As, bs)
# if insufficient contexts, assert exception is raised
if test_exception:
with pytest.raises(RuntimeError):
result.backward(torch.ones_like(result))
return
# otherwise, compute and check gradient
result.backward(torch.ones_like(result))
# we select random vectors `perturb` and check if the (numerically
# approximated) directional derivative matches with dot(perturb, grad)
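    # In other words, for a random direction v and a small epsilon we expect
    # (f(x + epsilon * v) - f(x)) / epsilon ≈ dot(v, grad_x f), evaluated
    # separately for perturbations of each A_step and each b_step.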
epsilon = 1e-7
num_checks = 10
for i in range(num_checks):
for perturb_A in [False, True]:
for step in range(num_steps):
perturbed_As = [A.detach().clone() for A in As]
perturbed_bs = [b.detach().clone() for b in bs]
if perturb_A:
perturb = torch.randn(
(batch_size, len(col_ind)), dtype=torch.double, generator=rng
).cuda()
perturbed_As[step] += perturb * epsilon
analytic_der = batched_dot(perturb, As[step].grad)
else:
perturb = torch.randn(
(batch_size, num_rows), dtype=torch.double, generator=rng
).cuda()
perturbed_bs[step] += perturb * epsilon
analytic_der = batched_dot(perturb, bs[step].grad)
perturbed_result = iterate_solver(perturbed_As, perturbed_bs)
numeric_der = (perturbed_result - result) / epsilon
torch.testing.assert_close(
numeric_der, analytic_der, rtol=1e-3, atol=1e-3
)
@pytest.mark.cudaext
@pytest.mark.parametrize("batch_size", [1, 32])
def test_sparse_solver_multistep_gradient(batch_size):
check_sparse_solver_multistep(batch_size, False)
@pytest.mark.cudaext
@pytest.mark.parametrize("batch_size", [1, 32])
def test_sparse_solver_multistep_exception(batch_size):
check_sparse_solver_multistep(batch_size, True)
|
73ff215d7ce221475419ee388ae38b46c52d048c
|
6e0d8d91dd22e2275cd713822679d5cabbc9331a
|
/thespian/system/systemBase.py
|
634fa0a72b22f9878e7ee739c8b1927a49199210
|
[
"MIT"
] |
permissive
|
kquick/Thespian
|
711712eb0a9ad3370f1013c8393cc461b9541dfe
|
dfc6d3e865c05f929328b85e98671a5c8fc3a54a
|
refs/heads/master
| 2023-05-26T15:51:57.959690
| 2023-05-22T15:08:00
| 2023-05-22T15:08:00
| 78,292,621
| 203
| 32
|
MIT
| 2021-06-22T14:42:09
| 2017-01-07T17:18:27
|
Python
|
UTF-8
|
Python
| false
| false
| 20,570
|
py
|
systemBase.py
|
'''The systemBase provides the base class implementation for standard
system Base implementations. This systemBase itself is not intended
to be instantiated as the regular Thespian System Base, but instead it
provides a base class that should be subclassed by the various System
Base implementations.
'''
import logging
from thespian.actors import *
from thespian.system import *
from thespian.system.utilis import thesplog
from thespian.system.timing import toTimeDeltaOrNone, ExpirationTimer, unexpired
from thespian.system.messages.admin import *
from thespian.system.messages.status import *
from thespian.system.transport import *
import threading
from contextlib import closing
from datetime import timedelta
import os
MAX_SYSTEM_SHUTDOWN_DELAY = timedelta(seconds=10)
MAX_CHILD_ACTOR_CREATE_DELAY = timedelta(seconds=50)
MAX_CAPABILITY_UPDATE_DELAY = timedelta(seconds=5)
MAX_LOAD_SOURCE_DELAY = timedelta(seconds=61)
MAX_ADMIN_STATUS_REQ_DELAY = timedelta(seconds=2)
MAX_TELL_PERIOD = timedelta(seconds=60)
def ensure_TZ_set():
# Actor engines handle timeouts and tend to sample system time
# frequently. Under Linux, if TZ is not set to a value,
# /etc/localtime or similar is consulted on each call to obtain
# system time which can negatively affect performance. This
# function attempts to set TZ if possible/reasonable.
if 'TZ' in os.environ:
return
for fname in ('/etc/localtime',
'/usr/local/etc/localtime'):
if os.path.exists(fname):
os.environ['TZ'] = ':' + fname
return
# OK if it's not set, just may be slower
class TransmitTrack(object):
def __init__(self, transport, adminAddr):
self._newActorAddress = None
self._pcrFAILED = None
self._transport = transport
self._adminAddr = adminAddr
@property
def failed(self):
return self._pcrFAILED is not None
@property
def failure(self):
return self._pcrFAILED
@property
def failure_message(self):
return getattr(self, '_pcrMessage', None)
def transmit_failed(self, result, intent):
if result == SendStatus.DeadTarget and \
intent.targetAddr != self._adminAddr:
# Forward message to the dead letter handler; if the
# forwarding fails, just discard the message.
self._transport.scheduleTransmit(
None,
TransmitIntent(self._adminAddr,
DeadEnvelope(intent.targetAddr, intent.message)))
self._pcrFAILED = result
self._transport.abort_run()
class NewActorResponse(TransmitTrack):
def __init__(self, transport, adminAddr, *args, **kw):
super(NewActorResponse, self).__init__(transport, adminAddr, *args, **kw)
self._newActorAddress = None
@property
def pending(self):
return self._newActorAddress is None and not self.failed
@property
def actor_address(self):
return self._newActorAddress
def __call__(self, envelope):
if isinstance(envelope.message, PendingActorResponse):
self._newActorAddress = False if envelope.message.errorCode else \
envelope.message.actualAddress
self._pcrFAILED = envelope.message.errorCode
self._pcrMessage = getattr(envelope.message, 'errorStr', None)
# Stop running transport; got new actor address (or failure)
return False
# Discard everything else. Previous requests and operations
# may have caused there to be messages sent back to this
# endpoint that are queued ahead of the PendingActorResponse.
return True # Keep waiting for the PendingActorResponse
class ExternalOpsToActors(object):
def __init__(self, adminAddr, transport=None):
self._numPrimaries = 0
self._cv = threading.Condition()
self._transport_runner = False
# Expects self.transport has already been set by subclass __init__
self.adminAddr = adminAddr
if transport:
self.transport = transport
def _run_transport(self, maximumDuration=None, txonly=False,
incomingHandler=None):
# This is where multiple external threads are synchronized for
# receives. Transmits will flow down into the transmit layer
# where they are queued with thread safety, but threads
# blocking on a receive will all be lined up through this point.
max_runtime = ExpirationTimer(maximumDuration)
with self._cv:
while self._transport_runner:
self._cv.wait(max_runtime.view().remainingSeconds())
if max_runtime.view().expired():
return None
self._transport_runner = True
try:
r = Thespian__UpdateWork()
while isinstance(r, Thespian__UpdateWork):
r = self.transport.run(TransmitOnly if txonly else incomingHandler,
max_runtime.view().remaining())
return r
# incomingHandler callback could deadlock on this same thread; is it ever not None?
finally:
with self._cv:
self._transport_runner = False
self._cv.notify()
def _tx_to_actor(self, actorAddress, message):
# Send a message from this external process to an actor.
# Returns a TransmitTrack object that can be used to check for
# transmit errors.
txwatch = TransmitTrack(self.transport, self.adminAddr)
self.transport.scheduleTransmit(
None,
TransmitIntent(actorAddress, message,
onError=txwatch.transmit_failed))
return txwatch
def _tx_to_admin(self, message):
return self._tx_to_actor(self.adminAddr, message)
def newPrimaryActor(self, actorClass, targetActorRequirements, globalName,
sourceHash=None):
self._numPrimaries = self._numPrimaries + 1
actorClassName = '%s.%s'%(actorClass.__module__, actorClass.__name__) \
if hasattr(actorClass, '__name__') else actorClass
with closing(self.transport.external_transport_clone()) as tx_external:
response = NewActorResponse(tx_external, self.adminAddr)
tx_external.scheduleTransmit(
None,
TransmitIntent(self.adminAddr,
PendingActor(actorClassName,
None, self._numPrimaries,
targetActorRequirements,
globalName=globalName,
sourceHash=sourceHash),
onError=response.transmit_failed))
endwait = ExpirationTimer(MAX_CHILD_ACTOR_CREATE_DELAY)
# Do not use _run_transport: the tx_external transport
# context acquired above is unique to this thread and
# should not be synchronized/restricted by other threads.
tx_external.run(response, MAX_CHILD_ACTOR_CREATE_DELAY)
# Other items might abort the transport run... like transmit
# failures on a previous ask() that itself already timed out.
while response.pending and not endwait.view().expired():
tx_external.run(response, MAX_CHILD_ACTOR_CREATE_DELAY)
if response.failed:
if response.failure == PendingActorResponse.ERROR_Invalid_SourceHash:
raise InvalidActorSourceHash(sourceHash)
if response.failure == PendingActorResponse.ERROR_Invalid_ActorClass:
raise InvalidActorSpecification(actorClass,
response.failure_message)
if response.failure == PendingActorResponse.ERROR_Import:
info = response.failure_message
if info:
thesplog('Actor Create Failure, Import Error: %s', info)
raise ImportError(str(actorClass) + ': ' + info)
thesplog('Actor Create Failure, Import Error')
raise ImportError(actorClass)
if response.failure == PendingActorResponse.ERROR_No_Compatible_ActorSystem:
raise NoCompatibleSystemForActor(
actorClass, 'No compatible ActorSystem could be found')
raise ActorSystemFailure("Could not request new Actor from Admin (%s)"
% (response.failure))
if response.actor_address:
return response.actor_address
if response.actor_address is False:
raise NoCompatibleSystemForActor(
actorClass, 'No compatible ActorSystem could be found')
raise ActorSystemRequestTimeout(
'No response received to PendingActor request to Admin'
' at %s from %s'%(str(self.adminAddr),
str(self.transport.myAddress)))
def tell(self, anActor, msg):
attemptLimit = ExpirationTimer(MAX_TELL_PERIOD)
# transport may not use sockets, but this helps error handling
# in case it does.
import socket
for attempt in range(5000):
try:
txwatch = self._tx_to_actor(anActor, msg)
for attemptTime in unexpired(attemptLimit):
if not self._run_transport(attemptTime.remaining(),
txonly=True):
# all transmits completed
return
if txwatch.failed:
raise ActorSystemFailure(
'Error sending to %s: %s' % (str(anActor),
str(txwatch.failure)))
raise ActorSystemRequestTimeout(
'Unable to send to %s within %s' %
(str(anActor), str(MAX_TELL_PERIOD)))
except socket.error as ex:
import errno
if errno.EMFILE == ex.errno:
import time
time.sleep(0.1)
else:
raise
def listen(self, timeout):
while True:
response = self._run_transport(toTimeDeltaOrNone(timeout))
if not isinstance(response, ReceiveEnvelope):
break
# Do not send miscellaneous ActorSystemMessages to the caller
# that it might not recognize.
if not isInternalActorSystemMessage(response.message):
return response.message
return None
def ask(self, anActor, msg, timeout):
txwatch = self._tx_to_actor(anActor, msg) # KWQ: pass timeout on tx??
askLimit = ExpirationTimer(toTimeDeltaOrNone(timeout))
for remTime in unexpired(askLimit):
response = self._run_transport(remTime.remaining())
if txwatch.failed:
if txwatch.failure in [SendStatus.DeadTarget,
SendStatus.Failed,
SendStatus.NotSent]:
# Silent failure; not all transports can indicate
# this, so for conformity the Dead Letter handler is
# the intended method of handling this issue.
return None
raise ActorSystemFailure('Transmit of ask message to %s failed (%s)'%(
str(anActor),
str(txwatch.failure)))
if not isinstance(response, ReceiveEnvelope):
# Timed out or other failure, give up.
break
# Do not send miscellaneous ActorSystemMessages to the
# caller that it might not recognize. If one of those was
        # received, loop to get another response.
if not isInternalActorSystemMessage(response.message):
return response.message
return None
class systemBase(ExternalOpsToActors):
"""This is the systemBase base class that various Thespian System Base
implementations should subclass. The System Base is
instantiated by each process that wishes to utilize an Actor
System and runs in the context of that process (as opposed to
the System Admin that may run in its own process).
This base is not present in the Actors themselves, only in the
    external applications that wish to talk to Actors.
Depending on the System Base implementation chosen by that
process, the instantiation may be private to that process or
shared by other processes; in the former case, there will be an
instance of this class in each process accessing the shared
Actor System, representing the Portal between the "external"
environment of that process and the shared Actor System
Implementation.
All ActorAddresses generated via newActor and newPrimaryActor
are local to this ActorSystemBase instance. Any and *all*
messages sent to other Actors must be able to be appropriately
serialized; this allows the pickling/unpickling process to
translate an ActorAddress from a local representation to a
global or remote representation.
"""
def __init__(self, system, logDefs = None):
ensure_TZ_set()
# Expects self.transport has already been set by subclass __init__
super(systemBase, self).__init__(
self.transport.getAdminAddr(system.capabilities))
tryingTime = ExpirationTimer(MAX_SYSTEM_SHUTDOWN_DELAY + timedelta(seconds=1))
while not tryingTime.view().expired():
if not self.transport.probeAdmin(self.adminAddr):
self._startAdmin(self.adminAddr,
self.transport.myAddress,
system.capabilities,
logDefs)
if self._verifyAdminRunning(): return
import time
time.sleep(0.5) # Previous version may have been exiting
if not self._verifyAdminRunning():
raise InvalidActorAddress(self.adminAddr,
'not a valid or useable ActorSystem Admin')
# KWQ: more details? couldn't start @ addr? response was ? instead of expected Thespian_SystemStatus?
def _verifyAdminRunning(self):
"""Returns boolean verification that the Admin is running and
available. Will query the admin for a positive response,
blocking until one is received.
"""
txwatch = self._tx_to_admin(QueryExists())
response = self._run_transport(MAX_ADMIN_STATUS_REQ_DELAY)
return not txwatch.failed and \
isinstance(response, ReceiveEnvelope) and \
isinstance(response.message, QueryAck) \
and not response.message.inShutdown
def __getstate__(self):
raise CannotPickle('ActorSystem cannot be Pickled.')
def shutdown(self):
thesplog('ActorSystem shutdown requested.', level=logging.INFO)
time_to_quit = ExpirationTimer(MAX_SYSTEM_SHUTDOWN_DELAY)
txwatch = self._tx_to_admin(SystemShutdown())
for remaining_time in unexpired(time_to_quit):
response = self._run_transport(remaining_time.remaining())
if txwatch.failed:
thesplog('Could not send shutdown request to Admin'
'; aborting but not necessarily stopped',
level=logging.WARNING)
return
if isinstance(response, ReceiveEnvelope):
if isinstance(response.message, SystemShutdownCompleted):
break
else:
thesplog('Expected shutdown completed message, got: %s', response.message,
level=logging.WARNING)
            elif isinstance(response, (Thespian__Run_Expired,
                                       Thespian__Run_Terminated)):
break
else:
thesplog('No response to Admin shutdown request; Actor system not completely shutdown',
level=logging.ERROR)
self.transport.close()
thesplog('ActorSystem shutdown complete.')
def updateCapability(self, capabilityName, capabilityValue=None):
attemptLimit = ExpirationTimer(MAX_CAPABILITY_UPDATE_DELAY)
txwatch = self._tx_to_admin(CapabilityUpdate(capabilityName,
capabilityValue))
for remaining_time in unexpired(attemptLimit):
if not self._run_transport(remaining_time.remaining(), txonly=True):
return # all transmits completed
if txwatch.failed:
raise ActorSystemFailure(
'Error sending capability updates to Admin: %s' %
str(txwatch.failure))
raise ActorSystemRequestTimeout(
'Unable to confirm capability update in %s' %
str(MAX_CAPABILITY_UPDATE_DELAY))
def loadActorSource(self, fname):
loadLimit = ExpirationTimer(MAX_LOAD_SOURCE_DELAY)
f = fname if hasattr(fname, 'read') else open(fname, 'rb')
try:
d = f.read()
import hashlib
hval = hashlib.md5(d).hexdigest()
txwatch = self._tx_to_admin(
ValidateSource(hval, d, getattr(f, 'name',
str(fname)
if hasattr(fname, 'read')
else fname)))
for load_time in unexpired(loadLimit):
if not self._run_transport(load_time.remaining(), txonly=True):
# All transmits completed
return hval
if txwatch.failed:
raise ActorSystemFailure(
'Error sending source load to Admin: %s' %
str(txwatch.failure))
raise ActorSystemRequestTimeout('Load source timeout: ' +
str(loadLimit))
finally:
f.close()
def unloadActorSource(self, sourceHash):
loadLimit = ExpirationTimer(MAX_LOAD_SOURCE_DELAY)
txwatch = self._tx_to_admin(ValidateSource(sourceHash, None))
for load_time in unexpired(loadLimit):
if not self._run_transport(load_time.remaining(), txonly=True):
return # all transmits completed
if txwatch.failed:
raise ActorSystemFailure(
'Error sending source unload to Admin: %s' %
str(txwatch.failure))
raise ActorSystemRequestTimeout('Unload source timeout: ' +
str(loadLimit))
def external_clone(self):
"""Get a separate local endpoint that does not commingle traffic with
        the main ActorSystem or other contexts.  Makes internal
blocking calls, so primarily appropriate for a
multi-threaded client environment.
"""
return BaseContext(self.adminAddr, self.transport)
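    # Editorial sketch (not part of the original module): a worker thread would
    # typically obtain its own context instead of sharing the main base, e.g.
    #
    #     ctx = base.external_clone()     # `base` is a systemBase subclass instance
    #     try:
    #         reply = ctx.ask(actor_addr, 'ping', 5)
    #     finally:
    #         ctx.exit_context()
    #
    # where `base` and `actor_addr` are placeholder names for this illustration.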
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Actors that involve themselves in topology
def preRegisterRemoteSystem(self, remoteAddress, remoteCapabilities):
self.send(self.adminAddr,
ConventionRegister(
self.transport.getAddressFromString(remoteAddress),
remoteCapabilities,
preRegister=True))
def deRegisterRemoteSystem(self, remoteAddress):
self.send(
self.adminAddr,
ConventionDeRegister(
remoteAddress
if isinstance(remoteAddress, ActorAddress) else
self.transport.getAddressFromString(remoteAddress)))
class BaseContext(ExternalOpsToActors):
def __init__(self, adminAddr, transport):
super(BaseContext, self).__init__(adminAddr,
transport.external_transport_clone())
def exit_context(self):
self.transport.close()
|
19d9371631d55e927327c8ab6d4d3b5fe14902e0
|
091a6200be74bf6577c86f623665bcc24e16b02b
|
/CPX_GBoard/touch_hid/code.py
|
4fa799f373775995358357a0c44a102a791e3ea2
|
[
"MIT"
] |
permissive
|
adafruit/Adafruit_Learning_System_Guides
|
b5f7bce40a16da64e7a79d4b39de032f2cca41d4
|
5eaa7a15a437c533b89f359a25983e24bb6b5438
|
refs/heads/main
| 2023-09-05T18:31:41.621956
| 2023-09-05T15:36:09
| 2023-09-05T15:36:09
| 105,065,494
| 937
| 937
|
MIT
| 2023-09-12T18:48:53
| 2017-09-27T20:22:44
|
C
|
UTF-8
|
Python
| false
| false
| 1,048
|
py
|
code.py
|
# SPDX-FileCopyrightText: 2018 Dave Astels for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
Circuit Playground Express GBoard: capacitive touch generating keycodes
Adafruit invests time and resources providing this open source code.
Please support Adafruit and open source hardware by purchasing
products from Adafruit!
Written by Dave Astels for Adafruit Industries
Copyright (c) 2018 Adafruit Industries
Licensed under the MIT license.
All text above must be included in any redistribution.
"""
import usb_hid
from adafruit_circuitplayground.express import cpx
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keycode import Keycode
DOT_DURATION = 0.25
DASH_DURATION = 0.5
kbd = Keyboard(usb_hid.devices)
# You can adjust this to get the level of sensitivity you want.
cpx.adjust_touch_threshold(100)
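# Editorial note: adjust_touch_threshold() raises each pad's touch threshold by
# the given amount, so larger values should make the pads *less* sensitive
# (stated as an assumption about the adafruit_circuitplayground library, not
# something defined in this file).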
while True:
if cpx.touch_A4:
kbd.send(Keycode.PERIOD)
while cpx.touch_A4:
pass
elif cpx.touch_A3:
kbd.send(Keycode.MINUS)
while cpx.touch_A3:
pass
|
fc6116397d330860a6bafd796c1a11b98674adb8
|
96b7ee94763d477ddbe1534a866fb7e3bd269a7b
|
/tests/unit/objects/test_timeactivity.py
|
fa9f870b662e3abdde22521bf3ee7a15a592e499
|
[
"MIT"
] |
permissive
|
ej2/python-quickbooks
|
a83d088983f2aa6b001d2cb7445f42ea0ab4725a
|
5d29d1fa832496d00af927e35deb9ba78817550d
|
refs/heads/master
| 2023-09-03T23:49:59.551648
| 2023-08-29T16:10:29
| 2023-08-29T16:10:29
| 39,512,055
| 139
| 81
|
MIT
| 2023-08-29T16:15:24
| 2015-07-22T14:50:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,357
|
py
|
test_timeactivity.py
|
import unittest
from quickbooks import QuickBooks
from quickbooks.objects.timeactivity import TimeActivity
class TimeActivityTests(unittest.TestCase):
def test_unicode(self):
time_activity = TimeActivity()
time_activity.NameOf = "test"
time_activity.TimeZone = "CST"
time_activity.BillableStatus = "test"
time_activity.Taxable = False
time_activity.HourlyRate = 0
time_activity.Hours = 1
time_activity.Minutes = 60
time_activity.BreakHours = 1
time_activity.BreakMinutes = 60
time_activity.Description = "test"
self.assertEqual(str(time_activity), "test")
self.assertEqual(time_activity.TimeZone, "CST")
self.assertEqual(time_activity.BillableStatus, "test")
self.assertEqual(time_activity.Taxable, False)
self.assertEqual(time_activity.HourlyRate, 0)
self.assertEqual(time_activity.Hours, 1)
self.assertEqual(time_activity.Minutes, 60)
self.assertEqual(time_activity.BreakHours, 1)
self.assertEqual(time_activity.BreakMinutes, 60)
self.assertEqual(time_activity.Description, "test")
def test_valid_object_name(self):
obj = TimeActivity()
client = QuickBooks()
result = client.isvalid_object_name(obj.qbo_object_name)
self.assertTrue(result)
|
684ebdc780a0a86077cd0d7c4d0cd08d52695647
|
b8bbdfc593b6d816e67a344f720f90ec05236778
|
/airflow/migrations/versions/0100_2_3_0_add_taskmap_and_map_id_on_taskinstance.py
|
9cac7cbc01f24ee37ea9c5be0ff186cbb49fca76
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
apache/airflow
|
ed78db0a8bab7e096990e143926e52f518e288ab
|
1b122c15030e99cef9d4ff26d3781a7a9d6949bc
|
refs/heads/main
| 2023-09-01T08:37:34.556097
| 2023-09-01T06:49:05
| 2023-09-01T06:49:05
| 33,884,891
| 22,756
| 11,558
|
Apache-2.0
| 2023-09-14T20:12:36
| 2015-04-13T18:04:58
|
Python
|
UTF-8
|
Python
| false
| false
| 5,032
|
py
|
0100_2_3_0_add_taskmap_and_map_id_on_taskinstance.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add ``map_index`` column to TaskInstance to identify task-mapping,
and a ``task_map`` table to track mapping values from XCom.
Revision ID: e655c0453f75
Revises: f9da662e7089
Create Date: 2021-12-13 22:59:41.052584
"""
from __future__ import annotations
from alembic import op
from sqlalchemy import CheckConstraint, Column, ForeignKeyConstraint, Integer, text
from airflow.models.base import StringID
from airflow.utils.sqlalchemy import ExtendedJSON
# Revision identifiers, used by Alembic.
revision = "e655c0453f75"
down_revision = "f9da662e7089"
branch_labels = None
depends_on = None
airflow_version = "2.3.0"
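# Editorial note: like other Airflow schema revisions, this module is not run
# directly; it is expected to be picked up by the Alembic-driven migration flow
# (e.g. `airflow db upgrade`), which calls upgrade()/downgrade() below. This is
# an assumption about typical usage, not something encoded in this file.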
def upgrade():
"""
Add ``map_index`` column to TaskInstance to identify task-mapping,
and a ``task_map`` table to track mapping values from XCom.
"""
# We need to first remove constraints on task_reschedule since they depend on task_instance.
with op.batch_alter_table("task_reschedule") as batch_op:
batch_op.drop_constraint("task_reschedule_ti_fkey", type_="foreignkey")
batch_op.drop_index("idx_task_reschedule_dag_task_run")
# Change task_instance's primary key.
with op.batch_alter_table("task_instance") as batch_op:
# I think we always use this name for TaskInstance after 7b2661a43ba3?
batch_op.drop_constraint("task_instance_pkey", type_="primary")
batch_op.add_column(Column("map_index", Integer, nullable=False, server_default=text("-1")))
batch_op.create_primary_key("task_instance_pkey", ["dag_id", "task_id", "run_id", "map_index"])
# Re-create task_reschedule's constraints.
with op.batch_alter_table("task_reschedule") as batch_op:
batch_op.add_column(Column("map_index", Integer, nullable=False, server_default=text("-1")))
batch_op.create_foreign_key(
"task_reschedule_ti_fkey",
"task_instance",
["dag_id", "task_id", "run_id", "map_index"],
["dag_id", "task_id", "run_id", "map_index"],
ondelete="CASCADE",
)
batch_op.create_index(
"idx_task_reschedule_dag_task_run",
["dag_id", "task_id", "run_id", "map_index"],
unique=False,
)
# Create task_map.
op.create_table(
"task_map",
Column("dag_id", StringID(), primary_key=True),
Column("task_id", StringID(), primary_key=True),
Column("run_id", StringID(), primary_key=True),
Column("map_index", Integer, primary_key=True),
Column("length", Integer, nullable=False),
Column("keys", ExtendedJSON, nullable=True),
CheckConstraint("length >= 0", name="task_map_length_not_negative"),
ForeignKeyConstraint(
["dag_id", "task_id", "run_id", "map_index"],
[
"task_instance.dag_id",
"task_instance.task_id",
"task_instance.run_id",
"task_instance.map_index",
],
name="task_map_task_instance_fkey",
ondelete="CASCADE",
),
)
def downgrade():
"""Remove TaskMap and map_index on TaskInstance."""
op.drop_table("task_map")
with op.batch_alter_table("task_reschedule") as batch_op:
batch_op.drop_constraint("task_reschedule_ti_fkey", type_="foreignkey")
batch_op.drop_index("idx_task_reschedule_dag_task_run")
batch_op.drop_column("map_index", mssql_drop_default=True)
op.execute("DELETE FROM task_instance WHERE map_index != -1")
with op.batch_alter_table("task_instance") as batch_op:
batch_op.drop_constraint("task_instance_pkey", type_="primary")
batch_op.drop_column("map_index", mssql_drop_default=True)
batch_op.create_primary_key("task_instance_pkey", ["dag_id", "task_id", "run_id"])
with op.batch_alter_table("task_reschedule") as batch_op:
batch_op.create_foreign_key(
"task_reschedule_ti_fkey",
"task_instance",
["dag_id", "task_id", "run_id"],
["dag_id", "task_id", "run_id"],
ondelete="CASCADE",
)
batch_op.create_index(
"idx_task_reschedule_dag_task_run",
["dag_id", "task_id", "run_id"],
unique=False,
)
|
85b7ca1bc49fcc7ddf7e0b3c09029418d366a8c2
|
d4412fbe37540e2c4cbe59ed6503d3661ccb7d9c
|
/colossalai/testing/__init__.py
|
0db33361c6a004ababb8d118505ea46e81cccf7d
|
[
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] |
permissive
|
hpcaitech/ColossalAI
|
a082ed08a3807b53c49d1f86835b9808590d9042
|
c7b60f75470f067d1342705708810a660eabd684
|
refs/heads/main
| 2023-09-01T04:13:13.834565
| 2023-08-30T15:07:21
| 2023-08-30T15:07:21
| 422,274,596
| 32,044
| 4,084
|
Apache-2.0
| 2023-09-14T15:19:54
| 2021-10-28T16:19:44
|
Python
|
UTF-8
|
Python
| false
| false
| 772
|
py
|
__init__.py
|
from .comparison import (
assert_close,
assert_close_loose,
assert_equal,
assert_equal_in_group,
assert_hf_output_close,
assert_not_equal,
check_state_dict_equal,
)
from .pytest_wrapper import run_on_environment_flag
from .utils import (
clear_cache_before_run,
free_port,
parameterize,
rerun_if_address_is_in_use,
rerun_on_exception,
skip_if_not_enough_gpus,
spawn,
)
__all__ = [
'assert_equal', 'assert_not_equal', 'assert_close', 'assert_close_loose', 'assert_equal_in_group', 'parameterize',
'rerun_on_exception', 'rerun_if_address_is_in_use', 'skip_if_not_enough_gpus', 'free_port', 'spawn',
'clear_cache_before_run', 'run_on_environment_flag', 'check_state_dict_equal', 'assert_hf_output_close'
]
|
5bcb93a3676ef55f9dafe20ecfbb055e48968fbb
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/battle_royale/scripts/client/battle_royale/gui/Scaleform/daapi/view/battle/battle_level_panel.py
|
5cfe97452570787b8c9c927dec41aad2fa1962d6
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 3,505
|
py
|
battle_level_panel.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: battle_royale/scripts/client/battle_royale/gui/Scaleform/daapi/view/battle/battle_level_panel.py
import BigWorld
import BattleReplay
from helpers import int2roman
import WWISE
from gui.Scaleform.daapi.view.meta.BattleLevelPanelMeta import BattleLevelPanelMeta
from battle_royale.gui.battle_control.controllers.progression_ctrl import IProgressionListener
class BattleLevelPanel(BattleLevelPanelMeta, IProgressionListener):
__SOUND_XP_DIFF = 1
__XP_UPDATE_TIME_DIFF = 1.0
def __init__(self):
super(BattleLevelPanel, self).__init__()
self.__firstShow = True
self.__maxLevelAchieved = False
self.__isInitialized = False
self.__lastXPUpdateTime = 0.0
def updateData(self, arenaLevelData):
animationState = arenaLevelData.xpIsChanged
if arenaLevelData.observedVehicleIsChanged:
animationState = False
self.__maxLevelAchieved = False
self.as_resetS()
if self.__maxLevelAchieved:
return
if BattleReplay.g_replayCtrl.isPlaying:
animationState = False
self.__update(arenaLevelData, animationState)
self.__firstShow = False
def __update(self, arenaLevel, animationState):
if BigWorld.time() - self.__lastXPUpdateTime < self.__XP_UPDATE_TIME_DIFF:
animationState = False
self.__lastXPUpdateTime = BigWorld.time()
if not self.__isInitialized:
self.as_setAnimationS(False)
self.__isInitialized = True
else:
self.as_setAnimationS(animationState)
if not animationState and not arenaLevel.isMaxLvlAchieved:
expText = '{currentXP} / {targetXP}'.format(currentXP=arenaLevel.xp, targetXP=arenaLevel.targetXP)
self.as_setLevelS(int2roman(arenaLevel.level), int2roman(arenaLevel.level + 1), expText)
if arenaLevel.xp == 0 and arenaLevel.level == 1:
expText = ' / {targetXP}'.format(targetXP=arenaLevel.targetXP)
self.as_setLevelS(int2roman(arenaLevel.level), int2roman(arenaLevel.level + 1), expText)
self.as_setExperienceS(0, expText, 0, 0, False)
return
if arenaLevel.levelIsChanged:
expText = ' / {targetXP}'.format(targetXP=arenaLevel.baseXP)
percent = IProgressionListener.MAX_PERCENT_AMOUNT
xp = arenaLevel.baseXP
else:
expText = ' / {targetXP}'.format(targetXP=arenaLevel.targetXP)
percent = arenaLevel.percent
xp = arenaLevel.xp
playSound = arenaLevel.diff >= self.__SOUND_XP_DIFF and not self.__firstShow
self.as_setExperienceS(xp, expText, arenaLevel.diff, percent, playSound)
if arenaLevel.isMaxLvlAchieved:
self.as_setMaxLevelReachedS(int2roman(arenaLevel.level))
self.__maxLevelAchieved = True
return
if arenaLevel.levelIsChanged or self.__firstShow:
expText = '{baseXP} / {targetXP}'.format(baseXP=arenaLevel.baseXP, targetXP=arenaLevel.targetXP)
self.as_setLevelS(int2roman(arenaLevel.level), int2roman(arenaLevel.level + 1), expText)
expText = ' / {targetXP}'.format(targetXP=arenaLevel.targetXP)
self.as_setExperienceS(arenaLevel.xp, expText, arenaLevel.diffAfterLevel, arenaLevel.percent, playSound)
def onPlaySound(self, soundType):
WWISE.WW_eventGlobal(soundType)
|
f6abb928ce5342eae962f2d9234cacc43570a8e9
|
bed34365a9dab825fd9f4a4ff1b0863f441266ac
|
/neutron/tests/unit/services/metering/test_metering_plugin.py
|
33961a20c6ae41931d4fffbf4e487f66a7207b8d
|
[
"Apache-2.0"
] |
permissive
|
openstack/neutron
|
0913ee3cd69d5bdb9c10aa084d4e1803abee320c
|
dde31aae392b80341f6440eb38db1583563d7d1f
|
refs/heads/master
| 2023-08-31T13:09:41.831598
| 2023-08-31T11:37:30
| 2023-08-31T11:37:30
| 2,400,289
| 1,174
| 1,325
|
Apache-2.0
| 2022-06-29T08:00:05
| 2011-09-16T16:04:08
|
Python
|
UTF-8
|
Python
| false
| false
| 36,116
|
py
|
test_metering_plugin.py
|
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron_lib.agent import topics
from neutron_lib.api.definitions import metering as metering_apidef
from neutron_lib import context
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from neutron_lib.tests import tools
from neutron_lib.utils import net as net_utils
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from neutron.api.rpc.agentnotifiers import metering_rpc_agent_api
from neutron.db.metering import metering_rpc
from neutron.extensions import l3 as ext_l3
from neutron.extensions import metering as ext_metering
from neutron.objects import agent as agent_obj
from neutron.tests.common import helpers
from neutron.tests.unit.db.metering import test_metering_db
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.extensions import test_l3
_uuid = uuidutils.generate_uuid
METERING_SERVICE_PLUGIN_KLASS = (
"neutron.services.metering."
"metering_plugin.MeteringPlugin"
)
class MeteringTestExtensionManager(object):
def get_resources(self):
l3_res = ext_l3.L3.get_resources()
metering_res = ext_metering.Metering.get_resources()
return l3_res + metering_res
def get_actions(self):
return []
def get_request_extensions(self):
return []
class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
test_l3.L3NatTestCaseMixin,
test_metering_db.MeteringPluginDbTestCaseMixin):
resource_prefix_map = dict(
(k.replace('_', '-'), "/metering")
for k in metering_apidef.RESOURCE_ATTRIBUTE_MAP.keys()
)
def setUp(self):
plugin = 'neutron.tests.unit.extensions.test_l3.TestL3NatIntPlugin'
service_plugins = {'metering_plugin_name':
METERING_SERVICE_PLUGIN_KLASS}
ext_mgr = MeteringTestExtensionManager()
super(TestMeteringPlugin, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
service_plugins=service_plugins)
self.uuid = '654f6b9d-0f36-4ae5-bd1b-01616794ca60'
uuid = 'oslo_utils.uuidutils.generate_uuid'
self.uuid_patch = mock.patch(uuid, return_value=self.uuid)
self.mock_uuid = self.uuid_patch.start()
self.ctx = context.Context('', self._tenant_id).elevated()
self.topic = topics.METERING_AGENT
add = ('neutron.api.rpc.agentnotifiers.' +
'metering_rpc_agent_api.MeteringAgentNotifyAPI' +
'.add_metering_label')
self.add_patch = mock.patch(add)
self.mock_add = self.add_patch.start()
remove = ('neutron.api.rpc.agentnotifiers.' +
'metering_rpc_agent_api.MeteringAgentNotifyAPI' +
'.remove_metering_label')
self.remove_patch = mock.patch(remove)
self.mock_remove = self.remove_patch.start()
update = ('neutron.api.rpc.agentnotifiers.' +
'metering_rpc_agent_api.MeteringAgentNotifyAPI' +
'.update_metering_label_rules')
self.update_patch = mock.patch(update)
self.mock_update = self.update_patch.start()
add_rule = ('neutron.api.rpc.agentnotifiers.' +
'metering_rpc_agent_api.MeteringAgentNotifyAPI' +
'.add_metering_label_rule')
self.add_rule_patch = mock.patch(add_rule)
self.mock_add_rule = self.add_rule_patch.start()
remove_rule = ('neutron.api.rpc.agentnotifiers.' +
'metering_rpc_agent_api.MeteringAgentNotifyAPI' +
'.remove_metering_label_rule')
self.remove_rule_patch = mock.patch(remove_rule)
self.mock_remove_rule = self.remove_rule_patch.start()
def test_routers_updated_on_host_rpc_call(self):
router_test = {
'id': 'xyz',
'name': 'testrouter'}
notify_host = ('neutron.api.rpc.agentnotifiers.' +
'metering_rpc_agent_api.MeteringAgentNotifyAPI' +
'._notification_host')
self.notify_patch = mock.patch(notify_host)
self.mock_notify_host = self.notify_patch.start()
metering_rpc_handle = metering_rpc_agent_api.MeteringAgentNotifyAPI()
metering_rpc_handle.routers_updated_on_host(
self.ctx,
[router_test['id']],
'test_host')
self.mock_notify_host.assert_called_with(self.ctx, 'routers_updated',
'test_host', routers=['xyz'])
def test_add_metering_label_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
expected = [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid, 'shared': False,
'name': 'label'}],
'id': self.uuid}]
tenant_id_2 = '8a268a58-1610-4890-87e0-07abb8231206'
self.mock_uuid.return_value = second_uuid
with self.router(name='router2', tenant_id=tenant_id_2,
set_context=True):
self.mock_uuid.return_value = self.uuid
with self.router(name='router1'):
with self.metering_label():
self.mock_add.assert_called_with(mock.ANY, expected)
def test_add_metering_label_shared_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
expected = [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid, 'shared': False,
'name': 'label'},
{'rules': [],
'id': second_uuid, 'shared': True,
'name': 'label'}],
'id': self.uuid}]
tenant_id_2 = '8a268a58-1610-4890-87e0-07abb8231206'
with self.router(name='router1', shared=True):
with self.metering_label():
self.mock_uuid.return_value = second_uuid
with self.metering_label(tenant_id=tenant_id_2, shared=True):
self.mock_add.assert_called_with(mock.ANY, expected)
def test_remove_metering_label_rpc_call(self):
expected = [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid, 'shared': False,
'name': 'label'}],
'id': self.uuid}]
with self.router():
with self.metering_label() as label:
self.mock_add.assert_called_with(mock.ANY, expected)
self._delete('metering-labels',
label['metering_label']['id'],
as_admin=True)
self.mock_remove.assert_called_with(mock.ANY, expected)
def test_remove_one_metering_label_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
expected_add = [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid, 'shared': False,
'name': 'label'},
{'rules': [],
'id': second_uuid, 'shared': False,
'name': 'label'}],
'id': self.uuid}]
expected_remove = [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': second_uuid, 'shared': False,
'name': 'label'}],
'id': self.uuid}]
with self.router():
with self.metering_label():
self.mock_uuid.return_value = second_uuid
with self.metering_label() as label:
self.mock_add.assert_called_with(mock.ANY, expected_add)
self._delete('metering-labels',
label['metering_label']['id'],
as_admin=True)
self.mock_remove.assert_called_with(mock.ANY, expected_remove)
def test_add_and_remove_metering_label_rule_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
expected_add = [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'remote_ip_prefix':
net_utils.AuthenticIPNetwork(
'10.0.0.0/24'),
'destination_ip_prefix': None,
'source_ip_prefix': None,
'direction': 'ingress',
'metering_label_id': self.uuid,
'excluded': False,
'id': second_uuid},
'id': self.uuid}],
'id': self.uuid}]
expected_del = [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'remote_ip_prefix':
net_utils.AuthenticIPNetwork(
'10.0.0.0/24'),
'destination_ip_prefix': None,
'source_ip_prefix': None,
'direction': 'ingress',
'metering_label_id': self.uuid,
'excluded': False,
'id': second_uuid},
'id': self.uuid}],
'id': self.uuid}]
remote_ip_prefix = {'remote_ip_prefix': '10.0.0.0/24'}
with self.router():
with self.metering_label() as label:
la = label['metering_label']
self.mock_uuid.return_value = second_uuid
with self.metering_label_rule(la['id'], **remote_ip_prefix):
self.mock_add_rule.assert_called_with(mock.ANY,
expected_add)
self._delete('metering-label-rules', second_uuid,
as_admin=True)
self.mock_remove_rule.assert_called_with(mock.ANY,
expected_del)
def test_add_and_remove_metering_label_rule_source_ip_only(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
expected_add = [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'source_ip_prefix':
net_utils.AuthenticIPNetwork(
'10.0.0.0/24'),
'destination_ip_prefix': None,
'remote_ip_prefix': None,
'direction': 'ingress',
'metering_label_id': self.uuid,
'excluded': False,
'id': second_uuid},
'id': self.uuid}],
'id': self.uuid}]
expected_del = [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'source_ip_prefix':
net_utils.AuthenticIPNetwork(
'10.0.0.0/24'),
'destination_ip_prefix': None,
'remote_ip_prefix': None,
'direction': 'ingress',
'metering_label_id': self.uuid,
'excluded': False,
'id': second_uuid},
'id': self.uuid}],
'id': self.uuid}]
source_ip_prefix = {'source_ip_prefix': '10.0.0.0/24'}
with self.router():
with self.metering_label() as label:
la = label['metering_label']
self.mock_uuid.return_value = second_uuid
with self.metering_label_rule(la['id'],
**source_ip_prefix):
self.mock_add_rule.assert_called_with(mock.ANY,
expected_add)
self._delete('metering-label-rules', second_uuid,
as_admin=True)
self.mock_remove_rule.assert_called_with(mock.ANY,
expected_del)
def test_add_and_remove_metering_label_rule_dest_ip_only(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
expected_add = [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'destination_ip_prefix':
net_utils.AuthenticIPNetwork(
'10.0.0.0/24'),
'source_ip_prefix': None,
'remote_ip_prefix': None,
'direction': 'ingress',
'metering_label_id': self.uuid,
'excluded': False,
'id': second_uuid},
'id': self.uuid}],
'id': self.uuid}]
expected_del = [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'destination_ip_prefix':
net_utils.AuthenticIPNetwork(
'10.0.0.0/24'),
'source_ip_prefix': None,
'remote_ip_prefix': None,
'direction': 'ingress',
'metering_label_id': self.uuid,
'excluded': False,
'id': second_uuid},
'id': self.uuid}],
'id': self.uuid}]
source_ip_prefix = {'destination_ip_prefix': '10.0.0.0/24'}
with self.router():
with self.metering_label() as label:
la = label['metering_label']
self.mock_uuid.return_value = second_uuid
with self.metering_label_rule(la['id'],
**source_ip_prefix):
self.mock_add_rule.assert_called_with(mock.ANY,
expected_add)
self._delete('metering-label-rules', second_uuid,
as_admin=True)
self.mock_remove_rule.assert_called_with(mock.ANY,
expected_del)
def test_add_and_remove_metering_label_rule_src_and_dest_ip_only(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
expected_add = [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'destination_ip_prefix':
net_utils.AuthenticIPNetwork('0.0.0.0/0'),
'source_ip_prefix':
net_utils.AuthenticIPNetwork(
'10.0.0.0/24'),
'remote_ip_prefix': None,
'direction': 'ingress',
'metering_label_id': self.uuid,
'excluded': False,
'id': second_uuid},
'id': self.uuid}],
'id': self.uuid}]
expected_del = [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'destination_ip_prefix':
net_utils.AuthenticIPNetwork('0.0.0.0/0'),
'source_ip_prefix':
net_utils.AuthenticIPNetwork(
'10.0.0.0/24'),
'remote_ip_prefix': None,
'direction': 'ingress',
'metering_label_id': self.uuid,
'excluded': False,
'id': second_uuid},
'id': self.uuid}],
'id': self.uuid}]
ip_prefixes = {'source_ip_prefix': '10.0.0.0/24',
'destination_ip_prefix': '0.0.0.0/0'}
with self.router():
with self.metering_label() as label:
la = label['metering_label']
self.mock_uuid.return_value = second_uuid
with self.metering_label_rule(la['id'],
**ip_prefixes):
self.mock_add_rule.assert_called_with(mock.ANY,
expected_add)
self._delete('metering-label-rules', second_uuid,
as_admin=True)
self.mock_remove_rule.assert_called_with(mock.ANY,
expected_del)
def test_add_and_remove_metering_label_rule_src_and_remote_ip(self):
with self.router():
with self.metering_label() as label:
la = label['metering_label']
res = self._create_metering_label_rule(
self.fmt, la['id'], 'ingress', False,
remote_ip_prefix='0.0.0.0/0',
source_ip_prefix='10.0.0.0/24')
expected_error_code = 500
self.assertEqual(expected_error_code, res.status_int)
expected_error_message = "Cannot use 'remote-ip-prefix' in " \
"conjunction with " \
"'source-ip-prefix' or " \
"'destination-ip-prefix'."
self.assertEqual(
expected_error_message, jsonutils.loads(res.body)[
"NeutronError"]["message"])
def test_add_and_remove_metering_label_rule_dest_and_remote_ip(self):
with self.router():
with self.metering_label() as label:
la = label['metering_label']
res = self._create_metering_label_rule(
self.fmt, la['id'], 'ingress', False,
remote_ip_prefix='0.0.0.0/0',
destination_ip_prefix='8.8.8.8/32')
expected_error_code = 500
self.assertEqual(expected_error_code, res.status_int)
expected_error_message = "Cannot use 'remote-ip-prefix' in " \
"conjunction with " \
"'source-ip-prefix' or " \
"'destination-ip-prefix'."
self.assertEqual(
expected_error_message, jsonutils.loads(res.body)[
"NeutronError"]["message"])
def test_add_and_remove_metering_label_rule_no_ip_prefix_entered(self):
with self.router():
with self.metering_label() as label:
la = label['metering_label']
res = self._create_metering_label_rule(
self.fmt, la['id'], 'ingress', False)
expected_error_code = 500
self.assertEqual(expected_error_code, res.status_int)
expected_error_message = "You must define at least one of " \
"the following parameters " \
"'remote_ip_prefix', or " \
"'source_ip_prefix' or " \
"'destination_ip_prefix'."
self.assertEqual(
expected_error_message, jsonutils.loads(res.body)[
"NeutronError"]["message"])
def test_delete_metering_label_does_not_clear_router_tenant_id(self):
tenant_id = '654f6b9d-0f36-4ae5-bd1b-01616794ca60'
# TODO(ralonsoh): to investigate why the context in [1] has some value
# in session.transaction._connections, while during a normal operation,
# the ._connections value is empty.
# [1]https://github.com/openstack/neutron/blob/
# 1b9e9a6c2ccf7f9bc06429f53e5126f356ae3d4a/neutron/api/v2/base.py#L563
self.ctx.GUARD_TRANSACTION = False
with self.metering_label(tenant_id=tenant_id) as metering_label:
with self.router(tenant_id=tenant_id) as r:
router = self._show('routers', r['router']['id'],
tenant_id=tenant_id)
self.assertEqual(tenant_id, router['router']['tenant_id'])
metering_label_id = metering_label['metering_label']['id']
self._delete('metering-labels', metering_label_id, 204,
as_admin=True)
router = self._show('routers', r['router']['id'],
tenant_id=tenant_id)
self.assertEqual(tenant_id, router['router']['tenant_id'])
class TestMeteringPluginL3AgentScheduler(
test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
test_l3.L3NatTestCaseMixin,
test_metering_db.MeteringPluginDbTestCaseMixin):
resource_prefix_map = dict(
(k.replace('_', '-'), "/metering")
for k in metering_apidef.RESOURCE_ATTRIBUTE_MAP.keys()
)
def setUp(self, plugin_str=None, service_plugins=None, scheduler=None):
if not plugin_str:
plugin_str = ('neutron.tests.unit.extensions.test_l3.'
'TestL3NatIntAgentSchedulingPlugin')
if not service_plugins:
service_plugins = {'metering_plugin_name':
METERING_SERVICE_PLUGIN_KLASS}
if not scheduler:
scheduler = plugin_str
ext_mgr = MeteringTestExtensionManager()
super(TestMeteringPluginL3AgentScheduler,
self).setUp(plugin=plugin_str, ext_mgr=ext_mgr,
service_plugins=service_plugins)
self.uuid = '654f6b9d-0f36-4ae5-bd1b-01616794ca60'
uuid = 'oslo_utils.uuidutils.generate_uuid'
self.uuid_patch = mock.patch(uuid, return_value=self.uuid)
self.mock_uuid = self.uuid_patch.start()
self.ctx = context.Context('', self._tenant_id).elevated()
self.l3routers_patch = mock.patch(scheduler +
'.get_l3_agents_hosting_routers')
self.l3routers_mock = self.l3routers_patch.start()
self.topic = topics.METERING_AGENT
add = ('neutron.api.rpc.agentnotifiers.' +
'metering_rpc_agent_api.MeteringAgentNotifyAPI' +
'.add_metering_label')
self.add_patch = mock.patch(add)
self.mock_add = self.add_patch.start()
remove = ('neutron.api.rpc.agentnotifiers.' +
'metering_rpc_agent_api.MeteringAgentNotifyAPI' +
'.remove_metering_label')
self.remove_patch = mock.patch(remove)
self.mock_remove = self.remove_patch.start()
def test_add_metering_label_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
expected = [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': second_uuid, 'shared': False,
'name': 'label'}],
'id': self.uuid},
{'status': 'ACTIVE',
'name': 'router2',
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': second_uuid, 'shared': False,
'name': 'label'}],
'id': second_uuid}]
# bind each router to a specific agent
agent1 = agent_obj.Agent(mock.ANY, host='agent1')
agent2 = agent_obj.Agent(mock.ANY, host='agent2')
agents = {self.uuid: agent1,
second_uuid: agent2}
def side_effect(context, routers, admin_state_up, active):
return [agents[routers[0]]]
self.l3routers_mock.side_effect = side_effect
with self.router(name='router1'):
self.mock_uuid.return_value = second_uuid
with self.router(name='router2'):
with self.metering_label():
self.mock_add.assert_called_with(
mock.ANY, tools.UnorderedList(expected))
class TestMeteringPluginL3AgentSchedulerServicePlugin(
TestMeteringPluginL3AgentScheduler):
"""Unit tests for the case where separate service plugin
implements L3 routing.
"""
def setUp(self):
l3_plugin = ('neutron.tests.unit.extensions.test_l3.'
'TestL3NatAgentSchedulingServicePlugin')
service_plugins = {'metering_plugin_name':
METERING_SERVICE_PLUGIN_KLASS,
'l3_plugin_name': l3_plugin}
plugin_str = ('neutron.tests.unit.extensions.test_l3.'
'TestNoL3NatPlugin')
super(TestMeteringPluginL3AgentSchedulerServicePlugin, self).setUp(
plugin_str=plugin_str, service_plugins=service_plugins,
scheduler=l3_plugin)
class TestMeteringPluginRpcFromL3Agent(
test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
test_l3.L3NatTestCaseMixin,
test_metering_db.MeteringPluginDbTestCaseMixin):
resource_prefix_map = dict(
(k.replace('_', '-'), "/metering")
for k in metering_apidef.RESOURCE_ATTRIBUTE_MAP
)
def setUp(self):
service_plugins = {'metering_plugin_name':
METERING_SERVICE_PLUGIN_KLASS}
plugin = ('neutron.tests.unit.extensions.test_l3.'
'TestL3NatIntAgentSchedulingPlugin')
ext_mgr = MeteringTestExtensionManager()
super(TestMeteringPluginRpcFromL3Agent,
self).setUp(plugin=plugin, service_plugins=service_plugins,
ext_mgr=ext_mgr)
self.meter_plugin = directory.get_plugin(constants.METERING)
self.tenant_id_1 = 'tenant_id_1'
self.tenant_id_2 = 'tenant_id_2'
self.adminContext = context.get_admin_context()
helpers.register_l3_agent(host='agent1')
def test_get_sync_data_metering(self):
with self.subnet() as subnet:
s = subnet['subnet']
self._set_net_external(s['network_id'])
with self.router(name='router1', subnet=subnet) as router:
r = router['router']
self._add_external_gateway_to_router(r['id'], s['network_id'])
with self.metering_label(tenant_id=r['tenant_id']):
callbacks = metering_rpc.MeteringRpcCallbacks(
self.meter_plugin)
data = callbacks.get_sync_data_metering(self.adminContext,
host='agent1')
self.assertEqual('router1', data[0]['name'])
helpers.register_l3_agent(host='agent2')
data = callbacks.get_sync_data_metering(self.adminContext,
host='agent2')
self.assertFalse(data)
self._remove_external_gateway_from_router(
r['id'], s['network_id'])
def test_get_sync_data_metering_shared(self):
with self.router(name='router1', tenant_id=self.tenant_id_1):
with self.router(name='router2', tenant_id=self.tenant_id_2):
with self.metering_label(shared=True):
callbacks = metering_rpc.MeteringRpcCallbacks(
self.meter_plugin)
data = callbacks.get_sync_data_metering(self.adminContext)
routers = [router['name'] for router in data]
self.assertIn('router1', routers)
self.assertIn('router2', routers)
def test_get_sync_data_metering_not_shared(self):
with self.router(name='router1', tenant_id=self.tenant_id_1):
with self.router(name='router2', tenant_id=self.tenant_id_2):
with self.metering_label():
callbacks = metering_rpc.MeteringRpcCallbacks(
self.meter_plugin)
data = callbacks.get_sync_data_metering(self.adminContext)
routers = [router['name'] for router in data]
self.assertEqual([], routers)
def test_get_sync_data_metering_with_unscheduled_router(self):
with self.subnet() as subnet:
s = subnet['subnet']
self._set_net_external(s['network_id'])
with self.router(name='router1') as router1:
self._add_external_gateway_to_router(
router1['router']['id'], s['network_id'])
with self.router(name='router2'):
with self.metering_label():
callbacks = metering_rpc.MeteringRpcCallbacks(
self.meter_plugin)
data = callbacks.get_sync_data_metering(
self.adminContext, host='agent1')
self.assertEqual(
set(['router1']), set([r['name'] for r in data]))
self._remove_external_gateway_from_router(
router1['router']['id'], s['network_id'])
def test_get_sync_data_metering_with_inactive_router(self):
with self.subnet() as subnet:
s = subnet['subnet']
self._set_net_external(s['network_id'])
with self.router(name='router1') as router1:
self._add_external_gateway_to_router(
router1['router']['id'], s['network_id'])
with self.router(
name='router2', admin_state_up=False
) as router2:
self._add_external_gateway_to_router(
router2['router']['id'], s['network_id'])
with self.metering_label():
callbacks = metering_rpc.MeteringRpcCallbacks(
self.meter_plugin)
data = callbacks.get_sync_data_metering(
self.adminContext, host='agent1')
self.assertEqual(
set(['router1']), set([r['name'] for r in data]))
self._remove_external_gateway_from_router(
router2['router']['id'], s['network_id'])
self._remove_external_gateway_from_router(
router1['router']['id'], s['network_id'])
|
b9c8e7501b25592eee727073fd526703af2f598f
|
6c3989a6de8521ae478edcd6f457f54baa57f289
|
/plugin/symbols.py
|
a843c4644c1495cfa8a79a63d8508e16d760985a
|
[
"MIT"
] |
permissive
|
sublimelsp/LSP
|
18ba4b72ad390ee4da713d9b383869112c6d2d98
|
e6bbc8ffecd9d705c884c69160132265294c6430
|
refs/heads/main
| 2023-08-31T13:29:36.341484
| 2023-08-18T16:07:57
| 2023-08-18T16:07:57
| 87,645,313
| 909
| 138
|
MIT
| 2023-09-13T19:55:06
| 2017-04-08T15:51:20
|
Python
|
UTF-8
|
Python
| false
| false
| 11,429
|
py
|
symbols.py
|
import weakref
from .core.protocol import Request, DocumentSymbol, SymbolInformation, SymbolKind, SymbolTag
from .core.registry import LspTextCommand
from .core.sessions import print_to_status_bar
from .core.typing import Any, List, Optional, Tuple, Dict, Generator, Union, cast
from .core.views import range_to_region
from .core.views import SUBLIME_KIND_ID_COLOR_SCOPES
from .core.views import SublimeKind
from .core.views import SYMBOL_KINDS
from .core.views import text_document_identifier
from contextlib import contextmanager
import os
import sublime
import sublime_plugin
SUPPRESS_INPUT_SETTING_KEY = 'lsp_suppress_input'
def unpack_lsp_kind(kind: SymbolKind) -> SublimeKind:
return SYMBOL_KINDS.get(kind, sublime.KIND_AMBIGUOUS)
def get_symbol_color_scope_from_lsp_kind(kind: SymbolKind) -> str:
return SUBLIME_KIND_ID_COLOR_SCOPES.get(unpack_lsp_kind(kind)[0], "comment")
def symbol_information_to_quick_panel_item(
item: SymbolInformation,
show_file_name: bool = True
) -> sublime.QuickPanelItem:
st_kind, st_icon, st_display_type = unpack_lsp_kind(item['kind'])
tags = item.get("tags") or []
if SymbolTag.Deprecated in tags:
st_display_type = "⚠ {} - Deprecated".format(st_display_type)
container = item.get("containerName") or ""
    details = []  # type: List[str]
if container:
details.append(container)
if show_file_name:
file_name = os.path.basename(item['location']['uri'])
details.append(file_name)
return sublime.QuickPanelItem(
trigger=item["name"],
details=details,
annotation=st_display_type,
kind=(st_kind, st_icon, st_display_type))
@contextmanager
def _additional_name(names: List[str], name: str) -> Generator[None, None, None]:
names.append(name)
yield
names.pop(-1)
class LspSelectionClearCommand(sublime_plugin.TextCommand):
"""
Selections may not be modified outside the run method of a text command. Thus, to allow modification in an async
context we need to have dedicated commands for this.
https://github.com/sublimehq/sublime_text/issues/485#issuecomment-337480388
"""
def run(self, _: sublime.Edit) -> None:
self.view.sel().clear()
class LspSelectionAddCommand(sublime_plugin.TextCommand):
def run(self, _: sublime.Edit, regions: List[Tuple[int, int]]) -> None:
for region in regions:
self.view.sel().add(sublime.Region(*region))
class LspSelectionSetCommand(sublime_plugin.TextCommand):
def run(self, _: sublime.Edit, regions: List[Tuple[int, int]]) -> None:
self.view.sel().clear()
for region in regions:
self.view.sel().add(sublime.Region(*region))
class LspDocumentSymbolsCommand(LspTextCommand):
capability = 'documentSymbolProvider'
REGIONS_KEY = 'lsp_document_symbols'
def __init__(self, view: sublime.View) -> None:
super().__init__(view)
self.old_regions = [] # type: List[sublime.Region]
self.regions = [] # type: List[Tuple[sublime.Region, Optional[sublime.Region], str]]
def run(self, edit: sublime.Edit, event: Optional[Dict[str, Any]] = None) -> None:
self.view.settings().set(SUPPRESS_INPUT_SETTING_KEY, True)
session = self.best_session(self.capability)
if session:
session.send_request(
Request.documentSymbols({"textDocument": text_document_identifier(self.view)}, self.view),
lambda response: sublime.set_timeout(lambda: self.handle_response(response)),
lambda error: sublime.set_timeout(lambda: self.handle_response_error(error)))
def handle_response(self, response: Union[List[DocumentSymbol], List[SymbolInformation], None]) -> None:
self.view.settings().erase(SUPPRESS_INPUT_SETTING_KEY)
window = self.view.window()
if window and isinstance(response, list) and len(response) > 0:
panel_items = self.process_symbols(response)
self.old_regions = [sublime.Region(r.a, r.b) for r in self.view.sel()]
            # Find the region that intersects with or precedes the current selection end.
selected_index = 0
if self.old_regions:
first_selection = self.old_regions[0]
for i, (r, _, _) in enumerate(self.regions):
if r.begin() <= first_selection.b:
selected_index = i
else:
break
self.view.run_command("lsp_selection_clear")
window.show_quick_panel(
panel_items,
self.on_symbol_selected,
sublime.KEEP_OPEN_ON_FOCUS_LOST,
selected_index,
self.on_highlighted)
def handle_response_error(self, error: Any) -> None:
self.view.settings().erase(SUPPRESS_INPUT_SETTING_KEY)
print_to_status_bar(error)
def region(self, index: int) -> sublime.Region:
return self.regions[index][0]
def selection_region(self, index: int) -> Optional[sublime.Region]:
return self.regions[index][1]
def scope(self, index: int) -> str:
return self.regions[index][2]
def on_symbol_selected(self, index: int) -> None:
if index == -1:
if self.old_regions:
self.view.run_command("lsp_selection_set", {"regions": [(r.a, r.b) for r in self.old_regions]})
self.view.show_at_center(self.old_regions[0].begin())
else:
region = self.selection_region(index)
if not region:
self.view.erase_regions(self.REGIONS_KEY)
region = self.region(index)
self.view.run_command("lsp_selection_set", {"regions": [(region.a, region.a)]})
self.view.show_at_center(region.a)
self.old_regions.clear()
self.regions.clear()
def on_highlighted(self, index: int) -> None:
region = self.selection_region(index)
if region:
self.view.run_command("lsp_selection_set", {"regions": [region.to_tuple()]})
else:
region = self.region(index)
self.view.add_regions(self.REGIONS_KEY, [region], self.scope(index), '', sublime.DRAW_NO_FILL)
self.view.show_at_center(region.a)
def process_symbols(
self,
items: Union[List[DocumentSymbol], List[SymbolInformation]]
) -> List[sublime.QuickPanelItem]:
self.regions.clear()
panel_items = []
if 'selectionRange' in items[0]:
items = cast(List[DocumentSymbol], items)
panel_items = self.process_document_symbols(items)
else:
items = cast(List[SymbolInformation], items)
panel_items = self.process_symbol_informations(items)
# Sort both lists in sync according to the range's begin point.
sorted_results = zip(*sorted(zip(self.regions, panel_items), key=lambda item: item[0][0].begin()))
sorted_regions, sorted_panel_items = sorted_results
self.regions = list(sorted_regions) # type: ignore
return list(sorted_panel_items) # type: ignore
def process_document_symbols(self, items: List[DocumentSymbol]) -> List[sublime.QuickPanelItem]:
quick_panel_items = [] # type: List[sublime.QuickPanelItem]
names = [] # type: List[str]
for item in items:
self.process_document_symbol_recursive(quick_panel_items, item, names)
return quick_panel_items
def process_document_symbol_recursive(self, quick_panel_items: List[sublime.QuickPanelItem], item: DocumentSymbol,
names: List[str]) -> None:
lsp_kind = item["kind"]
self.regions.append((range_to_region(item['range'], self.view),
range_to_region(item['selectionRange'], self.view),
get_symbol_color_scope_from_lsp_kind(lsp_kind)))
name = item['name']
with _additional_name(names, name):
st_kind, st_icon, st_display_type = unpack_lsp_kind(lsp_kind)
formatted_names = " > ".join(names)
st_details = item.get("detail") or ""
if st_details:
st_details = "{} | {}".format(st_details, formatted_names)
else:
st_details = formatted_names
tags = item.get("tags") or []
if SymbolTag.Deprecated in tags:
st_display_type = "⚠ {} - Deprecated".format(st_display_type)
quick_panel_items.append(
sublime.QuickPanelItem(
trigger=name,
details=st_details,
annotation=st_display_type,
kind=(st_kind, st_icon, st_display_type)))
children = item.get('children') or [] # type: List[DocumentSymbol]
for child in children:
self.process_document_symbol_recursive(quick_panel_items, child, names)
def process_symbol_informations(self, items: List[SymbolInformation]) -> List[sublime.QuickPanelItem]:
quick_panel_items = [] # type: List[sublime.QuickPanelItem]
for item in items:
self.regions.append((range_to_region(item['location']['range'], self.view),
None, get_symbol_color_scope_from_lsp_kind(item['kind'])))
quick_panel_item = symbol_information_to_quick_panel_item(item, show_file_name=False)
quick_panel_items.append(quick_panel_item)
return quick_panel_items
class SymbolQueryInput(sublime_plugin.TextInputHandler):
def want_event(self) -> bool:
return False
def placeholder(self) -> str:
return "Enter symbol name"
class LspWorkspaceSymbolsCommand(LspTextCommand):
capability = 'workspaceSymbolProvider'
def input(self, _args: Any) -> sublime_plugin.TextInputHandler:
return SymbolQueryInput()
def run(self, edit: sublime.Edit, symbol_query_input: str, event: Optional[Any] = None) -> None:
session = self.best_session(self.capability)
if session:
self.weaksession = weakref.ref(session)
session.send_request(
Request.workspaceSymbol({"query": symbol_query_input}),
lambda r: self._handle_response(symbol_query_input, r),
self._handle_error)
def _open_file(self, symbols: List[SymbolInformation], index: int) -> None:
if index != -1:
session = self.weaksession()
if session:
session.open_location_async(symbols[index]['location'], sublime.ENCODED_POSITION)
def _handle_response(self, query: str, response: Union[List[SymbolInformation], None]) -> None:
if response:
matches = response
window = self.view.window()
if window:
window.show_quick_panel(
list(map(symbol_information_to_quick_panel_item, matches)),
lambda i: self._open_file(matches, i))
else:
sublime.message_dialog("No matches found for query: '{}'".format(query))
def _handle_error(self, error: Dict[str, Any]) -> None:
reason = error.get("message", "none provided by server :(")
msg = "command 'workspace/symbol' failed. Reason: {}".format(reason)
sublime.error_message(msg)
|
fe1fe725ea41db51f741d557ab3154e2d0cd5b2e
|
b1f587ee6eed481af0e453903e1c1ae7a2e7ef87
|
/pyxtal/util.py
|
0a91f8b7195b1754abde3edcc2697fe0f5b816a6
|
[
"MIT"
] |
permissive
|
qzhu2017/PyXtal
|
cdae49664c876c8d2b452b0c3f0db36587c34532
|
9fdb4ec509da6a97a239a3ae4fcfa427dcf32eff
|
refs/heads/master
| 2023-08-20T08:20:37.452641
| 2023-08-11T15:15:36
| 2023-08-11T15:15:36
| 128,165,891
| 194
| 62
|
MIT
| 2023-07-01T13:00:11
| 2018-04-05T06:08:04
|
Python
|
UTF-8
|
Python
| false
| false
| 18,088
|
py
|
util.py
|
"""
some utilities
"""
import numpy as np
from spglib import get_symmetry_dataset
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer as sga
from pymatgen.core.structure import Structure
from ase import Atoms
from pyxtal.symmetry import Hall
import os
import re
def find_dir(dirs):
    """
    A short function to find the first existing directory from a list.
    """
    for d in dirs:
        if os.path.isdir(d):
            return d
    raise RuntimeError("Cannot find the directory for dftb parameters")
def listToString(s):
    """Join a list of strings into a single string."""
    return "".join(s)
def pymatgen2ase(struc):
"""
    A shortcut to convert a pymatgen structure to an ase Atoms object.
"""
atoms = Atoms(symbols = struc.atomic_numbers, cell = struc.lattice.matrix, pbc=True)
atoms.set_scaled_positions(struc.frac_coords)
return atoms
def ase2pymatgen(struc):
"""
    A shortcut to convert an ase Atoms object to a pymatgen structure.
"""
lattice = struc.cell
coordinates = struc.get_scaled_positions()
species = struc.get_chemical_symbols()
return Structure(lattice, species, coordinates)
def symmetrize_cell(struc, mode='C'):
"""
    Symmetrize an ase structure and return it in the conventional or
    primitive setting.
Args:
struc: ase type
mode: output conventional or primitive cell
"""
P_struc = ase2pymatgen(struc)
finder = sga(P_struc,symprec=0.06)
if mode == 'C':
P_struc = finder.get_conventional_standard_structure()
else:
P_struc = finder.get_primitive_standard_structure()
return pymatgen2ase(P_struc)
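# A minimal usage sketch for symmetrize_cell, assuming an existing ase.Atoms
# object (the bulk Si structure below is only illustrative):
#
#     from ase.build import bulk
#     si = bulk('Si', 'diamond', a=5.43)
#     conv = symmetrize_cell(si, mode='C')   # conventional cell (8 atoms)
#     prim = symmetrize_cell(si, mode='P')   # any mode other than 'C' gives the primitive cell (2 atoms)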
def good_lattice(struc, maxvec=25.0, minvec=1.2, maxang=150, minang=30):
"""
check if the lattice has a good shape.
Args:
struc: pyxtal structure
"""
para = struc.lattice.get_para(degree=True)
if (max(para[:3])<maxvec) and (min(para[:3])>minvec)\
and (max(para[3:])<maxang) and (min(para[3:])>minang):
return True
else:
return False
def symmetrize(pmg, tol=1e-3, a_tol=5.0, style='pyxtal', hn=None):
"""
symmetrize the structure from spglib.
Args:
pmg: pymatgen structure
tol: tolerance
a_tol: angle tolerance
style: 'pyxtal' or spglib, differing in the choice of origin
hn: hall_number
Returns:
pymatgen structure with symmetrized lattice
"""
numbers = [site.species.elements[0].Z for site in pmg.sites]
atoms = (pmg.lattice.matrix, pmg.frac_coords, numbers)
dataset = get_symmetry_dataset(atoms, tol, angle_tolerance=a_tol)
if hn is None:
hn = Hall(dataset['number'], style=style).hall_default
if hn != dataset['hall_number']:
dataset = get_symmetry_dataset(atoms, tol,
angle_tolerance=a_tol,
hall_number=hn)
cell = dataset['std_lattice']
pos = dataset['std_positions']
numbers = dataset['std_types']
return Structure(cell, numbers, pos)
def get_symmetrized_pmg(pmg, tol=1e-3, a_tol=5.0, style='pyxtal', hn=None):
"""
Get the symmetrized Pymatgen structure. A slight modification to ensure that
    the structure adopts the standard setting according to the International
    Tables for Crystallography.
Args:
pmg: input pymatgen structure
tol: symmetry tolerance
a_tol: angle tolerance
style: 'pyxtal' or spglib, differing in the choice of origin
hn: hall_number
Returns:
pymatgen structure with symmetrized lattice
"""
pmg = symmetrize(pmg, tol, a_tol=a_tol, style=style, hn=hn)
s = sga(pmg, symprec=tol, angle_tolerance=a_tol)
# make sure that the coordinates are in standard setting
if hn is None:
hn = Hall(s._space_group_data['number'], style=style).hall_default
if hn != s._space_group_data["hall_number"]:
s._space_group_data = get_symmetry_dataset(s._cell, tol,
angle_tolerance=a_tol,
hall_number=hn)
return s.get_symmetrized_structure(), s.get_space_group_number()
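# A minimal usage sketch for get_symmetrized_pmg; "POSCAR" is a placeholder for
# any structure file pymatgen can read:
#
#     from pymatgen.core.structure import Structure
#     pmg = Structure.from_file("POSCAR")
#     sym_struc, spg_number = get_symmetrized_pmg(pmg, tol=1e-3)
#     print(spg_number)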
def extract_ase_db(db_file, ids):
    """
    A shortcut to extract the structural information
    from the ase db file by row ids.
    """
from ase.db import connect
from pyxtal import pyxtal
import os
if not os.path.exists("output"):
os.makedirs("output")
print("Dumping the structures to the folder output")
with connect(db_file) as db:
for id in ids:
s = db.get_atoms(id=id)
filename = "output/"+str(id)+".vasp"
s.write(filename, format='vasp', direct=True, vasp5=True)
my = pyxtal()
my.from_seed(s)
print(my)
def parse_cif(filename, header=False, spg=False, eng=False, csd=False, sim=False):
"""
read structures from a cif (our own format with #END)
Args:
filename: string
header: bool, whether or not return header
spg: bool, whether or not return the spg
"""
strings = []
headers = []
spgs = []
engs = []
csds = []
sims = []
with open(filename, 'r') as f:
lines = f.readlines()
start = None
end = None
for i in range(len(lines)):
if lines[i].find("data_") == 0:
if sim:
sims.append(float(lines[i].split(':')[-1]))
end = i
if start is not None:
tmp = []
for l in lines[start:end-1]:
if len(re.findall(r"[0-9][B-C]", l))>0 or \
len(re.findall(r"[A-Z][0-9]\' [0-9]", l))>0:
#print(l) #; import sys; sys.exit()
continue
tmp.append(l)
cif = listToString(tmp)
strings.append(cif)
start = i
headers.append(lines[i])
elif lines[i].find("_symmetry_Int_Tables_number") == 0:
spgs.append(int(lines[i].split()[-1]))
elif lines[i].find("#Energy") == 0:
engs.append(float(lines[i].split()[1]))
elif lines[i].find("_database_code") == 0:
tmp = lines[i].split()[-1]
csds.append(tmp[:-1])
#Last one
tmp = []
for l in lines[start:]:
if len(re.findall(r"[0-9][B-D]", l))>0 or \
len(re.findall(r"[A-Z][0-9]\' [0-9]", l))>0:
#print(l);
continue
tmp.append(l)
cif = listToString(tmp)
strings.append(cif)
if header:
return strings, headers
elif spg:
return strings, spgs
elif eng:
return strings, engs
elif csd:
return strings, csds
elif sim:
return strings, sims
else:
return strings
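# A minimal usage sketch for parse_cif; "structures.cif" is a placeholder for a
# multi-block cif written in the format described above (with #Energy lines when
# eng=True is used):
#
#     strings, spgs = parse_cif("structures.cif", spg=True)
#     print(len(strings), spgs[:5])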
def process_csd_cif(cif, remove_H=False):
"""
    Process a cif string from the CSD; sometimes it contains duplicate or
    disordered atom labels (e.g., C2') that are stripped here.
"""
lines = cif.split('\n')
tmp = []
for l in lines:
if len(re.findall(r"[0-9][A-Z]", l))>0 or len(re.findall(r"[A-Z]\?", l))>0 or \
len(re.findall(r"[0-9]\?", l))>0 or \
len(re.findall(r"[A-Z][0-9]\' [0-9]", l))>0:
#len(re.findall(r"[0-9]_[0-9]", l))>0 or \
#print(l) #; import sys; sys.exit()
continue
else:
if remove_H and len(re.findall(r" H ", l))>0:
continue
else:
tmp.append(l+'\n')
return listToString(tmp)
def get_similar_cids_from_pubchem(base, MaxRecords):
"""
Args:
base: PubChem CID of Starting chemical
MaxRecords: Number of Similar Compounds
Returns:
List of the CIDs of PubChem compounds similar to the base compound.
"""
import pubchempy as pcp
if type(base) == int: base = str(base)
cids = pcp.get_compounds(base,
searchtype="similarity",
MaxRecords=MaxRecords)
results = []
for x in cids:
        csd_codes = search_csd_code_by_pubchem(x.cid)
if len(csd_codes)>0:
d = {"cid": x.cid,
"smiles": x.canonical_smiles,
"name": x.iupac_name,
"csd_codes": csd_codes}
results.append(d)
print(d)
return results
def search_csd_code_by_pubchem(cid):
"""
Args:
cid: PubChem cid
Returns:
CIDs that have CCDC crystal structure data
"""
import urllib
import json
from monty.json import MontyDecoder
url0 = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug_view/data/compound/'
cid=str(cid)
url = url0 + cid + '/JSON'
csd_codes = []
try:
response = urllib.request.urlopen(url)
except urllib.error.HTTPError:
print("Problem in http connection", url)
return None
except urllib.error.URLError:
print("Problem in parsing", url)
return None
try:
contents = response.read()
if contents.decode().find('CCDC') > 0:
data = json.loads(contents, cls=MontyDecoder)
if 'Section' in data['Record']['Section'][0].keys():
if len(data['Record']['Section'][0]['Section']) == 3:
infos = data['Record']['Section'][0]['Section'][2]['Section'][0]['Information']
for info in infos:
csd_codes.append(info['Value']['StringWithMarkup'][0]['String'])
except:
print('Failed to parse json', url, '\n')
return csd_codes
def search_csd_entries_by_code(code):
"""
Args:
code: CSD code, e.g., ACSALA
Returns:
list of csd ids
"""
from ccdc.search import TextNumericSearch
from ccdc.crystal import PackingSimilarity as PS
def new_cryst(cryst, crysts, n_max):
spg1 = cryst.spacegroup_number_and_setting[0]
for ref in crysts:
spg2 = ref.spacegroup_number_and_setting[0]
if spg1 == spg2:
h = PS().compare(cryst, ref)
#print(cryst.identifier, ref.identifier, h.nmatched_molecules)
if h is not None and h.nmatched_molecules == n_max:
return False
return True
n_max = PS().settings.packing_shell_size
query = TextNumericSearch()
query.add_identifier(code)
hits = query.search()
unique_crysts = []
for hit in hits:
if hit.entry.has_3d_structure and hit.entry.pressure is None:
if new_cryst(hit.crystal, unique_crysts, n_max):
unique_crysts.append(hit.crystal)
#print(hit.entry.identifier, hit.entry.deposition_date)
return [c.identifier for c in unique_crysts]
def get_struc_from__parser(p):
"""
A utility to get the pymatgen structure from the CifParser
Sometimes the cif structure may have repeated atom entries
Args:
p: pymatgen CifParser object
Return:
a single pymatgen structure
"""
from pymatgen.util.coord import find_in_coord_list_pbc
from collections import OrderedDict
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.io.cif import str2float
import numpy as np
def get_matching_coord(coord, ops, atol=1e-4):
keys = list(coord_to_species.keys())
coords = np.array(keys)
for op in ops:
c = op.operate(coord)
inds = find_in_coord_list_pbc(coords, c, atol=atol)
if len(inds):
return keys[inds[0]]
return False
for i, d in enumerate(p._cif.data.values()):
ops = p.get_symops(d)
coord_to_species = OrderedDict()
d0 = {"_atom_site_label": [],
"_atom_site_fract_x": [],
"_atom_site_fract_y": [],
"_atom_site_fract_z": [],
}
for i in range(len(d["_atom_site_label"])):
try:
symbol = p._parse_symbol(d["_atom_site_type_symbol"][i])
except KeyError:
symbol = p._parse_symbol(d["_atom_site_label"][i])
el = get_el_sp(symbol)
x = str2float(d["_atom_site_fract_x"][i])
y = str2float(d["_atom_site_fract_y"][i])
z = str2float(d["_atom_site_fract_z"][i])
coord = (x, y, z)
match = get_matching_coord(coord, ops)
if not match:
d0['_atom_site_label'].append(el)
d0["_atom_site_fract_x"].append(str(x))
d0["_atom_site_fract_y"].append(str(y))
d0["_atom_site_fract_z"].append(str(z))
coord_to_species[coord] = el
d.data['_atom_site_label'] = d0['_atom_site_label']
d.data['_atom_site_fract_x'] = d0['_atom_site_fract_x']
d.data['_atom_site_fract_y'] = d0['_atom_site_fract_y']
d.data['_atom_site_fract_z'] = d0['_atom_site_fract_z']
s = p._get_structure(d, primitive=False, symmetrized=False)
return s
def Kgrid(struc, Kresol=0.10, dimension=3):
"""
Assign kpoints based on the lattice
"""
a, b, c, alpha, beta, gamma = struc.get_cell_lengths_and_angles()
vol = struc.get_volume()
dist = np.zeros(3);
dist[2] = np.abs(vol/(a*b*np.sin(np.radians(gamma))))
dist[1] = np.abs(vol/(a*c*np.sin(np.radians(beta))))
dist[0] = np.abs(vol/(b*c*np.sin(np.radians(alpha))))
Kpoints = np.ceil(1./(dist*Kresol))
if dimension == 2:
Kpoints[-1] = 1;
#print(a, b, c, alpha, beta, gamma)
#print(vol/(a*b*np.sin(gamma)), a*b, np.sin(gamma))
#print(vol/(a*c*np.sin(beta)), a*c, np.sin(beta))
#print(vol/(b*c*np.sin(alpha)), b*c, np.sin(alpha))
#print(Kpoints)
#import sys; sys.exit()
return Kpoints.astype(int)
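# A quick sanity check for Kgrid, assuming an ase.Atoms object; for a cubic cell
# with a = 3.6 A and Kresol = 0.10, each dist entry is 3.6, so the mesh becomes
# ceil(1 / (3.6 * 0.10)) = 3 along every axis:
#
#     from ase.build import bulk
#     cu = bulk('Cu', 'fcc', a=3.6, cubic=True)
#     print(Kgrid(cu, Kresol=0.10))   # -> [3 3 3]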
def sort_by_dimer(atoms, N_mols, id=10, tol=4.0):
"""
sort the ase atoms' xyz according to dimer
so far only tested on aspirin
Args:
atoms: atoms object from pyxtal
N_mols: number of molecules
        id: the reference atom id
        tol: tolerance distance to check if it is a dimer
"""
N_atoms = int(len(atoms)/N_mols)
pos = atoms.get_scaled_positions()
refs = pos[id:len(pos):N_atoms, :]
#print(refs)
    # compute the indices and shift
orders = []
shifts = []
while len(orders) < N_mols:
lefts = [i for i in range(N_mols) if i not in orders]
i = lefts[0]
orders.append(i)
shifts.append(np.zeros(3))
ref_i = refs[i]
good = False
for j in lefts[1:]:
ref_j = refs[j]
dist = ref_j - ref_i
shift = np.round(dist)
dist -= shift
dist = np.linalg.norm(dist.dot(atoms.cell[:]))
if dist < tol:
orders.append(j)
shifts.append(shift)
good = True
break
if not good:
raise RuntimeError('Cannot find match on molecule', i)
else:
print('get', i, j, dist, shift)
pos0 = atoms.get_positions()
pos1 = np.zeros([len(pos), 3])
for i, id in enumerate(orders):
s1, e1 = id*N_atoms, (id+1)*N_atoms
s2, e2 = i*N_atoms, (i+1)*N_atoms
pos1[s2:e2, :] += pos0[s1:e1, :] - shifts[i].dot(atoms.cell[:])
atoms.set_positions(pos1)
return atoms
def generate_wp_lib(spg_list, composition,
num_wp=(None, None),
num_fu=(None, None),
num_dof=(None, None),
N_max=1000):
"""
Generate wps according to the composition constraint (e.g., SiO2)
    Args:
- spg_list: list of space group choices
- composition: chemical compositions [1, 2]
- num_wp: (min_wp, max_wp)
- num_fu: (min_fu, max_fu)
- num_dof: (min_dof, max_dof)
Returns:
a list of wps [spg, ([wp1, ...], ... [wp1, ...]), dof]
"""
from pyxtal.symmetry import Group
composition = np.array(composition, dtype=int)
(min_wp, max_wp) = num_wp
(min_fu, max_fu) = num_fu
(min_dof, max_dof) = num_dof
if max_wp is None: max_wp = len(composition)
if min_wp is None: min_wp = len(composition)
if min_dof is None: min_dof = 1
if max_dof is None: max_dof = 1000
#print(max_wp, min_wp)
wps = []
for sg in spg_list:
g = Group(sg)
lat_dof = g.get_lattice_dof()
# determine the upper and lower limit
if min_fu is None: min_fu = max([int(len(g[-1])/min(composition)), 1])
if max_fu is None: max_fu = max([int(len(g[0])/max(composition)), 1])
count = 0
for i in range(max_fu, min_fu-1, -1):
letters, _, wp_ids = g.list_wyckoff_combinations(
composition*i, max_wp=max_wp,
min_wp=min_wp, Nmax=100000)
for j, wp in enumerate(wp_ids):
wp_dofs = 0
num = 0
for wp0 in wp:
for id in wp0:
wp_dofs += g[id].get_dof()
num += g[id].multiplicity
#print(sg, wp, letters[j])
num_dof = lat_dof + wp_dofs
if min_dof <= num_dof <= max_dof:
wps.append((num, sg, wp, lat_dof + wp_dofs))
count += 1
if count >= N_max:
break
return wps
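# A minimal usage sketch for generate_wp_lib with a 1:2 (SiO2-like) composition
# in two example space groups; the exact combinations returned depend on the
# Wyckoff tables:
#
#     wps = generate_wp_lib([92, 96], composition=[1, 2], num_wp=(None, 3))
#     for num, sg, wp, dof in wps[:3]:
#         print(num, sg, wp, dof)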
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument(
"-f",
dest="file",
help="path of database file"
)
parser.add_argument(
"-i",
dest="id",
help="index of the row",
)
options = parser.parse_args()
ids = options.id
if ids.find(",")>0:
ids = [int(id) for id in ids.split(",")]
else:
ids = [int(ids)]
extract_ase_db(options.file, ids)
|
e5a39b15e634a40bd2973837e2784e6a045a04b9
|
119646d6e1f13582c577fd7b87c9654839a0b806
|
/tests/spec/cms/blogs/test_blogs.py
|
f3653c62ef4fa176cfb63a279e20fab0dda730c3
|
[] |
permissive
|
HubSpot/hubspot-api-python
|
446daaceeb3a6ce27edcd0414603c6d4bc07e327
|
d51a64c413461c0b82d8a41743e752d878747ca1
|
refs/heads/master
| 2023-08-31T09:52:56.583803
| 2023-08-07T11:00:27
| 2023-08-07T11:00:27
| 248,865,684
| 227
| 98
|
Apache-2.0
| 2023-09-14T15:25:19
| 2020-03-20T22:41:24
|
Python
|
UTF-8
|
Python
| false
| false
| 188
|
py
|
test_blogs.py
|
from hubspot import HubSpot
from hubspot.discovery.cms.blogs.discovery import Discovery
def test_is_discoverable():
apis = HubSpot().cms
assert isinstance(apis.blogs, Discovery)
|
0adc8ba34d11570d479c6327c49683d4ef5d6d88
|
b313f1b7b3ae2162a44f416baba8357f0d052003
|
/papers/SubTagger/dataset_readers/BC5CDR_dataset_reader.py
|
93fdcd7f92398dd9a359722e1b9553543fb90eb8
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
microsoft/vert-papers
|
e7cae3dc790c01447a48caa0456f555120f20e84
|
c47d103d872cf3db2859410211a6083e0d0caf63
|
refs/heads/master
| 2023-08-18T19:42:21.218463
| 2023-08-16T02:21:25
| 2023-08-16T02:21:25
| 198,793,756
| 248
| 93
|
MIT
| 2023-08-16T02:21:27
| 2019-07-25T08:48:16
|
Python
|
UTF-8
|
Python
| false
| false
| 7,880
|
py
|
BC5CDR_dataset_reader.py
|
from typing import Dict, List, Sequence, Iterable, Tuple
import itertools
import logging
from overrides import overrides
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.dataset_readers.dataset_utils import to_bioul, enumerate_spans
from allennlp.data.fields import ListField, TextField, SequenceLabelField, Field, MetadataField, SpanField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def _is_divider(line: str) -> bool:
    # sentence boundaries in the data file are marked by empty lines
    return line.strip() == ''
def _extract_spans(tags: List[str]) -> Dict[Tuple[int, int], str]:
cur_tag = None
cur_start = None
gold_spans = {}
def _save_span(_cur_tag, _cur_start, _cur_id, _gold_spans):
if _cur_start is None:
return _gold_spans
        _gold_spans[(_cur_start, _cur_id - 1)] = _cur_tag  # inclusive start & end, in accord with conll-coref settings
return _gold_spans
# iterate over the tags
# (BIO1 scheme)
for _id, nt in enumerate(tags):
indicator = nt[0]
if indicator == 'B':
gold_spans = _save_span(cur_tag, cur_start, _id, gold_spans)
cur_start = _id
cur_tag = nt[2:]
pass
elif indicator == 'I':
# do nothing
pass
elif indicator == 'O':
gold_spans = _save_span(cur_tag, cur_start, _id, gold_spans)
cur_tag = 'O'
cur_start = _id
pass
_save_span(cur_tag, cur_start, _id+1, gold_spans)
return gold_spans
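# A worked example of what _extract_spans is expected to produce for BIO1 tags
# (inclusive start/end indices; 'O' stretches become their own spans):
#
#     _extract_spans(['B-Chemical', 'I-Chemical', 'O', 'B-Disease'])
#     # -> {(0, 1): 'Chemical', (2, 2): 'O', (3, 3): 'Disease'}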
# adding span information
@DatasetReader.register("BC5CDR")
class BC5CDRDatasetReader(DatasetReader):
_VALID_LABELS = {'ner', 'pos', 'chunk'}
    def __init__(self,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 lazy: bool = False,
                 coding_scheme: str = "BIOUL",
                 max_span_width: int = -1,
                 tag_label: str = "ner",
                 label_namespace: str = "labels") -> None:
        super().__init__(lazy)
        self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
        if tag_label is not None and tag_label not in self._VALID_LABELS:
            raise ConfigurationError("unknown tag label type: {}".format(tag_label))
        # text_to_instance() checks self.tag_label, so it has to be set here
        self.tag_label = tag_label
        self.label_namespace = label_namespace
        self.coding_scheme = coding_scheme
        self._original_coding_scheme = "IOB1"
        if max_span_width != -1:
            self._max_span_width = max_span_width
        else:
            self._max_span_width = None
@overrides
def _read(self, file_path: str) -> Iterable[Instance]:
def _to_IOB1(_io_s,_ner_tags):
ans = ["O" for i in range(len(_io_s))]
spans = []
left_end, right_end = None, None
current_type = "None"
for i, (x,y) in enumerate(zip(_io_s,_ner_tags)):
if x == "I":
if current_type != "None" and i > 0:
spans.append((left_end, i-1, current_type))
left_end = i
current_type = y
for (_lend,_rend,_type) in spans:
if _rend-_lend+1 == 1:
ans[_lend] = "B-" + _type
else:
ans[_lend] = "B-" + _type
for i in range(_lend+1,_rend+1):
ans[i] = "I-" + _type
return ans
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
with open(file_path, "r") as data_file:
logger.info("Reading instances from lines in file at: %s", file_path)
# Group into alternative divider / sentence chunks.
for is_divider, lines in itertools.groupby(data_file, _is_divider):
# Ignore the divider chunks, so that `lines` corresponds to the words
# of a single sentence.
if not is_divider:
fields = [line.strip().split() for line in lines]
# unzipping trick returns tuples, but our Fields need lists
fields = [list(field) for field in zip(*fields)]
tokens_, io_s, ner_tags = fields
# TextField requires ``Token`` objects
tokens = [Token(token) for token in tokens_]
yield self.text_to_instance(tokens, _to_IOB1(io_s, ner_tags))
def text_to_instance(self, # type: ignore
tokens: List[Token],
ner_tags: List[str] = None) -> Instance:
"""
We take `pre-tokenized` input here, because we don't have a tokenizer in this class.
"""
sequence = TextField(tokens, self._token_indexers)
instance_fields: Dict[str, Field] = {'tokens': sequence}
def _remove_BI(_one_tag):
if _one_tag == 'O':
return _one_tag
else:
return _one_tag[2:]
if self.coding_scheme == "BIOUL":
coded_ner = to_bioul(ner_tags,
encoding=self._original_coding_scheme) if ner_tags is not None else None
else:
# the default IOB1
coded_ner = ner_tags
# TODO:
# ner_tags -> spans of NE
# return something like spans, span_labels ("O" if span not in golden_spans, "PER", "LOC"... otherwise)
spans: List[Field] = []
span_labels: List[str] = []
gold_spans: List[Field] = []
gold_span_labels: List[str] = []
assert len(ner_tags) == len(tokens), "sentence:%s but ner_tags:%s"%(str(tokens), str(ner_tags))
ner_gold_spans = _extract_spans(ner_tags) # ner_gold_spans: Dict[tuple(startid, endid), str(entity_type)]
for start, end in enumerate_spans(ner_tags, offset=0, max_span_width=self._max_span_width):
span_labels.append(ner_gold_spans.get((start, end), 'O'))
spans.append(SpanField(start, end, sequence))
pass
_dict_gold_spans = {}
for ky, val in ner_gold_spans.items():
gold_span_labels.append(val)
gold_spans.append(SpanField(ky[0], ky[1], sequence))
if val != 'O':
_dict_gold_spans[ky] = val
pass
instance_fields["metadata"] = MetadataField({"words": [x.text for x in tokens] ,
"gold_spans": _dict_gold_spans})
assert len(spans) == len(span_labels), "span length not equal to span label length..."
span_field = ListField(spans) # a list of (start, end) tuples...
# contains all possible spans and their tags
instance_fields['spans'] = span_field
instance_fields['span_labels'] = SequenceLabelField(span_labels, span_field, "span_tags")
# only contain gold_spans and their tags
# e.g. (0,0,O), (1,1,O), (2,3,PER), (4,4,O) for 'I am Donald Trump .'
gold_span_field = ListField(gold_spans)
instance_fields['gold_spans'] = gold_span_field
instance_fields['gold_span_labels'] = SequenceLabelField(gold_span_labels,
gold_span_field, "span_tags")
# Add "tag label" to instance
if self.tag_label == 'ner' and coded_ner is not None:
instance_fields['tags'] = SequenceLabelField(coded_ner, sequence,
'token_tags')
return Instance(instance_fields)
|
d7742cbec0571f2f99748d6769e0f50698e4b34d
|
7f620e7902c0b9ccb1fcfd1427acd5936ea33814
|
/mlrun/feature_store/ingestion.py
|
d3b9f923646b0c785c004768c1bea362c9c2be89
|
[
"Apache-2.0"
] |
permissive
|
mlrun/mlrun
|
2074c230070129ce3becb211b92c90b29a2ce850
|
b5fe0c05ae7f5818a4a5a5a40245c851ff9b2c77
|
refs/heads/development
| 2023-09-06T00:09:21.546135
| 2023-09-05T19:38:13
| 2023-09-05T19:38:13
| 205,706,595
| 1,093
| 229
|
Apache-2.0
| 2023-09-14T14:14:10
| 2019-09-01T16:59:19
|
Python
|
UTF-8
|
Python
| false
| false
| 11,210
|
py
|
ingestion.py
|
# Copyright 2023 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import pandas as pd
import mlrun
from mlrun.datastore.sources import get_source_from_dict, get_source_step
from mlrun.datastore.targets import (
add_target_steps,
get_target_driver,
validate_target_list,
validate_target_placement,
)
from ..data_types import InferOptions
from ..datastore.store_resources import ResourceCache
from ..runtimes import RuntimeKinds
from ..runtimes.function_reference import FunctionReference
from ..serving.server import MockEvent, create_graph_server
from ..utils import logger, normalize_name
from .feature_set import FeatureSet
def init_featureset_graph(
source,
featureset,
namespace,
targets=None,
return_df=True,
verbose=False,
rows_limit=None,
):
"""create storey ingestion graph/DAG from feature set object"""
cache = ResourceCache()
graph = featureset.spec.graph.copy()
# init targets (and table)
targets = targets or []
server = create_graph_server(graph=graph, parameters={}, verbose=verbose)
server.init_states(context=None, namespace=namespace, resource_cache=cache)
if graph.engine != "sync":
        # todo: support rows_limit in storey sources
_add_data_steps(
graph,
cache,
featureset,
targets=targets,
source=source,
return_df=return_df,
context=server.context,
)
server.init_object(namespace)
return graph.wait_for_completion()
else:
        # initialize all the validators of the feature set
cache.cache_resource(featureset.uri, featureset, True)
server.init_object(namespace)
# if the source is a dataframe iterator we load/write it in chunks
chunk_id = 0
if hasattr(source, "to_dataframe"):
if source.is_iterator():
chunk_id = 1
chunks = source.to_dataframe()
else:
chunks = [source.to_dataframe()]
elif not hasattr(source, "to_csv"):
raise mlrun.errors.MLRunInvalidArgumentError("illegal source")
else:
chunks = [source]
entity_columns = list(featureset.spec.entities.keys())
key_fields = entity_columns if entity_columns else None
sizes = [0] * len(targets)
result_dfs = []
total_rows = 0
targets = [get_target_driver(target, featureset) for target in targets]
if featureset.spec.passthrough:
targets = [target for target in targets if not target.is_offline]
for chunk in chunks:
event = MockEvent(body=chunk)
if len(featureset.spec.entities) and isinstance(event.body, pd.DataFrame):
# set the entities to be the indexes of the df
event.body = entities_to_index(featureset, event.body)
df = server.run(event, get_body=True)
if df is not None:
for i, target in enumerate(targets):
size = target.write_dataframe(
df,
key_column=key_fields,
timestamp_key=featureset.spec.timestamp_key,
chunk_id=chunk_id,
)
if size:
sizes[i] += size
chunk_id += 1
result_dfs.append(df)
total_rows += df.shape[0]
if rows_limit and total_rows >= rows_limit:
break
for i, target in enumerate(targets):
target_status = target.update_resource_status("ready", size=sizes[i])
if verbose:
logger.info(f"wrote target: {target_status}")
result_df = pd.concat(result_dfs)
return result_df.head(rows_limit)
def featureset_initializer(server):
"""graph server hook to initialize feature set ingestion graph/DAG"""
context = server.context
cache = server.resource_cache
featureset, source, targets, _, _ = context_to_ingestion_params(context)
graph = featureset.spec.graph.copy()
_add_data_steps(
graph,
cache,
featureset,
targets=targets,
source=source,
context=context,
)
featureset.save()
server.graph = graph
def run_spark_graph(df, featureset, namespace, spark):
"""run spark (sync) pipeline"""
cache = ResourceCache()
graph = featureset.spec.graph.copy()
if graph.engine != "sync":
raise mlrun.errors.MLRunInvalidArgumentError("spark must use sync graph")
for step_dict in graph.steps.values():
if step_dict.class_name in [
"mlrun.feature_store.steps.FeaturesetValidator",
"mlrun.feature_store.steps.SetEventMetadata",
]:
raise mlrun.errors.MLRunRuntimeError(
f"{step_dict.class_name} is not supported for spark engine."
)
server = create_graph_server(graph=graph, parameters={})
server.init_states(context=None, namespace=namespace, resource_cache=cache)
server.init_object(namespace)
server.context.spark = spark
event = MockEvent(body=df)
return server.run(event, get_body=True)
def context_to_ingestion_params(context):
"""extract the ingestion task params from job/serving context"""
featureset_uri = context.get_param("featureset")
featureset = context.get_store_resource(featureset_uri)
infer_options = context.get_param("infer_options", InferOptions.Null)
source = context.get_param("source")
if source:
source = get_source_from_dict(source)
elif featureset.spec.source.to_dict():
source = get_source_from_dict(featureset.spec.source.to_dict())
overwrite = context.get_param("overwrite", None)
targets = context.get_param("targets", None)
if not targets:
targets = featureset.spec.targets
targets = [get_target_driver(target, featureset) for target in targets]
return featureset, source, targets, infer_options, overwrite
def _add_data_steps(
graph, cache, featureset, targets, source, return_df=False, context=None
):
_, default_final_step, _ = graph.check_and_process_graph(allow_empty=True)
validate_target_list(targets=targets)
validate_target_placement(graph, default_final_step, targets)
cache.cache_resource(featureset.uri, featureset, True)
table = add_target_steps(
graph, featureset, targets, to_df=return_df, final_step=default_final_step
)
if table:
cache.cache_table(featureset.uri, table, True)
entity_columns = list(featureset.spec.entities.keys())
key_fields = entity_columns if entity_columns else None
if source is not None:
source = get_source_step(
source,
key_fields=key_fields,
time_field=featureset.spec.timestamp_key,
context=context,
)
graph.set_flow_source(source)
def run_ingestion_job(name, featureset, run_config, schedule=None, spark_service=None):
name = normalize_name(name or f"{featureset.metadata.name}-ingest-job")
use_spark = featureset.spec.engine == "spark"
spark_runtimes = [RuntimeKinds.remotespark, RuntimeKinds.spark]
default_kind = RuntimeKinds.remotespark if use_spark else RuntimeKinds.job
if not run_config.function:
function_ref = featureset.spec.function.copy()
if function_ref.is_empty():
function_ref = FunctionReference(name=name, kind=default_kind)
if not function_ref.url:
function_ref.code = (function_ref.code or "") + _default_job_handler
run_config.function = function_ref
run_config.handler = "handler"
elif run_config.function.kind == RuntimeKinds.spark and spark_service is not None:
raise mlrun.errors.MLRunInvalidArgumentError(
"Spark operator jobs do not support standalone spark submission"
)
image = None if use_spark else mlrun.mlconf.feature_store.default_job_image
function = run_config.to_function(default_kind, image)
if use_spark and function.kind not in spark_runtimes:
raise mlrun.errors.MLRunInvalidArgumentError(
"ingest with spark engine require spark function kind"
)
function.metadata.project = featureset.metadata.project
function.metadata.name = function.metadata.name or name
if not use_spark and not function.spec.image:
raise mlrun.errors.MLRunInvalidArgumentError("function image must be specified")
if use_spark and function.kind == RuntimeKinds.remotespark and not run_config.local:
if not spark_service:
raise mlrun.errors.MLRunInvalidArgumentError(
"Remote spark ingestion requires the spark service name to be provided"
)
else:
function.with_spark_service(spark_service=spark_service)
task = mlrun.new_task(
name=name,
params=run_config.parameters,
handler=run_config.handler,
out_path=featureset.spec.output_path,
)
task.spec.secret_sources = run_config.secret_sources
task.set_label("job-type", "feature-ingest").set_label(
"feature-set", featureset.uri
)
if run_config.owner:
task.set_label("owner", run_config.owner).set_label(
"v3io_user", run_config.owner
)
    # set run UID and save in the feature set status (linking the feature set to the job)
task.metadata.uid = uuid.uuid4().hex
featureset.status.run_uri = task.metadata.uid
featureset.save()
    # when running on the server side we want to set the function db connection to the actual DB and not to use the httpdb
function.set_db_connection(featureset._get_run_db())
run = function.run(
task,
schedule=schedule,
local=run_config.local,
watch=run_config.watch,
auth_info=run_config.auth_info,
)
if run_config.watch:
featureset.reload()
return run
def entities_to_index(featureset: FeatureSet, data: pd.DataFrame) -> pd.DataFrame:
entities_names = [
ent.name for ent in featureset.spec.entities if ent.name in data.columns
]
if len(entities_names) > 0:
drop_columns = []
add_indexes = []
for ent_name in entities_names:
if ent_name in data.index.names:
drop_columns.append(ent_name)
else:
add_indexes.append(ent_name)
# drop duplicate columns and indexes
data = data.drop(drop_columns)
# append or reset index (append if index is not default)
append = data.index.names[0] is not None
data = data.set_index(add_indexes, append=append)
return data
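# A small illustration of entities_to_index, assuming a FeatureSet whose only
# entity is named "user_id" (the names here are illustrative): the entity column
# is moved into the DataFrame index, appending to any existing non-default index.
#
#     df = pd.DataFrame({"user_id": [1, 2], "value": [0.3, 0.7]})
#     df = entities_to_index(featureset, df)   # df is now indexed by "user_id"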
_default_job_handler = """
from mlrun.feature_store.api import ingest
def handler(context):
ingest(mlrun_context=context)
"""
|
d02683b9d161706cc9b2d9c744ededaddc7239d4
|
dcc25b784213b17015d2080a7623c772d474dc22
|
/reproduce/AlphaFold2-Chinese/tests/st/mindsponge/test_covid/pres/test_case_covid_pres.py
|
7000f47d1c30fb7985bde03df0f3fa1015408c64
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
mindspore-ai/community
|
930c9d9fdbead852e3597d522a72fe5b66bfc005
|
c72ce898482419117550ad16d93b38298f4306a1
|
refs/heads/master
| 2023-07-19T19:43:20.785198
| 2023-07-17T06:51:22
| 2023-07-17T06:51:22
| 250,693,100
| 193
| 10
|
Apache-2.0
| 2022-10-29T10:01:40
| 2020-03-28T02:00:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,336
|
py
|
test_case_covid_pres.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Test case covid pres"""
import time
import numpy as np
import pytest
from mindspore import context, Tensor
import mindspore.common.dtype as mstype
from mindsponge.md.npt import NPT as Simulation
class ArgsOpt():
"""ArgsOpt"""
def __init__(self):
self.amber_parm = '/home/workspace/mindspore_dataset/mindsponge_data/pres/s1ace2.parm7'
self.box = ''
self.c = '/home/workspace/mindspore_dataset/mindsponge_data/pres/s1ace2_heat.rst7'
self.checkpoint = ''
self.device_id = 0
self.i = '/home/workspace/mindspore_dataset/mindsponge_data/pres/pres.in'
self.o = ''
self.r = ''
self.u = False
self.x = ''
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_case_poly():
"""test_case_covid_min"""
context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=False)
args_opt = ArgsOpt()
simulation = Simulation(args_opt)
for i in range(1, 11):
print_step = 1 if i % simulation.ntwx == 0 or i == 1 or i == simulation.md_info.step_limit else 0
update_step = 1 if (i != 1 and i % simulation.update_interval == 0) else 0
temperature, total_potential_energy, sigma_of_bond_ene, sigma_of_angle_ene, sigma_of_dihedral_ene, \
nb14_lj_energy_sum, nb14_cf_energy_sum, lj_energy_sum, ee_ene, _, _, _, _, _, _, _, _ = \
simulation(Tensor(i), Tensor(print_step), Tensor(update_step, mstype.int32))
if i == 1:
start = time.time()
print(temperature, total_potential_energy, sigma_of_bond_ene, sigma_of_angle_ene, sigma_of_dihedral_ene, \
nb14_lj_energy_sum, nb14_cf_energy_sum, lj_energy_sum, ee_ene)
assert np.allclose(round(float(temperature.asnumpy()), 3), 298.406, rtol=0.1)
assert np.allclose(round(float(total_potential_energy.asnumpy()), 3), -320432.750, rtol=0.1)
assert np.allclose(round(float(sigma_of_bond_ene.asnumpy()), 3), 4228.548, rtol=0.1)
assert np.allclose(round(float(sigma_of_angle_ene.asnumpy()), 3), 6081.921, rtol=0.1)
assert np.allclose(round(float(sigma_of_dihedral_ene.asnumpy()), 3), 10484.753, rtol=0.1)
assert np.allclose(round(float(nb14_lj_energy_sum.asnumpy()), 3), 2990.386, rtol=0.1)
assert np.allclose(round(float(nb14_cf_energy_sum.asnumpy()), 3), 34394.328, rtol=0.1)
assert np.allclose(round(float(lj_energy_sum.asnumpy()), 3), 36317.559, rtol=0.1)
assert np.allclose(round(float(ee_ene.asnumpy()), 3), -414930.250, rtol=0.1)
end = time.time()
assert ((end - start) / 9) < 0.1
|
eee1c82c03ff86add9ce2e5706c814c6ac193691
|
73a0f661f1423d63e86489d4b2673f0103698aab
|
/python/oneflow/test/modules/test_global_ctc_loss.py
|
5895404f0f9444ac7b09d74d4e49588ec63dbb9c
|
[
"Apache-2.0"
] |
permissive
|
Oneflow-Inc/oneflow
|
4fc3e081e45db0242a465c4330d8bcc8b21ee924
|
0aab78ea24d4b1c784c30c57d33ec69fe5605e4a
|
refs/heads/master
| 2023-08-25T16:58:30.576596
| 2023-08-22T14:15:46
| 2023-08-22T14:15:46
| 81,634,683
| 5,495
| 786
|
Apache-2.0
| 2023-09-14T09:44:31
| 2017-02-11T06:09:53
|
C++
|
UTF-8
|
Python
| false
| false
| 5,842
|
py
|
test_global_ctc_loss.py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow as flow
import oneflow.unittest
import torch
from oneflow.test_utils.automated_test_util.generators import *
from oneflow.test_utils.automated_test_util.torch_flow_dual_object import globaltest
from oneflow.test_utils.test_util import GenArgDict
def log_softmax(logits, axis=0):
max_value = np.max(logits, axis, keepdims=True)
exp = np.exp(logits - max_value)
exp_sum = np.sum(exp, axis, keepdims=True)
dist = exp / exp_sum
return np.log(dist)
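# A quick numeric check of log_softmax: exponentiating the result and summing
# along the chosen axis should give 1 for every slice.
#
#     x = np.array([[1.0, 2.0], [3.0, 4.0]])
#     np.exp(log_softmax(x, axis=0)).sum(axis=0)   # -> array([1., 1.])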
def _compare_torch_and_oneflow(
test_case,
torch_ctc_loss,
flow_ctc_loss,
placement,
module_sbp,
in_sbp,
max_input_length,
batch_size,
num_classes,
max_target_length,
):
log_probs = np.random.random(
size=(max_input_length, batch_size, num_classes)
).astype(np.float32)
log_probs = log_softmax(log_probs, axis=2)
targets = np.random.randint(
1, high=num_classes, size=(batch_size, max_target_length), dtype=np.int32
)
input_lengths = np.random.randint(
max_input_length / 2, high=max_input_length, size=(batch_size,), dtype=np.int32
)
target_lengths = np.random.randint(
max_target_length / 2,
high=max_target_length,
size=(batch_size,),
dtype=np.int32,
)
log_probs_torch = torch.tensor(log_probs, dtype=torch.float32, requires_grad=True)
targets_torch = torch.tensor(targets, dtype=torch.int32)
input_lengths_torch = torch.tensor(input_lengths, dtype=torch.int32)
target_lengths_torch = torch.tensor(target_lengths, dtype=torch.int32)
log_probs_flow = (
flow.tensor(log_probs, dtype=flow.float32, requires_grad=True)
.to_global(flow.placement.all("cpu"), flow.sbp.broadcast)
.to_global(placement=placement, sbp=in_sbp)
)
targets_flow = (
flow.tensor(targets, dtype=flow.int32)
.to_global(flow.placement.all("cpu"), flow.sbp.broadcast)
.to_global(placement=placement, sbp=in_sbp)
)
input_lengths_flow = (
flow.tensor(input_lengths, dtype=flow.int32)
.to_global(flow.placement.all("cpu"), flow.sbp.broadcast)
.to_global(placement=placement, sbp=in_sbp)
)
target_lengths_flow = (
flow.tensor(target_lengths, dtype=flow.int32)
.to_global(flow.placement.all("cpu"), flow.sbp.broadcast)
.to_global(placement=placement, sbp=in_sbp)
)
out_torch = torch_ctc_loss(
log_probs_torch, targets_torch, input_lengths_torch, target_lengths_torch
)
out_flow = flow_ctc_loss(
log_probs_flow, targets_flow, input_lengths_flow, target_lengths_flow
)
# check forward
local_output = out_flow.to_global(
placement=placement,
sbp=[flow.sbp.broadcast for _ in range(len(placement.ranks.shape))],
).to_local()
if flow.env.get_rank() == 0:
test_case.assertTrue(
np.allclose(
out_torch.cpu().detach().numpy(),
local_output.numpy(),
rtol=1e-05,
atol=1e-05,
)
)
# check backward
out_torch.sum().backward()
out_flow.sum().backward()
local_x_grad = log_probs_flow.to_global(
placement=placement,
sbp=[flow.sbp.broadcast for _ in range(len(placement.ranks.shape))],
).to_local()
if flow.env.get_rank() == 0:
test_case.assertTrue(
np.allclose(
log_probs_torch.cpu().detach().numpy(),
local_x_grad.numpy(),
rtol=1e-05,
atol=1e-05,
)
)
def _test_ctc_loss_impl(
test_case,
placement,
module_sbp,
in_sbp,
max_input_length,
batch_size,
num_classes,
max_target_length,
blank,
reduction,
zero_infinity,
):
torch_ctc_loss = torch.nn.CTCLoss(
blank=blank, reduction=reduction, zero_infinity=zero_infinity
)
flow_ctc_loss = flow.nn.CTCLoss(
blank=blank, reduction=reduction, zero_infinity=zero_infinity
)
_compare_torch_and_oneflow(
test_case,
torch_ctc_loss,
flow_ctc_loss,
placement,
module_sbp,
in_sbp,
max_input_length,
batch_size,
num_classes,
max_target_length,
)
@flow.unittest.skip_unless_1n2d()
@unittest.skip("skip for now, because it segfaults several times in CI")
class TestCTCLossGlobal(oneflow.unittest.TestCase):
@globaltest
def test_ctc_loss_global(test_case):
arg_dict = OrderedDict()
arg_dict["max_input_length"] = [20]
arg_dict["batch_size"] = [4]
arg_dict["num_classes"] = [5]
arg_dict["max_target_length"] = [10]
arg_dict["blank"] = [0, 4]
arg_dict["reduction"] = ["mean", "none"]
arg_dict["zero_infinity"] = [False, True]
module_sbp = flow.sbp.broadcast
for args in GenArgDict(arg_dict):
for placement in all_placement():
for in_sbp in all_sbp(placement):
_test_ctc_loss_impl(
test_case, placement, module_sbp, in_sbp, **args
)
if __name__ == "__main__":
unittest.main()
|
afb3cfed6d2d34a9deee7732348ac5e5005eacce
|
cb4f118412a55c52d720bc79e4074606622920ac
|
/arcade/gl/texture.py
|
437818cd16300b10ec5fe99d632495aca464ed67
|
[
"MIT"
] |
permissive
|
pythonarcade/arcade
|
3e536306f0c44f911de149b58958d8b609ffad4b
|
908664efc256697d3098a347f63d217d97841782
|
refs/heads/development
| 2023-08-29T02:53:01.599145
| 2023-08-26T16:54:34
| 2023-08-26T16:54:34
| 49,003,082
| 786
| 215
|
NOASSERTION
| 2023-09-12T18:38:54
| 2016-01-04T14:46:52
|
Python
|
UTF-8
|
Python
| false
| false
| 30,630
|
py
|
texture.py
|
from __future__ import annotations
from ctypes import byref, string_at
import weakref
from typing import Optional, Tuple, Union, TYPE_CHECKING
from pyglet import gl
from .buffer import Buffer
from .utils import data_to_ctypes
from .types import PyGLuint, pixel_formats, BufferOrBufferProtocol
from ..types import BufferProtocol
if TYPE_CHECKING: # handle import cycle caused by type hinting
from arcade.gl import Context
class Texture2D:
"""
An OpenGL 2D texture.
We can create an empty black texture or a texture from byte data.
A texture can also be created with different datatypes such as
float, integer or unsigned integer.
The best way to create a texture instance is through :py:meth:`arcade.gl.Context.texture`
Supported ``dtype`` values are::
# Float formats
'f1': UNSIGNED_BYTE
'f2': HALF_FLOAT
'f4': FLOAT
# int formats
'i1': BYTE
'i2': SHORT
'i4': INT
# uint formats
'u1': UNSIGNED_BYTE
'u2': UNSIGNED_SHORT
'u4': UNSIGNED_INT
:param ctx: The context the object belongs to
:param Tuple[int, int] size: The size of the texture
:param components: The number of components (1: R, 2: RG, 3: RGB, 4: RGBA)
:param dtype: The data type of each component: f1, f2, f4 / i1, i2, i4 / u1, u2, u4
:param data: The texture data (optional). Can be bytes or any object supporting the buffer protocol.
:param filter: The minification/magnification filter of the texture
:param wrap_x: Wrap mode x
:param wrap_y: Wrap mode y
:param target: The texture type (Ignored. Legacy)
:param depth: creates a depth texture if `True`
:param samples: Creates a multisampled texture for values > 0.
This value will be clamped between 0 and the max
sample capability reported by the drivers.
:param immutable: Make the storage (not the contents) immutable. This can sometimes be
required when using textures with compute shaders.
"""
__slots__ = (
"_ctx",
"_glo",
"_width",
"_height",
"_dtype",
"_target",
"_components",
"_alignment",
"_depth",
"_compare_func",
"_format",
"_internal_format",
"_type",
"_component_size",
"_samples",
"_filter",
"_wrap_x",
"_wrap_y",
"_anisotropy",
"_immutable",
"__weakref__",
)
_compare_funcs = {
None: gl.GL_NONE,
"<=": gl.GL_LEQUAL,
"<": gl.GL_LESS,
">=": gl.GL_GEQUAL,
">": gl.GL_GREATER,
"==": gl.GL_EQUAL,
"!=": gl.GL_NOTEQUAL,
"0": gl.GL_NEVER,
"1": gl.GL_ALWAYS,
}
# Swizzle conversion lookup
_swizzle_enum_to_str = {
gl.GL_RED: 'R',
gl.GL_GREEN: 'G',
gl.GL_BLUE: 'B',
gl.GL_ALPHA: 'A',
gl.GL_ZERO: '0',
gl.GL_ONE: '1',
}
_swizzle_str_to_enum = {
'R': gl.GL_RED,
'G': gl.GL_GREEN,
'B': gl.GL_BLUE,
'A': gl.GL_ALPHA,
'0': gl.GL_ZERO,
'1': gl.GL_ONE,
}
def __init__(
self,
ctx: "Context",
size: Tuple[int, int],
*,
components: int = 4,
dtype: str = "f1",
data: Optional[BufferProtocol] = None,
filter: Optional[Tuple[PyGLuint, PyGLuint]] = None,
wrap_x: Optional[PyGLuint] = None,
wrap_y: Optional[PyGLuint] = None,
target=gl.GL_TEXTURE_2D,
depth=False,
samples: int = 0,
immutable: bool = False,
):
self._glo = glo = gl.GLuint()
self._ctx = ctx
self._width, self._height = size
self._dtype = dtype
self._components = components
self._component_size = 0
self._alignment = 1
self._target = target
self._samples = min(max(0, samples), self._ctx.info.MAX_SAMPLES)
self._depth = depth
self._immutable = immutable
self._compare_func: Optional[str] = None
self._anisotropy = 1.0
# Default filters for float and integer textures
        # Integer textures should use NEAREST interpolation by default,
        # since 3.3 core doesn't really support linear filtering of them consistently.
if "f" in self._dtype:
self._filter = gl.GL_LINEAR, gl.GL_LINEAR
else:
self._filter = gl.GL_NEAREST, gl.GL_NEAREST
self._wrap_x = gl.GL_REPEAT
self._wrap_y = gl.GL_REPEAT
if self._components not in [1, 2, 3, 4]:
raise ValueError("Components must be 1, 2, 3 or 4")
if data and self._samples > 0:
raise ValueError("Multisampled textures are not writable (cannot be initialized with data)")
self._target = gl.GL_TEXTURE_2D if self._samples == 0 else gl.GL_TEXTURE_2D_MULTISAMPLE
gl.glActiveTexture(gl.GL_TEXTURE0 + self._ctx.default_texture_unit)
gl.glGenTextures(1, byref(self._glo))
if self._glo.value == 0:
raise RuntimeError(
"Cannot create Texture. OpenGL failed to generate a texture id"
)
gl.glBindTexture(self._target, self._glo)
self._texture_2d(data)
# Only set texture parameters on non-multisamples textures
if self._samples == 0:
self.filter = filter or self._filter
self.wrap_x = wrap_x or self._wrap_x
self.wrap_y = wrap_y or self._wrap_y
if self._ctx.gc_mode == "auto":
weakref.finalize(self, Texture2D.delete_glo, self._ctx, glo)
self.ctx.stats.incr("texture")
def resize(self, size: Tuple[int, int]):
"""
Resize the texture. This will re-allocate the internal
memory and all pixel data will be lost.
"""
if self._immutable:
raise ValueError("Immutable textures cannot be resized")
gl.glActiveTexture(gl.GL_TEXTURE0 + self._ctx.default_texture_unit)
gl.glBindTexture(self._target, self._glo)
self._width, self._height = size
self._texture_2d(None)
def __del__(self):
# Intercept garbage collection if we are using Context.gc()
if self._ctx.gc_mode == "context_gc" and self._glo.value > 0:
self._ctx.objects.append(self)
def _texture_2d(self, data):
"""Create a 2D texture"""
# Start by resolving the texture format
try:
format_info = pixel_formats[self._dtype]
except KeyError:
raise ValueError(
f"dype '{self._dtype}' not support. Supported types are : {tuple(pixel_formats.keys())}"
)
_format, _internal_format, self._type, self._component_size = format_info
if data is not None:
byte_length, data = data_to_ctypes(data)
self._validate_data_size(data, byte_length, self._width, self._height)
        # If we are dealing with a multisampled texture we have fewer options
if self._target == gl.GL_TEXTURE_2D_MULTISAMPLE:
gl.glTexImage2DMultisample(
self._target,
self._samples,
_internal_format[self._components],
self._width,
self._height,
True, # Fixed sample locations
)
return
# Make sure we unpack the pixel data with correct alignment
# or we'll end up with corrupted textures
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, self._alignment)
gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, self._alignment)
# Create depth 2d texture
if self._depth:
gl.glTexImage2D(
self._target,
0, # level
gl.GL_DEPTH_COMPONENT24,
self._width,
self._height,
0,
gl.GL_DEPTH_COMPONENT,
gl.GL_UNSIGNED_INT, # gl.GL_FLOAT,
data,
)
self.compare_func = "<="
# Create normal 2d texture
else:
try:
self._format = _format[self._components]
self._internal_format = _internal_format[self._components]
if self._immutable:
# Specify immutable storage for this texture.
# glTexStorage2D can only be called once
gl.glTexStorage2D(
self._target,
1, # Levels
self._internal_format,
self._width,
self._height,
)
if data:
self.write(data)
else:
# Specify mutable storage for this texture.
# glTexImage2D can be called multiple times to re-allocate storage
gl.glTexImage2D(
self._target, # target
0, # level
self._internal_format, # internal_format
self._width, # width
self._height, # height
0, # border
self._format, # format
self._type, # type
data, # data
)
except gl.GLException as ex:
raise gl.GLException(
(
f"Unable to create texture: {ex} : dtype={self._dtype} "
f"size={self.size} components={self._components} "
f"MAX_TEXTURE_SIZE = {self.ctx.info.MAX_TEXTURE_SIZE}"
)
)
@property
def ctx(self) -> "Context":
"""
The context this texture belongs to
:type: :py:class:`~arcade.gl.Context`
"""
return self._ctx
@property
def glo(self) -> gl.GLuint:
"""
The OpenGL texture id
:type: GLuint
"""
return self._glo
@property
def width(self) -> int:
"""
The width of the texture in pixels
:type: int
"""
return self._width
@property
def height(self) -> int:
"""
The height of the texture in pixels
:type: int
"""
return self._height
@property
def dtype(self) -> str:
"""
The data type of each component
:type: str
"""
return self._dtype
@property
def size(self) -> Tuple[int, int]:
"""
The size of the texture as a tuple
:type: tuple (width, height)
"""
return self._width, self._height
@property
def samples(self) -> int:
"""
Number of samples if multisampling is enabled (read only)
:type: int
"""
return self._samples
@property
def byte_size(self) -> int:
"""
The byte size of the texture.
:type: int
"""
return pixel_formats[self._dtype][3] * self._components * self.width * self.height
@property
def components(self) -> int:
"""
Number of components in the texture
:type: int
"""
return self._components
@property
def component_size(self) -> int:
"""
Size in bytes of each component
:type: int
"""
return self._component_size
@property
def depth(self) -> bool:
"""
If this is a depth texture.
:type: bool
"""
return self._depth
@property
def immutable(self) -> bool:
"""
Does this texture have immutable storage?
:type: bool
"""
return self._immutable
@property
def swizzle(self) -> str:
"""
str: The swizzle mask of the texture (Default ``'RGBA'``).
        The swizzle mask changes/reorders the ``vec4`` value returned by the ``texture()`` function
        in a GLSL shader. This is represented by a 4 character string where each
        character can be::
'R' GL_RED
'G' GL_GREEN
'B' GL_BLUE
'A' GL_ALPHA
'0' GL_ZERO
'1' GL_ONE
Example::
# Alpha channel will always return 1.0
texture.swizzle = 'RGB1'
# Only return the red component. The rest is masked to 0.0
texture.swizzle = 'R000'
# Reverse the components
texture.swizzle = 'ABGR'
"""
gl.glActiveTexture(gl.GL_TEXTURE0 + self._ctx.default_texture_unit)
gl.glBindTexture(self._target, self._glo)
# Read the current swizzle values from the texture
swizzle_r = gl.GLint()
swizzle_g = gl.GLint()
swizzle_b = gl.GLint()
swizzle_a = gl.GLint()
gl.glGetTexParameteriv(self._target, gl.GL_TEXTURE_SWIZZLE_R, swizzle_r)
gl.glGetTexParameteriv(self._target, gl.GL_TEXTURE_SWIZZLE_G, swizzle_g)
gl.glGetTexParameteriv(self._target, gl.GL_TEXTURE_SWIZZLE_B, swizzle_b)
gl.glGetTexParameteriv(self._target, gl.GL_TEXTURE_SWIZZLE_A, swizzle_a)
swizzle_str = ""
for v in [swizzle_r, swizzle_g, swizzle_b, swizzle_a]:
swizzle_str += self._swizzle_enum_to_str[v.value]
return swizzle_str
@swizzle.setter
def swizzle(self, value: str):
if not isinstance(value, str):
raise ValueError(f"Swizzle must be a string, not '{type(str)}'")
if len(value) != 4:
raise ValueError("Swizzle must be a string of length 4")
swizzle_enums = []
for c in value:
try:
c = c.upper()
swizzle_enums.append(self._swizzle_str_to_enum[c])
except KeyError:
raise ValueError(f"Swizzle value '{c}' invalid. Must be one of RGBA01")
        # Bind the texture before changing its parameters
        # (matching the other parameter setters in this class)
        gl.glActiveTexture(gl.GL_TEXTURE0 + self._ctx.default_texture_unit)
        gl.glBindTexture(self._target, self._glo)
        gl.glTexParameteri(self._target, gl.GL_TEXTURE_SWIZZLE_R, swizzle_enums[0])
gl.glTexParameteri(self._target, gl.GL_TEXTURE_SWIZZLE_G, swizzle_enums[1])
gl.glTexParameteri(self._target, gl.GL_TEXTURE_SWIZZLE_B, swizzle_enums[2])
gl.glTexParameteri(self._target, gl.GL_TEXTURE_SWIZZLE_A, swizzle_enums[3])
@property
def filter(self) -> Tuple[int, int]:
"""Get or set the ``(min, mag)`` filter for this texture.
These are rules for how a texture interpolates.
The filter is specified for minification and magnification.
Default value is ``LINEAR, LINEAR``.
Can be set to ``NEAREST, NEAREST`` for pixelated graphics.
When mipmapping is used the min filter needs to be one of the
``MIPMAP`` variants.
Accepted values::
# Enums can be accessed on the context or arcade.gl
NEAREST # Nearest pixel
LINEAR # Linear interpolate
NEAREST_MIPMAP_NEAREST # Minification filter for mipmaps
LINEAR_MIPMAP_NEAREST # Minification filter for mipmaps
NEAREST_MIPMAP_LINEAR # Minification filter for mipmaps
LINEAR_MIPMAP_LINEAR # Minification filter for mipmaps
Also see
* https://www.khronos.org/opengl/wiki/Texture#Mip_maps
* https://www.khronos.org/opengl/wiki/Sampler_Object#Filtering
:type: tuple (min filter, mag filter)
"""
return self._filter
@filter.setter
def filter(self, value: Tuple[int, int]):
if not isinstance(value, tuple) or not len(value) == 2:
raise ValueError("Texture filter must be a 2 component tuple (min, mag)")
self._filter = value
gl.glActiveTexture(gl.GL_TEXTURE0 + self._ctx.default_texture_unit)
gl.glBindTexture(self._target, self._glo)
gl.glTexParameteri(self._target, gl.GL_TEXTURE_MIN_FILTER, self._filter[0])
gl.glTexParameteri(self._target, gl.GL_TEXTURE_MAG_FILTER, self._filter[1])
@property
def wrap_x(self) -> int:
"""
Get or set the horizontal wrapping of the texture. This decides how textures
are read when texture coordinates are outside the ``[0.0, 1.0]`` area.
Default value is ``REPEAT``.
Valid options are::
# Note: Enums can also be accessed in arcade.gl
            # Repeat pixels on the x axis
            texture.wrap_x = ctx.REPEAT
            # Repeat pixels on the x axis mirrored
texture.wrap_x = ctx.MIRRORED_REPEAT
# Repeat the edge pixels when reading outside the texture
texture.wrap_x = ctx.CLAMP_TO_EDGE
# Use the border color (black by default) when reading outside the texture
texture.wrap_x = ctx.CLAMP_TO_BORDER
:type: int
"""
return self._wrap_x
@wrap_x.setter
def wrap_x(self, value: int):
self._wrap_x = value
gl.glActiveTexture(gl.GL_TEXTURE0 + self._ctx.default_texture_unit)
gl.glBindTexture(self._target, self._glo)
gl.glTexParameteri(self._target, gl.GL_TEXTURE_WRAP_S, value)
@property
def wrap_y(self) -> int:
"""
        Get or set the vertical wrapping of the texture. This decides how textures
are read when texture coordinates are outside the ``[0.0, 1.0]`` area.
Default value is ``REPEAT``.
Valid options are::
# Note: Enums can also be accessed in arcade.gl
            # Repeat pixels on the y axis
            texture.wrap_y = ctx.REPEAT
            # Repeat pixels on the y axis mirrored
            texture.wrap_y = ctx.MIRRORED_REPEAT
            # Repeat the edge pixels when reading outside the texture
            texture.wrap_y = ctx.CLAMP_TO_EDGE
            # Use the border color (black by default) when reading outside the texture
            texture.wrap_y = ctx.CLAMP_TO_BORDER
:type: int
"""
return self._wrap_y
@wrap_y.setter
def wrap_y(self, value: int):
self._wrap_y = value
gl.glActiveTexture(gl.GL_TEXTURE0 + self._ctx.default_texture_unit)
gl.glBindTexture(self._target, self._glo)
gl.glTexParameteri(self._target, gl.GL_TEXTURE_WRAP_T, value)
@property
def anisotropy(self) -> float:
"""
Get or set the anisotropy for this texture.
"""
return self._anisotropy
@anisotropy.setter
def anisotropy(self, value):
self._anisotropy = max(1.0, min(value, self._ctx.info.MAX_TEXTURE_MAX_ANISOTROPY))
gl.glActiveTexture(gl.GL_TEXTURE0 + self._ctx.default_texture_unit)
gl.glBindTexture(self._target, self._glo)
gl.glTexParameterf(self._target, gl.GL_TEXTURE_MAX_ANISOTROPY, self._anisotropy)
@property
def compare_func(self) -> Optional[str]:
"""
Get or set the compare function for a depth texture::
texture.compare_func = None # Disable depth comparison completely
texture.compare_func = '<=' # GL_LEQUAL
texture.compare_func = '<' # GL_LESS
texture.compare_func = '>=' # GL_GEQUAL
texture.compare_func = '>' # GL_GREATER
texture.compare_func = '==' # GL_EQUAL
texture.compare_func = '!=' # GL_NOTEQUAL
texture.compare_func = '0' # GL_NEVER
texture.compare_func = '1' # GL_ALWAYS
:type: str
"""
return self._compare_func
@compare_func.setter
def compare_func(self, value: Union[str, None]):
if not self._depth:
raise ValueError(
"Depth comparison function can only be set on depth textures"
)
if not isinstance(value, str) and value is not None:
raise ValueError(f"value must be as string: {self._compare_funcs.keys()}")
func = self._compare_funcs.get(value, None)
if func is None:
raise ValueError(f"value must be as string: {self._compare_funcs.keys()}")
self._compare_func = value
gl.glActiveTexture(gl.GL_TEXTURE0 + self._ctx.default_texture_unit)
gl.glBindTexture(self._target, self._glo)
if value is None:
gl.glTexParameteri(self._target, gl.GL_TEXTURE_COMPARE_MODE, gl.GL_NONE)
else:
gl.glTexParameteri(
self._target, gl.GL_TEXTURE_COMPARE_MODE, gl.GL_COMPARE_REF_TO_TEXTURE
)
gl.glTexParameteri(self._target, gl.GL_TEXTURE_COMPARE_FUNC, func)
def read(self, level: int = 0, alignment: int = 1) -> bytes:
"""
Read the contents of the texture.
:param level: The texture level to read
:param alignment: Alignment of the start of each row in memory in number of bytes. Possible values: 1,2,4
"""
if self._samples > 0:
raise ValueError("Multisampled textures cannot be read directly")
if self._ctx.gl_api == "gl":
gl.glActiveTexture(gl.GL_TEXTURE0 + self._ctx.default_texture_unit)
gl.glBindTexture(self._target, self._glo)
gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, alignment)
buffer = (
gl.GLubyte
* (self.width * self.height * self._component_size * self._components)
)()
gl.glGetTexImage(gl.GL_TEXTURE_2D, level, self._format, self._type, buffer)
return string_at(buffer, len(buffer))
elif self._ctx.gl_api == "gles":
fbo = self._ctx.framebuffer(color_attachments=[self])
return fbo.read(components=self._components, dtype=self._dtype)
else:
raise ValueError("Unknown gl_api: '{self._ctx.gl_api}'")
def write(self, data: BufferOrBufferProtocol, level: int = 0, viewport=None) -> None:
"""Write byte data from the passed source to the texture.
The ``data`` value can be either an
:py:class:`arcade.gl.Buffer` or anything that implements the
`Buffer Protocol <https://docs.python.org/3/c-api/buffer.html>`_.
The latter category includes ``bytes``, ``bytearray``,
``array.array``, and more. You may need to use typing
workarounds for non-builtin types. See
:ref:`prog-guide-gl-buffer-protocol-typing` for more
information.
:param data: :class:`~arcade.gl.Buffer` or
buffer protocol object with
data to write.
:param level: The texture level to write
:param Union[Tuple[int, int], Tuple[int, int, int, int]] viewport:
The area of the texture to write. 2 or 4 component tuple
"""
# TODO: Support writing to layers using viewport + alignment
if self._samples > 0:
raise ValueError("Writing to multisampled textures not supported")
x, y, w, h = 0, 0, self._width, self._height
if viewport:
if len(viewport) == 2:
w, h = viewport
elif len(viewport) == 4:
x, y, w, h = viewport
else:
raise ValueError("Viewport must be of length 2 or 4")
if isinstance(data, Buffer):
gl.glBindBuffer(gl.GL_PIXEL_UNPACK_BUFFER, data.glo)
gl.glActiveTexture(gl.GL_TEXTURE0 + self._ctx.default_texture_unit)
gl.glBindTexture(self._target, self._glo)
gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1)
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
gl.glTexSubImage2D(
self._target, level, x, y, w, h, self._format, self._type, 0
)
gl.glBindBuffer(gl.GL_PIXEL_UNPACK_BUFFER, 0)
else:
byte_size, data = data_to_ctypes(data)
self._validate_data_size(data, byte_size, w, h)
gl.glActiveTexture(gl.GL_TEXTURE0 + self._ctx.default_texture_unit)
gl.glBindTexture(self._target, self._glo)
gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1)
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
gl.glTexSubImage2D(
self._target, # target
level, # level
x, # x offset
y, # y offset
w, # width
h, # height
self._format, # format
self._type, # type
data, # pixel data
)
def _validate_data_size(self, byte_data, byte_size, width, height) -> None:
"""Validate the size of the data to be written to the texture"""
expected_size = width * height * self._component_size * self._components
if byte_size != expected_size:
raise ValueError(
f"Data size {len(byte_data)} does not match expected size {expected_size}"
)
if len(byte_data) != byte_size:
raise ValueError(
f"Data size {len(byte_data)} does not match reported size {expected_size}"
)
def build_mipmaps(self, base: int = 0, max_level: int = 1000) -> None:
"""Generate mipmaps for this texture.
The default values usually work well.
Mipmaps are successively smaller versions of an original
texture with special filtering applied. Using mipmaps allows
OpenGL to render scaled versions of original textures with fewer
scaling artifacts.
Mipmaps can be made for textures of any size. Each mipmap
version halves the width and height of the previous one (e.g.
256 x 256, 128 x 128, 64 x 64, etc) down to a minimum of 1 x 1.
.. note:: Mipmaps will only be used if a texture's filter is
configured with a mipmap-type minification::
# Set up linear interpolating minification filter
texture.filter = ctx.LINEAR_MIPMAP_LINEAR, ctx.LINEAR
:param base: Level the mipmaps start at (usually 0)
:param max_level: The maximum number of levels to generate
Also see: https://www.khronos.org/opengl/wiki/Texture#Mip_maps
"""
if self._samples > 0:
raise ValueError("Multisampled textures don't support mimpmaps")
gl.glActiveTexture(gl.GL_TEXTURE0 + self._ctx.default_texture_unit)
gl.glBindTexture(gl.GL_TEXTURE_2D, self._glo)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_BASE_LEVEL, base)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAX_LEVEL, max_level)
gl.glGenerateMipmap(gl.GL_TEXTURE_2D)
def delete(self):
"""
Destroy the underlying OpenGL resource.
Don't use this unless you know exactly what you are doing.
"""
Texture2D.delete_glo(self._ctx, self._glo)
self._glo.value = 0
@staticmethod
def delete_glo(ctx: "Context", glo: gl.GLuint):
"""
Destroy the texture.
This is called automatically when the object is garbage collected.
:param ctx: OpenGL Context
:param glo: The OpenGL texture id
"""
# If we have no context, then we are shutting down, so skip this
if gl.current_context is None:
return
if glo.value != 0:
gl.glDeleteTextures(1, byref(glo))
ctx.stats.decr("texture")
def use(self, unit: int = 0) -> None:
"""Bind the texture to a channel,
:param unit: The texture unit to bind the texture.
"""
gl.glActiveTexture(gl.GL_TEXTURE0 + unit)
gl.glBindTexture(self._target, self._glo)
def bind_to_image(self, unit: int, read: bool = True, write: bool = True, level: int = 0):
"""
Bind textures to image units.
        Note that either or both of ``read`` and ``write`` need to be ``True``.
The supported modes are: read only, write only, read-write
:param unit: The image unit
:param read: The compute shader intends to read from this image
:param write: The compute shader intends to write to this image
:param level:
"""
if self._ctx.gl_api == "gles" and not self._immutable:
raise ValueError("Textures bound to image units must be created with immutable=True")
access = gl.GL_READ_WRITE
if read and write:
access = gl.GL_READ_WRITE
elif read and not write:
access = gl.GL_READ_ONLY
elif not read and write:
access = gl.GL_WRITE_ONLY
else:
raise ValueError("Illegal access mode. The texture must at least be read or write only")
gl.glBindImageTexture(unit, self._glo, level, 0, 0, access, self._internal_format)
def get_handle(self, resident: bool = True) -> int:
"""
Get a handle for bindless texture access.
        Once a handle is created, its parameters (filter, wrap, etc.)
        cannot be changed; attempting to do so has no effect.
        There is no way to undo this immutability.
Handles cannot be used by shaders until they are resident.
This method can be called multiple times to move a texture
in and out of residency::
            >>> texture.get_handle(resident=False)
            4294969856
            >>> texture.get_handle(resident=True)
            4294969856
        The same handle is returned if the handle already exists.
.. note:: Limitations from the OpenGL wiki
The amount of storage available for resident images/textures may be less
than the total storage for textures that is available. As such, you should
attempt to minimize the time a texture spends being resident. Do not attempt
to take steps like making textures resident/unresident every frame or something.
But if you are finished using a texture for some time, make it unresident.
Keyword Args:
resident (bool): Make the texture resident.
"""
handle = gl.glGetTextureHandleARB(self._glo)
is_resident = gl.glIsTextureHandleResidentARB(handle)
# Ensure we don't try to make a resident texture resident again
if resident:
if not is_resident:
gl.glMakeTextureHandleResidentARB(handle)
else:
if is_resident:
gl.glMakeTextureHandleNonResidentARB(handle)
return handle
def __repr__(self) -> str:
return "<Texture glo={} size={}x{} components={}>".format(
self._glo.value, self._width, self._height, self._components
)
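# --- Hedged usage sketch (not part of the original module) ---
# A minimal example of how Texture2D is usually reached through the
# arcade.gl Context: ctx.texture() is the public factory for this class.
# The window title, texture size and pixel data below are illustrative
# assumptions, not values taken from the original source.
#
# import arcade
#
# window = arcade.Window(800, 600, "texture demo")
# ctx = window.ctx
# data = bytes([255, 0, 0, 255] * 64 * 64)  # 64x64 opaque red RGBA pixels
# texture = ctx.texture((64, 64), components=4, data=data)
# texture.filter = ctx.NEAREST, ctx.NEAREST  # pixelated sampling
# texture.wrap_x = ctx.CLAMP_TO_EDGE
# texture.use(unit=0)  # bind to texture unit 0 before drawing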
|
0db1818096831704d66198669026f41b1f909cee
|
5917ffcb780cfcfe4e2b87b11fca1f68f387b239
|
/plenum/test/monitoring/test_request_time_tracker.py
|
851b1edef39594e4f12485b19cec356d5be61023
|
[
"Apache-2.0"
] |
permissive
|
hyperledger/indy-plenum
|
6ff9f705af80dfa28d4cb92743683f78bb937aa3
|
698b9500ad3a7a15993af72a1c35a406c5673262
|
refs/heads/main
| 2023-08-29T01:32:26.384729
| 2023-06-20T16:42:11
| 2023-06-20T16:42:11
| 51,585,028
| 171
| 420
|
Apache-2.0
| 2023-06-20T16:42:14
| 2016-02-12T12:03:16
|
Python
|
UTF-8
|
Python
| false
| false
| 7,793
|
py
|
test_request_time_tracker.py
|
import pytest
from plenum.server.monitor import RequestTimeTracker
INSTANCE_COUNT = 4
@pytest.fixture(scope="function")
def req_tracker():
instances = set(range(INSTANCE_COUNT))
removed_replica = INSTANCE_COUNT // 2
instances.remove(removed_replica)
return RequestTimeTracker(instances)
def test_request_tracker_start_adds_request(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
assert digest in req_tracker
assert req_tracker.started(digest) == now
assert digest in req_tracker.unordered()
assert digest in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest not in req_tracker.handled_unordered()
def test_request_tracker_handle_makes_request_handled_unordered(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
req_tracker.handle(digest)
assert digest in req_tracker
assert digest in req_tracker.unordered()
assert digest not in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest in req_tracker.handled_unordered()
def test_request_tracker_reset_clears_all_requests(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
req_tracker.handle(digest)
req_tracker.reset()
assert digest not in req_tracker
assert digest not in req_tracker.unordered()
assert digest not in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest not in req_tracker.handled_unordered()
def test_request_tracker_order_by_master_makes_request_ordered_and_returns_time_to_order(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
tto = req_tracker.order(0, digest, now + 5)
assert digest not in req_tracker.unordered()
assert digest not in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest not in req_tracker.handled_unordered()
assert int(tto) == 5
def test_request_tracker_order_by_master_makes_handled_request_ordered_and_returns_time_to_order(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
req_tracker.handle(digest)
tto = req_tracker.order(0, digest, now + 5)
assert digest not in req_tracker.unordered()
assert digest not in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest not in req_tracker.handled_unordered()
assert int(tto) == 5
def test_request_tracker_order_by_backup_returns_time_to_order(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
tto = req_tracker.order(1, digest, now + 5)
assert digest in req_tracker.unordered()
assert digest in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest not in req_tracker.handled_unordered()
assert int(tto) == 5
def test_request_tracker_deletes_request_only_when_it_is_ordered_by_all_instances(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
for instId in range(INSTANCE_COUNT - 1):
req_tracker.order(instId, digest, now)
assert digest in req_tracker
req_tracker.order(INSTANCE_COUNT - 1, digest, now)
assert digest not in req_tracker
assert digest not in req_tracker.unordered()
assert digest not in req_tracker.handled_unordered()
def test_request_tracker_doesnt_wait_for_new_instances_on_old_requests(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
req_tracker.add_instance(INSTANCE_COUNT)
for instId in range(INSTANCE_COUNT):
req_tracker.order(instId, digest, now)
assert digest not in req_tracker
assert digest not in req_tracker.unordered()
assert digest not in req_tracker.handled_unordered()
def test_request_tracker_waits_for_new_instances_on_new_requests(req_tracker):
digest = "digest"
now = 1.0
req_tracker.add_instance(INSTANCE_COUNT)
req_tracker.start(digest, now)
for instId in range(INSTANCE_COUNT):
req_tracker.order(instId, digest, now)
assert digest in req_tracker
req_tracker.order(INSTANCE_COUNT, digest, now)
assert digest not in req_tracker
assert digest not in req_tracker.unordered()
assert digest not in req_tracker.handled_unordered()
def test_request_tracker_performs_garbage_collection_on_remove_instance(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
req_tracker.order(1, digest, now)
req_tracker.order(2, digest, now)
req_tracker.remove_instance(0)
assert digest in req_tracker
req_tracker.remove_instance(3)
assert digest not in req_tracker
assert digest not in req_tracker.unordered()
assert digest not in req_tracker.handled_unordered()
def test_force_req_drop_not_started(req_tracker):
digest = "digest"
req_tracker.force_req_drop(digest)
def test_force_req_drop_started(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
assert digest in req_tracker
assert digest in req_tracker.unordered()
assert digest in [digest for digest, _ in req_tracker.unhandled_unordered()]
req_tracker.force_req_drop(digest)
assert digest not in req_tracker
assert digest not in req_tracker.unordered()
assert digest not in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest not in req_tracker.handled_unordered()
def test_force_req_drop_handled(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
req_tracker.handle(digest)
assert digest in req_tracker
assert digest in req_tracker.unordered()
assert digest not in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest in req_tracker.handled_unordered()
req_tracker.force_req_drop(digest)
assert digest not in req_tracker
assert digest not in req_tracker.unordered()
assert digest not in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest not in req_tracker.handled_unordered()
def test_force_req_drop_between_ordered_master(req_tracker):
digest = "digest"
start_ts = 1.0
now = 3.0
req_tracker.start(digest, start_ts)
tto = req_tracker.order(0, digest, now)
assert tto == 2.0
assert digest not in req_tracker.unordered()
req_tracker.force_req_drop(digest)
assert digest not in req_tracker
assert digest not in req_tracker.unordered()
assert digest not in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest not in req_tracker.handled_unordered()
tto = req_tracker.order(1, digest, now)
assert tto == 0.0
def test_force_req_drop_between_ordered_backup(req_tracker):
digest = "digest"
start_ts = 1.0
now = 3.0
req_tracker.start(digest, start_ts)
tto = req_tracker.order(1, digest, now)
assert tto == 2.0
assert digest in req_tracker.unordered()
req_tracker.force_req_drop(digest)
assert digest not in req_tracker
assert digest not in req_tracker.unordered()
assert digest not in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest not in req_tracker.handled_unordered()
tto = req_tracker.order(2, digest, now)
assert tto == 0.0
def test_force_req_drop_before_handle(req_tracker):
digest = "digest"
now = 1.0
req_tracker.start(digest, now)
req_tracker.force_req_drop(digest)
assert digest not in req_tracker
assert digest not in req_tracker.unordered()
assert digest not in [digest for digest, _ in req_tracker.unhandled_unordered()]
assert digest not in req_tracker.handled_unordered()
req_tracker.handle(digest)
assert digest not in req_tracker.handled_unordered()
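# --- Hedged summary (illustrative comments, not part of the test module) ---
# The RequestTimeTracker lifecycle exercised by the tests above:
#   start(digest, t)     request becomes unordered and unhandled
#   handle(digest)       request moves to handled_unordered()
#   order(inst, d, t)    returns the time-to-order; ordering by the master
#                        (instance 0) removes it from unordered(), and the
#                        request is deleted once every tracked instance has
#                        ordered it
#   force_req_drop(d)    unconditionally forgets the request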
|
18b6bea6030f2732af8fbd44d3fa77c3602c4493
|
73e82424a0c2bbdff890bf6537707fe4d75f054b
|
/scripts/tfmodisco.py
|
036d9db9622d2fec0ce77a0b88e749e18d09b89b
|
[
"MIT"
] |
permissive
|
kundajelab/tfmodisco
|
0bcfe2262927e598a7e880a745722835dfb69ae3
|
01a92d0f07799b504fcb149d5d09c9fd77b9f9a1
|
refs/heads/master
| 2023-05-24T18:47:42.447715
| 2023-05-18T22:15:01
| 2023-05-18T22:15:01
| 62,352,963
| 101
| 27
|
MIT
| 2023-05-18T22:15:02
| 2016-07-01T01:26:15
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,633
|
py
|
tfmodisco.py
|
from __future__ import division, print_function, absolute_import
import modisco
import modisco.core
import argparse
import numpy as np  # needed for np.sum below
def get_seqlets(task_names, contrib_scores, hypothetical_contribs,
                one_hot, coord_producer, overlap_resolver, flank=0):
    # NOTE: `flank` was an undefined free variable in the original script;
    # it is promoted to a parameter here (the default of 0 is an assumption).
contrib_scores_tracks = [
modisco.core.DataTrack(
name=key+"_contrib_scores",
fwd_tracks=contrib_scores[key],
rev_tracks=contrib_scores[key][:,::-1,::-1],
has_pos_axis=True) for key in task_names]
hypothetical_contribs_tracks = [
modisco.core.DataTrack(name=key+"_hypothetical_contribs",
fwd_tracks=hypothetical_contribs[key],
rev_tracks=hypothetical_contribs[key][:,::-1,::-1],
has_pos_axis=True)
for key in task_names]
    onehot_track = modisco.core.DataTrack(name="sequence", fwd_tracks=one_hot,
                                          rev_tracks=one_hot[:,::-1,::-1],
has_pos_axis=True)
track_set = modisco.core.TrackSet(data_tracks=
contrib_scores_tracks+hypothetical_contribs_tracks
+[onehot_track])
per_position_contrib_scores = dict([
(x, np.sum(contrib_scores[x],axis=2)) for x in task_names])
task_name_to_labeler = dict([
(task_name, modisco.core.SignedContribThresholdLabeler(
flank_to_ignore=flank,
name=task_name+"_label",
track_name=task_name+"_contrib_scores"))
for task_name in task_names])
seqlets = modisco.core.MultiTaskSeqletCreation(
coord_producer=coord_producer,
track_set=track_set,
overlap_resolver=overlap_resolver)(
task_name_to_score_track=per_position_contrib_scores,
task_name_to_labeler=task_name_to_labeler)
return seqlets
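# --- Hedged usage sketch (illustrative only) ---
# Shapes assumed by get_seqlets: each task maps to an (N, L, 4) array of
# per-base contribution scores, and `one_hot` is the matching (N, L, 4)
# one-hot sequence array. `coord_producer` and `overlap_resolver` are
# modisco objects whose exact classes depend on the installed modisco
# version, so they are left abstract here.
#
# task_names = ["task0"]
# one_hot = np.eye(4)[np.random.randint(0, 4, size=(10, 100))]
# contrib_scores = {"task0": np.random.randn(10, 100, 4) * one_hot}
# hypothetical_contribs = {"task0": np.random.randn(10, 100, 4)}
# seqlets = get_seqlets(task_names, contrib_scores, hypothetical_contribs,
#                       one_hot, coord_producer, overlap_resolver, flank=10)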
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--onehot_hdf5", required=True,
help="Path to .hdf5 with one-hot encoded seq data."
+" The data should be stored under a dataset"
+" named 'onehot'")
parser.add_argument("--hypothetical_contribs_hdf5", required=True,
help="Path to the .hdf5 with the hypothetical"
" contribs. The dataset names should correspond"
" to the different tasks to analyze")
parser.add_argument("--clustering_config", required=True,
help="Path to file with clustering config")
|
e9d0fd01c0215ca2a4e61edc5fa064e47065809b
|
0e273c6db269d739dedd33260ece8f039491b8db
|
/tests/test_algo.py
|
f563e74012bae197df99291eecc60db455690d03
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
chris-hld/spaudiopy
|
c0b9410a36cfdb1148c0113a7d9dbe27f344ab0c
|
b3bdf8cc45eab66c95392690f03b743265e75e59
|
refs/heads/master
| 2023-08-04T15:45:22.239424
| 2023-07-28T15:16:43
| 2023-07-28T15:16:43
| 169,112,105
| 116
| 9
|
MIT
| 2023-07-07T11:27:08
| 2019-02-04T16:45:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,287
|
py
|
test_algo.py
|
# -*- coding: utf-8 -*-
"""
pytest
@author: chris
Test algorithms.
"""
import pytest
import numpy as np
from numpy.testing import assert_allclose
import spaudiopy as spa
# SH Order
N_SPHS = [1, 3, 5] # higher orders might need more tolerance
@pytest.mark.parametrize('test_n_sph', N_SPHS)
def test_sph_filter_bank(test_n_sph):
N_sph = test_n_sph
sec_dirs = spa.utils.cart2sph(*spa.grids.load_t_design(2*N_sph).T)
c_n = spa.sph.maxre_modal_weights(N_sph)
[A, B] = spa.sph.design_sph_filterbank(N_sph, sec_dirs[0], sec_dirs[1],
c_n, 'real', 'perfect')
# diffuse SH signal
in_nm = np.random.randn((N_sph+1)**2, 1000)
# Sector signals (Analysis)
s_sec = A @ in_nm
# Reconstruction to SH domain
out_nm = B @ s_sec
# Perfect Reconstruction
assert_allclose(in_nm, out_nm)
@pytest.mark.parametrize('test_n_sph', N_SPHS)
def test_calculate_grid_weights(test_n_sph):
N_sph = test_n_sph
vecs = spa.grids.load_t_design(degree=2*N_sph)
azi, zen, _ = spa.utils.cart2sph(*vecs.T)
q_weights_t = spa.grids.calculate_grid_weights(azi, zen)
q_weights = 4*np.pi / len(q_weights_t) * np.ones_like(q_weights_t)
# Perfect Reconstruction
assert_allclose(q_weights_t, q_weights)
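# --- Hedged note (illustrative, not part of the test suite) ---
# Perfect reconstruction in test_sph_filter_bank is equivalent to B @ A
# acting as the identity on the (N_sph + 1)**2-dimensional SH coefficient
# space, so an alternative check under the same assumptions would be:
#
# n_coeffs = (N_sph + 1) ** 2
# assert_allclose(B @ A, np.eye(n_coeffs), atol=1e-10)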
|
aeef743bf682fdafa0b490637dccbda4f2dcb0f0
|
061ed9ee920b935de7d96067d140e538cb4afd0c
|
/cortex/dataset/viewRGB.py
|
530a486c7d426220bc91cd977969e92a04d7f27f
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
gallantlab/pycortex
|
f8144d5d7d899709d596b07e18ea6d2215c4217d
|
e07c4b93d49d9eb9b40c853597b9bba82b4c2738
|
refs/heads/main
| 2023-08-17T22:20:58.589602
| 2023-07-20T18:53:16
| 2023-07-20T18:53:16
| 12,472,709
| 522
| 144
|
BSD-2-Clause
| 2023-09-06T17:25:49
| 2013-08-29T22:33:10
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 24,394
|
py
|
viewRGB.py
|
import numpy as np
import colorsys
import warnings
from .views import Dataview, Volume, Vertex
from .braindata import VolumeData, VertexData, _hash
from ..database import db
from .. import options
default_cmap = options.config.get("basic", "default_cmap")
class Colors(object):
"""
Set of known colors
"""
RoseRed = (237, 35, 96)
LimeGreen = (141, 198, 63)
SkyBlue = (0, 176, 218)
DodgerBlue = (30, 144, 255)
Red = (255, 000, 000)
Green = (000, 255, 000)
Blue = (000, 000, 255)
def RGB2HSV(color):
"""
    Converts RGB to HSV
Parameters
----------
color : tuple<uint8, uint8, uint8>
RGB color value
Returns
-------
tuple<int, float, float>
HSV values. Hue in degrees, saturation and value on [0, 1]
"""
hue, saturation, value = colorsys.rgb_to_hsv(color[0] / 255.0, color[1] / 255.0, color[2] / 255.0)
hue *= 360
return (int(hue), saturation, value)
def HSV2RGB(color):
"""
Converts HSV to RGB
Parameters
----------
color : tuple<int, float, float>
HSV values. Hue in degrees, saturation and value on [0, 1]
Returns
-------
tuple<uint8, uint8, uint8>
RGB color value
"""
r, g, b = colorsys.hsv_to_rgb(color[0] / 360.0, color[1], color[2])
return (int(r * 255), int(g * 255), int(b * 255))
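# --- Hedged example (illustrative only) ---
# Round trip through the two helpers above. RGB2HSV truncates the hue to a
# whole number of degrees, so the round trip can be off by one per channel.
#
# hsv = RGB2HSV(Colors.DodgerBlue)   # roughly (209, 0.88, 1.0)
# rgb = HSV2RGB(hsv)                 # close to the original (30, 144, 255)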
class DataviewRGB(Dataview):
"""Abstract base class for RGB data views.
"""
def __init__(self, subject=None, alpha=None, description="", state=None, **kwargs):
self.alpha = alpha
self.subject = self.red.subject
self.movie = self.red.movie
self.description = description
self.state = state
self.attrs = kwargs
if 'priority' not in self.attrs:
self.attrs['priority'] = 1
# If movie, make sure each channel has the same number of time points
if self.red.movie:
if not self.red.data.shape[0] == self.green.data.shape[0] == self.blue.data.shape[0]:
raise ValueError("For movie data, all three channels have to be the same length")
def uniques(self, collapse=False):
if collapse:
yield self
else:
yield self.red
yield self.green
yield self.blue
if self.alpha is not None:
yield self.alpha
def _write_hdf(self, h5, name="data", xfmname=None):
self._cls._write_hdf(self.red, h5)
self._cls._write_hdf(self.green, h5)
self._cls._write_hdf(self.blue, h5)
alpha = None
if self.alpha is not None:
self._cls._write_hdf(self.alpha, h5)
alpha = self.alpha.name
data = [self.red.name, self.green.name, self.blue.name, alpha]
viewnode = Dataview._write_hdf(self, h5, name=name,
data=[data], xfmname=xfmname)
return viewnode
def to_json(self, simple=False):
sdict = super(DataviewRGB, self).to_json(simple=simple)
if simple:
sdict['name'] = self.name
sdict['subject'] = self.subject
sdict['min'] = 0
sdict['max'] = 255
else:
sdict['data'] = [self.name]
sdict['cmap'] = [default_cmap]
sdict['vmin'] = [0]
sdict['vmax'] = [255]
return sdict
def get_cmapdict(self):
return dict()
class VolumeRGB(DataviewRGB):
"""
Contains RGB (or RGBA) colors for each voxel in a volumetric dataset.
Includes information about the subject and transform for the data.
Three data channels are mapped into a 3D color set. By default the data
channels are mapped on to red, green, and blue. They can also be mapped to
be different colors as specified, and then linearly combined.
Each data channel is represented as a separate Volume object (these can
either be supplied explicitly as Volume objects or implicitly as numpy
arrays). The vmin for each Volume will be mapped to the minimum value for
that data channel, and the vmax will be mapped to the maximum value.
    If `shared_range` is True, the vmin and vmax will instead be computed by
combining all three data channels.
Parameters
----------
channel1 : ndarray or Volume
Array or Volume for the first data channel for each
voxel. Can be a 1D or 3D array (see Volume for details), or a Volume.
channel2 : ndarray or Volume
Array or Volume for the second data channel for each
voxel. Can be a 1D or 3D array (see Volume for details), or a Volume.
channel3 : ndarray or Volume
        Array or Volume for the third data channel for each
voxel. Can be a 1D or 3D array (see Volume for details), or a Volume.
subject : str, optional
Subject identifier. Must exist in the pycortex database. If not given,
        channel1 must be a Volume from which the subject can be extracted.
xfmname : str, optional
Transform name. Must exist in the pycortex database. If not given,
        channel1 must be a Volume from which the transform can be extracted.
alpha : ndarray or Volume, optional
Array or Volume that represents the alpha component of the color for each
voxel. Can be a 1D or 3D array (see Volume for details), or a Volume. If
None, all voxels will be assumed to have alpha=1.0.
description : str, optional
String describing this dataset. Displayed in webgl viewer.
state : optional
TODO: describe what this is
channel1color : tuple<uint8, uint8, uint8>
RGB color to use for the first data channel
channel2color : tuple<uint8, uint8, uint8>
RGB color to use for the second data channel
channel3color : tuple<uint8, uint8, uint8>
RGB color to use for the third data channel
max_color_value : float [0, 1], optional
Maximum HSV value for voxel colors. If not given, will be the value of
the average of the three channel colors.
max_color_saturation: float [0, 1]
Maximum HSV saturation for voxel colors.
shared_range : bool
Use the same vmin and vmax for all three color channels?
shared_vmin : float, optional
Predetermined shared vmin. Does nothing if shared_range == False. If not given,
        will be the 1st percentile of all values across all three channels.
shared_vmax : float, optional
Predetermined shared vmax. Does nothing if shared_range == False. If not given,
will be the 99th percentile of all values across all three channels
**kwargs
All additional arguments in kwargs are passed to the VolumeData and
Dataview.
"""
_cls = VolumeData
def __init__(self, channel1, channel2, channel3, subject=None, xfmname=None, alpha=None, description="",
state=None, channel1color=Colors.Red, channel2color=Colors.Green, channel3color=Colors.Blue,
max_color_value=None, max_color_saturation=1.0, shared_range=False, shared_vmin=None,
shared_vmax=None, **kwargs):
channel1color = tuple(channel1color)
channel2color = tuple(channel2color)
channel3color = tuple(channel3color)
if isinstance(channel1, VolumeData):
if not isinstance(channel2, VolumeData) or channel1.subject != channel2.subject:
raise TypeError("Data channel 2 is not a VolumeData object or is from a different subject")
if not isinstance(channel3, VolumeData) or channel1.subject != channel3.subject:
raise TypeError("Data channel 2 is not a VolumeData object or is from a different subject")
if (subject is not None) and (channel1.subject != subject):
raise ValueError('Subject in VolumeData objects is different than specified subject')
if (channel1color == Colors.Red) and (channel2color == Colors.Green) and (channel3color == Colors.Blue) \
and shared_range is False:
# R/G/B basis can be directly passed through
self.red = channel1
self.green = channel2
self.blue = channel3
self.alpha = alpha
else: # need to remap colors
red, green, blue, alpha = VolumeRGB.color_voxels(
channel1, channel2, channel3,
channel1color, channel2color, channel3color,
max_color_value, max_color_saturation,
shared_range, shared_vmin, shared_vmax, alpha=alpha
)
self.red = Volume(red, channel1.subject, channel1.xfmname)
self.green = Volume(green, channel1.subject, channel1.xfmname)
self.blue = Volume(blue, channel1.subject, channel1.xfmname)
self.alpha = alpha
else:
if subject is None or xfmname is None:
raise TypeError("Subject and xfmname are required")
if (channel1color == Colors.Red) and (channel2color == Colors.Green) and (channel3color == Colors.Blue)\
and shared_range is False:
# R/G/B basis can be directly passed through
self.red = Volume(channel1, subject, xfmname)
self.green = Volume(channel2, subject, xfmname)
self.blue = Volume(channel3, subject, xfmname)
self.alpha = alpha
else: # need to remap colors
red, green, blue, alpha = VolumeRGB.color_voxels(
channel1, channel2, channel3,
channel1color, channel2color, channel3color,
max_color_value, max_color_saturation,
shared_range, shared_vmin, shared_vmax, alpha=alpha
)
self.red = Volume(red, subject, xfmname)
self.green = Volume(green, subject, xfmname)
self.blue = Volume(blue, subject, xfmname)
self.alpha = alpha
if self.red.xfmname == self.green.xfmname == self.blue.xfmname == self.alpha.xfmname:
self.xfmname = self.red.xfmname
else:
raise ValueError('Cannot handle different transforms per volume')
super(VolumeRGB, self).__init__(subject, alpha, description=description, state=state, **kwargs)
@property
def alpha(self):
"""Compute alpha transparency"""
alpha = self._alpha
if alpha is None:
alpha = np.ones(self.red.volume.shape)
alpha = Volume(alpha, self.red.subject, self.red.xfmname, vmin=0, vmax=1)
if not isinstance(alpha, Volume):
if alpha.dtype != np.uint8 and (alpha.min() < 0 or alpha.max() > 1):
warnings.warn(
"Some alpha values are outside the range of [0, 1]. "
"Consider passing a Volume object as alpha with explicit vmin, vmax "
"keyword arguments.",
Warning
)
alpha = Volume(alpha, self.red.subject, self.red.xfmname, vmin=0, vmax=1)
rgb = np.array([self.red.volume, self.green.volume, self.blue.volume])
mask = np.isnan(rgb).any(axis=0)
alpha.volume[mask] = alpha.vmin
return alpha
@alpha.setter
def alpha(self, alpha):
self._alpha = alpha
def to_json(self, simple=False):
sdict = super(VolumeRGB, self).to_json(simple=simple)
if simple:
sdict['shape'] = self.red.shape
else:
sdict['xfm'] = [list(np.array(db.get_xfm(self.subject, self.xfmname, 'coord').xfm).ravel())]
return sdict
@property
def volume(self):
"""5-dimensional volume (t, z, y, x, rgba) with data that has been mapped
into 8-bit unsigned integers that correspond to colors.
"""
volume = []
for dv in (self.red, self.green, self.blue, self.alpha):
if dv.volume.dtype != np.uint8:
vol = dv.volume.astype("float32", copy=True)
if dv.vmin is None:
if vol.min() < 0:
vol -= vol.min()
else:
vol -= dv.vmin
if dv.vmax is None:
if vol.max() > 1:
vol /= vol.max()
else:
vol /= dv.vmax - dv.vmin
vol = (np.clip(vol, 0, 1) * 255).astype(np.uint8)
else:
vol = dv.volume.copy()
volume.append(vol)
return np.array(volume).transpose([1, 2, 3, 4, 0])
def __repr__(self):
return "<RGB volumetric data for (%s, %s)>"%(self.red.subject, self.red.xfmname)
def __hash__(self):
return hash(_hash(self.volume))
@property
def name(self):
return "__%s"%_hash(self.volume)[:16]
def _write_hdf(self, h5, name="data"):
return super(VolumeRGB, self)._write_hdf(h5, name=name, xfmname=[self.xfmname])
@property
def raw(self):
return self
@staticmethod
def color_voxels(channel1, channel2, channel3, channel1color, channel2color,
channel3Color, value_max, saturation_max, common_range,
common_min, common_max, alpha=None):
"""
Colors voxels in 3 color dimensions but not necessarily canonical red, green, and blue
Parameters
----------
channel1 : ndarray or Volume
voxel values for first channel
channel2 : ndarray or Volume
voxel values for second channel
channel3 : ndarray or Volume
voxel values for third channel
channel1color : tuple<uint8, uint8, uint8>
color in RGB for first channel
channel2color : tuple<uint8, uint8, uint8>
color in RGB for second channel
channel3Color : tuple<uint8, uint8, uint8>
color in RGB for third channel
value_max : float, optional
Maximum HSV value for voxel colors. If not given, will be the value of
the average of the three channel colors.
saturation_max : float [0, 1]
Maximum HSV saturation for voxel colors.
common_range : bool
Use the same vmin and vmax for all three color channels?
common_min : float, optional
            Predetermined shared vmin. Does nothing if common_range == False. If not given,
will be the 1st percentile of all values across all three channels.
common_max : float, optional
            Predetermined shared vmax. Does nothing if common_range == False. If not given,
will be the 99th percentile of all values across all three channels
alpha : ndarray or Volume, optional
Alpha values for each voxel. If None, alpha is set to 1 for all voxels.
Returns
-------
red : ndarray of channel1.shape
uint8 array of red values
green : ndarray of channel1.shape
uint8 array of green values
blue : ndarray of channel1.shape
uint8 array of blue values
alpha : ndarray
If alpha=None, uint8 array of alpha values with alpha=1 for every voxel.
Otherwise, the same alpha values that were passed in. Additionally,
voxels with NaNs will have an alpha value of 0.
"""
# normalize each channel to [0, 1]
data1 = channel1.data if isinstance(channel1, VolumeData) else channel1
data1 = data1.astype(float)
data2 = channel2.data if isinstance(channel2, VolumeData) else channel2
data2 = data2.astype(float)
data3 = channel3.data if isinstance(channel3, VolumeData) else channel3
data3 = data3.astype(float)
if (data1.shape != data2.shape) or (data2.shape != data3.shape):
raise ValueError('Volumes are of different shapes')
# Create an alpha mask now, before casting nans to 0
# Voxels with at least one channel equal to NaN will be masked out.
mask = np.isnan(np.array([data1, data2, data3])).any(axis=0)
# Now convert to NaNs to num for all channels
data1 = np.nan_to_num(data1)
data2 = np.nan_to_num(data2)
data3 = np.nan_to_num(data3)
if common_range:
if common_min is None:
if common_max is None:
common_min = np.percentile(np.hstack((data1, data2, data3)), 1)
else:
common_min = 0
if common_max is None:
common_max = np.percentile(np.hstack((data1, data2, data3)), 99)
data1 -= common_min
data2 -= common_min
data3 -= common_min
data1 /= (common_max - common_min)
data2 /= (common_max - common_min)
data3 /= (common_max - common_min)
else:
channelMin = np.percentile(data1, 1)
channelMax = np.percentile(data1, 99)
data1 -= channelMin
data1 /= (channelMax - channelMin)
channelMin = np.percentile(data2, 1)
channelMax = np.percentile(data2, 99)
data2 -= channelMin
data2 /= (channelMax - channelMin)
channelMin = np.percentile(data3, 1)
channelMax = np.percentile(data3, 99)
data3 -= channelMin
data3 /= (channelMax - channelMin)
data1 = np.clip(data1, 0, 1)
data2 = np.clip(data2, 0, 1)
data3 = np.clip(data3, 0, 1)
channel1color = np.array(channel1color)
channel2color = np.array(channel2color)
channel3Color = np.array(channel3Color)
averageColor = (channel1color + channel2color + channel3Color) / 3
if value_max is None:
_, _, value = RGB2HSV(averageColor)
value_max = value
red = np.zeros_like(data1, np.uint8)
green = np.zeros_like(data1, np.uint8)
blue = np.zeros_like(data1, np.uint8)
for i in range(data1.size):
this_color = data1.flat[i] * channel1color + data2.flat[i] * channel2color + data3.flat[i] * channel3Color
this_color /= 3.0
if (value_max != 1.0) or (saturation_max != 1.0):
hue, saturation, value = RGB2HSV(this_color)
saturation /= saturation_max
value /= value_max
if saturation > 1:
saturation = 1.0
if value > 1:
value = 1.0
this_color = HSV2RGB([hue, saturation, value])
red.flat[i] = this_color[0]
green.flat[i] = this_color[1]
blue.flat[i] = this_color[2]
# Now make an alpha volume
if alpha is None:
alpha = np.ones_like(red, np.uint8) * 255
alpha[mask] = 0
return red, green, blue, alpha
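# --- Hedged usage sketch (not part of the original module) ---
# Building an RGB volume from three random channels. "S1"/"fullhead" are
# the example subject/transform shipped with the pycortex demo database;
# substitute entries from your own database. The shape is an assumption
# matching the S1 "fullhead" transform.
#
# import cortex
# shape = (31, 100, 100)  # (z, y, x)
# r, g, b = (np.random.rand(*shape) for _ in range(3))
# vol = cortex.VolumeRGB(r, g, b, subject="S1", xfmname="fullhead")
# cortex.quickshow(vol)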
class VertexRGB(DataviewRGB):
"""
Contains RGB (or RGBA) colors for each vertex in a surface dataset.
Includes information about the subject.
Each color channel is represented as a separate Vertex object (these can
either be supplied explicitly as Vertex objects or implicitly as np
arrays). The vmin for each Vertex will be mapped to the minimum value for
that color channel, and the vmax will be mapped to the maximum value.
Parameters
----------
    red : ndarray or Vertex
        Array or Vertex that represents the red component of the color for each
        vertex. Can be a 1D or 2D array (see Vertex for details), or a Vertex.
    green : ndarray or Vertex
        Array or Vertex that represents the green component of the color for each
        vertex. Can be a 1D or 2D array (see Vertex for details), or a Vertex.
    blue : ndarray or Vertex
        Array or Vertex that represents the blue component of the color for each
        vertex. Can be a 1D or 2D array (see Vertex for details), or a Vertex.
    subject : str, optional
        Subject identifier. Must exist in the pycortex database. If not given,
        red must be a Vertex from which the subject can be extracted.
    alpha : ndarray or Vertex, optional
        Array or Vertex that represents the alpha component of the color for each
        vertex. Can be a 1D or 2D array (see Vertex for details), or a Vertex. If
        None, all vertices will be assumed to have alpha=1.0.
description : str, optional
String describing this dataset. Displayed in webgl viewer.
state : optional
TODO: describe what this is
**kwargs
All additional arguments in kwargs are passed to the VertexData and
Dataview.
"""
_cls = VertexData
blend_curvature = _cls.blend_curvature # hacky inheritance
def __init__(self, red, green, blue, subject=None, alpha=None, description="",
state=None, **kwargs):
if isinstance(red, VertexData):
if not isinstance(green, VertexData) or red.subject != green.subject:
raise TypeError("Invalid data for green channel")
if not isinstance(blue, VertexData) or red.subject != blue.subject:
raise TypeError("Invalid data for blue channel")
self.red = red
self.green = green
self.blue = blue
else:
if subject is None:
raise TypeError("Subject name is required")
self.red = Vertex(red, subject)
self.green = Vertex(green, subject)
self.blue = Vertex(blue, subject)
self.alpha = alpha
super(VertexRGB, self).__init__(subject, alpha, description=description,
state=state, **kwargs)
@property
def alpha(self):
"""Compute alpha transparency"""
alpha = self._alpha
if alpha is None:
alpha = np.ones(self.red.vertices.shape[1])
alpha = Vertex(alpha, self.red.subject, vmin=0, vmax=1)
if not isinstance(alpha, Vertex):
if alpha.dtype != np.uint8 and (alpha.min() < 0 or alpha.max() > 1):
warnings.warn(
"Some alpha values are outside the range of [0, 1]. "
"Consider passing a Vertex object as alpha with explicit vmin, vmax "
"keyword arguments.",
Warning
)
alpha = Vertex(alpha, self.red.subject, vmin=0, vmax=1)
rgb = np.array([self.red.data, self.green.data, self.blue.data])
mask = np.isnan(rgb).any(axis=0)
alpha.data[mask] = alpha.vmin
return alpha
@alpha.setter
def alpha(self, alpha):
self._alpha = alpha
@property
def vertices(self):
"""3-dimensional volume (t, v, rgba) with data that has been mapped
into 8-bit unsigned integers that correspond to colors.
"""
verts = []
for dv in (self.red, self.green, self.blue, self.alpha):
if dv.vertices.dtype != np.uint8:
vert = dv.vertices.astype("float32", copy=True)
if dv.vmin is None:
if vert.min() < 0:
vert -= vert.min()
else:
vert -= dv.vmin
if dv.vmax is None:
if vert.max() > 1:
vert /= vert.max()
else:
vert /= dv.vmax - dv.vmin
vert = (np.clip(vert, 0, 1) * 255).astype(np.uint8)
else:
vert = dv.vertices.copy()
verts.append(vert)
return np.array(verts).transpose([1, 2, 0])
def to_json(self, simple=False):
sdict = super(VertexRGB, self).to_json(simple=simple)
if simple:
sdict.update(dict(split=self.red.llen, frames=self.vertices.shape[0]))
return sdict
@property
def left(self):
return self.vertices[:,:self.red.llen]
@property
def right(self):
return self.vertices[:,self.red.llen:]
def __repr__(self):
return "<RGB vertex data for (%s)>"%(self.subject)
def __hash__(self):
return hash(_hash(self.vertices))
@property
def name(self):
return "__%s"%_hash(self.vertices)[:16]
@property
def raw(self):
return self
|
1da3f9e809777c45eaa48f5b2223ec0d9ead3408
|
ec70ba5d21c21421db4e23b75fe9e0ada2d81a88
|
/Code-Code/ClozeTesting-maxmin/evaluator/evaluator.py
|
e8517ccff6b3245db8d8eef78eaa4a2e8390b4f0
|
[
"MIT",
"Unlicense",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
microsoft/CodeXGLUE
|
069bea1fca4701447fd47006202a16ddf7892501
|
e252e54a74dd55b1294e2379b213b1541dfefaf5
|
refs/heads/main
| 2023-08-08T22:19:33.162334
| 2023-07-31T12:21:09
| 2023-07-31T12:21:09
| 291,656,286
| 1,250
| 342
|
MIT
| 2023-07-31T08:41:12
| 2020-08-31T08:19:02
|
C#
|
UTF-8
|
Python
| false
| false
| 1,829
|
py
|
evaluator.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
import sys, os
import argparse
def read_answers(filename):
answers = {}
with open(filename, 'r', encoding='utf-8') as f:
for line in f.readlines():
line = line.strip()
answers[line.split('<CODESPLIT>')[0]] = line.split('<CODESPLIT>')[1]
return answers
def read_predictions(filename):
predictions = {}
with open(filename, 'r', encoding='utf-8') as f:
for line in f.readlines():
line = line.strip()
predictions[line.split('<CODESPLIT>')[0]] = line.split('<CODESPLIT>')[1]
return predictions
def calculate_scores(answers, predictions):
scores = []
for key in answers:
if key not in predictions:
logging.error("Missing prediction for index {}.".format(key))
sys.exit()
a = answers[key]
p = predictions[key]
scores.append(a==p)
result = sum(scores) / len(scores)
return result
def main():
parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for ClozeTest-maxmin dataset.')
parser.add_argument('--answers', '-a', help="directory name of the labels, in txt format.")
parser.add_argument('--predictions', '-p', help="directory name of the leaderboard predictions, in txt format.")
args = parser.parse_args()
for lang in ['ruby', 'javascript', 'go', 'python', 'java', 'php']:
answers = read_answers(os.path.join(args.answers, lang, 'answers.txt'))
predictions = read_predictions(os.path.join(args.predictions, lang, 'predictions.txt'))
acc = calculate_scores(answers, predictions)
print('ClozeTest-maxmin:{}, acc: {}'.format(lang, acc))
if __name__ == '__main__':
main()
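# --- Hedged note on the expected file layout (illustrative) ---
# read_answers/read_predictions above parse one "<id><CODESPLIT><token>"
# pair per line; the ids and tokens below are made up for illustration:
#
#   maxmin-1<CODESPLIT>max
#   maxmin-2<CODESPLIT>min
#
# Accuracy is the fraction of ids whose predicted token equals the label.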
|
4c924f5736f577ebbc83390d08ae28ab61753488
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/EventFilter/L1TRawToDigi/python/gmtStage2Digis_cfi.py
|
a6dba0c9c4634f256d2823e45070b84e84885d0b
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 223
|
py
|
gmtStage2Digis_cfi.py
|
import FWCore.ParameterSet.Config as cms
gmtStage2Digis = cms.EDProducer(
"L1TRawToDigi",
InputLabel = cms.InputTag("rawDataCollector"),
Setup = cms.string("stage2::GMTSetup"),
FedIds = cms.vint32(1402),
)
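# --- Hedged usage note (illustrative) ---
# In a cmsRun configuration this producer is typically imported and, if
# needed, customised via clone(); the override below is only an example.
#
# from EventFilter.L1TRawToDigi.gmtStage2Digis_cfi import gmtStage2Digis
# process.gmtStage2Digis = gmtStage2Digis.clone(
#     InputLabel = cms.InputTag("rawDataCollector")
# )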
|
efd61b3f7d566ae11941a69328627a0faf517a1b
|
620323fc090cebaf7aca456ff3f7fbbe1e210394
|
/html_parsing/gametime__use_cubiq_ru/main.py
|
246166d800993cda2f28ee8260bd0ef02f0ccb38
|
[
"CC-BY-4.0"
] |
permissive
|
gil9red/SimplePyScripts
|
bd2733372728bf9b9f00570e90316fa12116516b
|
773c2c9724edd8827a1dbd91694d780e03fcb05a
|
refs/heads/master
| 2023-08-31T04:26:09.120173
| 2023-08-30T17:22:59
| 2023-08-30T17:22:59
| 22,650,442
| 157
| 46
| null | 2023-09-08T17:51:33
| 2014-08-05T16:19:52
|
Python
|
UTF-8
|
Python
| false
| false
| 4,289
|
py
|
main.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
import re
from dataclasses import dataclass
from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup
@dataclass
class Time:
text: str
seconds: int
@classmethod
def from_text(cls, value: str) -> "Time":
seconds = to_seconds(value)
return cls(value, seconds)
USER_AGENT = (
"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:91.0) Gecko/20100101 Firefox/91.0"
)
session = requests.Session()
session.headers["User-Agent"] = USER_AGENT
def to_seconds(time_str: str) -> int:
kind_to_seconds = {
"ч": 60 * 60,
"мин": 60,
}
seconds = 0
for value, kind in re.findall(r"(\d+) (ч|мин)", time_str):
if kind not in kind_to_seconds:
raise Exception(f"Неизвестный kind={kind}!")
seconds += int(value) * kind_to_seconds[kind]
return seconds
# SOURCE: https://github.com/gil9red/price_of_games/blob/9311f9cbc6b9e57d0308436e3dbf3e524f23ef74/app_parser/utils.py
def smart_comparing_names(name_1: str, name_2: str) -> bool:
"""
    Compare two game titles.
    Returns True if they match, otherwise False.
"""
    # Convert both strings to lower case
name_1 = name_1.lower()
name_2 = name_2.lower()
def remove_postfix(text: str) -> str:
for postfix in ("dlc", "expansion"):
if text.endswith(postfix):
return text[: -len(postfix)]
return text
    # Remove all characters except letters, digits and _: "the witcher®3:___ вася! wild hunt" -> "thewitcher3___васяwildhunt"
def clear_name(name: str) -> str:
return re.sub(r"\W", "", name)
name_1 = clear_name(name_1)
name_1 = remove_postfix(name_1)
name_2 = clear_name(name_2)
name_2 = remove_postfix(name_2)
return name_1 == name_2
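# --- Hedged examples (illustrative only) ---
# assert smart_comparing_names("The Witcher® 3: Wild Hunt", "the witcher 3 wild hunt")
# assert not smart_comparing_names("Dead Space", "Dead Space 2")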
def get(url: str) -> dict[str, str | Time]:
rs = session.get(url)
root = BeautifulSoup(rs.content, "html.parser")
data = {
"title": root.select_one(".entry-header").get_text(strip=True),
}
for li in root.select("ul.game_times > li"):
name = li.h5.get_text(strip=True)
value = li.div.get_text(strip=True)
data[name] = Time.from_text(value)
return data
def find(game: str) -> dict[str, str | Time] | None:
url_search = "https://cubiq.ru/gametime/?s=" + game
rs = session.get(url_search)
root = BeautifulSoup(rs.content, "html.parser")
for a in root.select(".entry-title > a[href]"):
name = a.get_text(strip=True)
if smart_comparing_names(name, game):
url = urljoin(rs.url, a["href"])
return get(url)
if __name__ == "__main__":
assert to_seconds("25 ч. 18 мин.") == 91080
assert to_seconds("71 ч. 50 мин.") == 258600
assert to_seconds("113 ч.") == 406800
url = "https://cubiq.ru/gametime/age-of-wonders-iii/"
rs = get(url)
print(rs)
# {
# 'title': 'Время прохождения Age of Wonders III',
# 'Основной сюжет': Time(text='25 ч. 18 мин.', seconds=91080),
# 'Cюжет и доп. задания': Time(text='71 ч. 50 мин.', seconds=258600),
# 'Перфекционист': Time(text='113 ч.', seconds=406800)
# }
print()
print(find("dead space"))
# {
# 'title': 'Время прохождения Dead Space',
# 'Основной сюжет': Time(text='11 ч. 10 мин.', seconds=40200),
# 'Cюжет и доп. задания': Time(text='13 ч. 10 мин.', seconds=47400),
# 'Перфекционист': Time(text='20 ч. 41 мин.', seconds=74460)
# }
print()
print(find("dead space 2"))
# {
# 'title': 'Время прохождения Dead Space 2',
# 'Основной сюжет': Time(text='9 ч. 18 мин.', seconds=33480),
# 'Cюжет и доп. задания': Time(text='11 ч. 49 мин.', seconds=42540),
# 'Перфекционист': Time(text='17 ч. 23 мин.', seconds=62580)
# }
|
17892a173fb59e5861d154e0e5e136be6935310b
|
e3bb1df7fa4c51900dec7e9ddf5295e1a80938bd
|
/hummingbot/connector/exchange/vertex/vertex_constants.py
|
56ca536b13c91c0f4bfe59340396cc513d77a773
|
[
"Apache-2.0"
] |
permissive
|
CoinAlpha/hummingbot
|
0d1e2bd94de1280748647108c7d7800a09546eb8
|
c3f101759ab7e7a2165cd23a3a3e94c90c642a9b
|
refs/heads/development
| 2023-09-01T11:24:43.322137
| 2023-08-31T03:08:06
| 2023-08-31T03:08:06
| 439,330,952
| 135
| 98
|
Apache-2.0
| 2023-08-30T13:55:08
| 2021-12-17T12:50:42
|
Python
|
UTF-8
|
Python
| false
| false
| 9,758
|
py
|
vertex_constants.py
|
from typing import Any, Dict
# A single source of truth for constant variables related to the exchange
from hummingbot.core.api_throttler.data_types import LinkedLimitWeightPair, RateLimit
from hummingbot.core.data_type.in_flight_order import OrderState
# The max size of a digest is 66 characters (Vertex uses digests comparable to a client order id).
MAX_ORDER_ID_LEN = 66
HEARTBEAT_TIME_INTERVAL = 30.0
ORDER_BOOK_DEPTH = 100
VERSION = "0.0.1"
EXCHANGE_NAME = "vertex"
DEFAULT_DOMAIN = "vertex"
TESTNET_DOMAIN = "vertex_testnet"
QUOTE = "USDC"
BASE_URLS = {
DEFAULT_DOMAIN: "https://prod.vertexprotocol-backend.com",
TESTNET_DOMAIN: "https://test.vertexprotocol-backend.com",
}
WSS_URLS = {
DEFAULT_DOMAIN: "wss://prod.vertexprotocol-backend.com",
TESTNET_DOMAIN: "wss://test.vertexprotocol-backend.com",
}
CONTRACTS = {
DEFAULT_DOMAIN: "0xbbee07b3e8121227afcfe1e2b82772246226128e",
TESTNET_DOMAIN: "0x5956d6f55011678b2cab217cd21626f7668ba6c5",
}
CHAIN_IDS = {
DEFAULT_DOMAIN: 42161,
TESTNET_DOMAIN: 421613,
}
HBOT_BROKER_ID = ""
SIDE_BUY = "BUY"
SIDE_SELL = "SELL"
TIME_IN_FORCE_GTC = "GTC" # Good till cancelled
TIME_IN_FORCE_IOC = "IOC" # Immediate or cancel
TIME_IN_FORCE_FOK = "FOK" # Fill or kill
TIME_IN_FORCE_POSTONLY = "POSTONLY" # PostOnly
# API PATHS
POST_PATH_URL = "/execute"
QUERY_PATH_URL = "/query"
INDEXER_PATH_URL = "/indexer"
SYMBOLS_PATH_URL = "/symbols"
WS_PATH_URL = "/ws"
WS_SUBSCRIBE_PATH_URL = "/subscribe"
# POST METHODS
PLACE_ORDER_METHOD = "place_order"
PLACE_ORDER_METHOD_NO_LEVERAGE = "place_order_no_leverage"
CANCEL_ORDERS_METHOD = "cancel_orders"
CANCEL_ALL_METHOD = "cancel_product_orders"
# REST QUERY API TYPES
STATUS_REQUEST_TYPE = "status"
ORDER_REQUEST_TYPE = "order"
SUBACCOUNT_INFO_REQUEST_TYPE = "subaccount_info"
MARKET_LIQUIDITY_REQUEST_TYPE = "market_liquidity"
ALL_PRODUCTS_REQUEST_TYPE = "all_products"
MARKET_PRICE_REQUEST_TYPE = "market_price"
FEE_RATES_REQUEST_TYPE = "fee_rates"
CONTRACTS_REQUEST_TYPE = "contracts"
SUBACCOUNT_ORDERS_REQUEST_TYPE = "subaccount_orders"
MAX_WITHDRAWABLE_REQUEST_TYPE = "max_withdrawable"
# WS API ENDPOINTS
WS_SUBSCRIBE_METHOD = "subscribe"
TOB_TOPIC_EVENT_TYPE = "best_bid_offer"
POSITION_CHANGE_EVENT_TYPE = "position_change"
SNAPSHOT_EVENT_TYPE = "market_liquidity"
TRADE_EVENT_TYPE = "trade"
DIFF_EVENT_TYPE = "book_depth"
FILL_EVENT_TYPE = "fill"
# Products
# NOTE: Index 7+ is only on testnet
PRODUCTS = {
0: {
"symbol": "USDC",
"market": None,
DEFAULT_DOMAIN: "0x0000000000000000000000000000000000000000",
TESTNET_DOMAIN: "0x0000000000000000000000000000000000000000",
},
1: {
"symbol": "wBTC",
"market": "wBTC/USDC",
DEFAULT_DOMAIN: "0x70e5911371472e406f1291c621d1c8f207764d73",
TESTNET_DOMAIN: "0x939b0915f9c3b657b9e9a095269a0078dd587491",
},
2: {
"symbol": "BTC-PERP",
"market": "wBTC/USDC",
DEFAULT_DOMAIN: "0xf03f457a30e598d5020164a339727ef40f2b8fbc",
TESTNET_DOMAIN: "0x291b578ff99bfef1706a2018d9dfdd98773e4f3e",
},
3: {
"symbol": "wETH",
"market": "wETH/USDC",
DEFAULT_DOMAIN: "0x1c6281a78aa0ed88949c319cba5f0f0de2ce8353",
TESTNET_DOMAIN: "0x4008c7b762d7000034207bdef628a798065c3dcc",
},
4: {
"symbol": "ETH-PERP",
"market": "wETH/USDC",
DEFAULT_DOMAIN: "0xfe653438a1a4a7f56e727509c341d60a7b54fa91",
TESTNET_DOMAIN: "0xe5106c497f8398ee8d1d6d246f08c125245d19ff",
},
5: {
"symbol": "ARB",
"market": "ARB/USDC",
DEFAULT_DOMAIN: "0xb6304e9a6ca241376a5fc9294daa8fca65ddcdcd",
TESTNET_DOMAIN: "0x49eff6d3de555be7a039d0b86471e3cb454b35de",
},
6: {
"symbol": "ARB-PERP",
"market": "ARB/USDC",
DEFAULT_DOMAIN: "0x01ec802ae0ab1b2cc4f028b9fe6eb954aef06ed1",
TESTNET_DOMAIN: "0xc5f223f12d091fba16141d4eeb5d39c5e0e2577c",
},
# TESTNET
7: {
"symbol": "ARB2",
"market": "ARB2/USDC",
DEFAULT_DOMAIN: None,
TESTNET_DOMAIN: "0xf9144ddc09bd6961cbed631f8be708d2d1e87f57",
},
8: {
"symbol": "ARB-PERP2",
"market": "ARB2/USDC",
DEFAULT_DOMAIN: None,
TESTNET_DOMAIN: "0xa0c85ffadceba288fbcba1dcb780956c01b25cdf",
},
9: {
"symbol": "ARB-PERP2",
"market": "ARB2/USDC",
DEFAULT_DOMAIN: None,
TESTNET_DOMAIN: "0xa0c85ffadceba288fbcba1dcb780956c01b25cdf",
},
10: {
"symbol": "ARB-PERP2",
"market": "ARB2/USDC",
DEFAULT_DOMAIN: None,
TESTNET_DOMAIN: "0xa0c85ffadceba288fbcba1dcb780956c01b25cdf",
},
}
# OrderStates
ORDER_STATE = {
"PendingNew": OrderState.PENDING_CREATE,
"New": OrderState.OPEN,
"Filled": OrderState.FILLED,
"PartiallyFilled": OrderState.PARTIALLY_FILLED,
"Canceled": OrderState.CANCELED,
"Rejected": OrderState.FAILED,
}
# Any call increases the call rate in the ALL pool, so e.g. a query/execute call contributes to both the ALL pool and its own query/execute pool.
ALL_ENDPOINTS_LIMIT = "All"
RATE_LIMITS = [
RateLimit(limit_id=ALL_ENDPOINTS_LIMIT, limit=600, time_interval=10),
RateLimit(
limit_id=INDEXER_PATH_URL, limit=60, time_interval=1, linked_limits=[LinkedLimitWeightPair(ALL_ENDPOINTS_LIMIT)]
),
RateLimit(
limit_id=STATUS_REQUEST_TYPE,
limit=60,
time_interval=1,
linked_limits=[LinkedLimitWeightPair(ALL_ENDPOINTS_LIMIT)],
),
RateLimit(
limit_id=ORDER_REQUEST_TYPE,
limit=60,
time_interval=1,
# NOTE: No weight for weight of 1...
linked_limits=[LinkedLimitWeightPair(ALL_ENDPOINTS_LIMIT)],
),
RateLimit(
limit_id=SUBACCOUNT_INFO_REQUEST_TYPE,
limit=60,
time_interval=10,
weight=10,
linked_limits=[LinkedLimitWeightPair(ALL_ENDPOINTS_LIMIT)],
),
RateLimit(
limit_id=MARKET_LIQUIDITY_REQUEST_TYPE,
limit=60,
time_interval=1,
# NOTE: No weight for weight of 1...
linked_limits=[LinkedLimitWeightPair(ALL_ENDPOINTS_LIMIT)],
),
RateLimit(
limit_id=ALL_PRODUCTS_REQUEST_TYPE,
limit=12,
time_interval=1,
weight=5,
linked_limits=[LinkedLimitWeightPair(ALL_ENDPOINTS_LIMIT)],
),
RateLimit(
limit_id=MARKET_PRICE_REQUEST_TYPE,
limit=60,
time_interval=1,
# NOTE: No weight for weight of 1...
linked_limits=[LinkedLimitWeightPair(ALL_ENDPOINTS_LIMIT)],
),
RateLimit(
limit_id=FEE_RATES_REQUEST_TYPE,
limit=30,
time_interval=1,
weight=2,
linked_limits=[LinkedLimitWeightPair(ALL_ENDPOINTS_LIMIT)],
),
RateLimit(
limit_id=CONTRACTS_REQUEST_TYPE,
limit=60,
time_interval=1,
weight=1,
linked_limits=[LinkedLimitWeightPair(ALL_ENDPOINTS_LIMIT)],
),
RateLimit(
limit_id=SUBACCOUNT_ORDERS_REQUEST_TYPE,
limit=30,
time_interval=1,
weight=2,
linked_limits=[LinkedLimitWeightPair(ALL_ENDPOINTS_LIMIT)],
),
RateLimit(
limit_id=MAX_WITHDRAWABLE_REQUEST_TYPE,
limit=120,
time_interval=10,
weight=5,
linked_limits=[LinkedLimitWeightPair(ALL_ENDPOINTS_LIMIT)],
),
# NOTE: For spot with no leverage, there are different limits.
# Review https://vertex-protocol.gitbook.io/docs/developer-resources/api/websocket-rest-api/executes/place-order
RateLimit(
limit_id=PLACE_ORDER_METHOD,
limit=10,
time_interval=1,
linked_limits=[LinkedLimitWeightPair(ALL_ENDPOINTS_LIMIT)],
),
RateLimit(
limit_id=PLACE_ORDER_METHOD_NO_LEVERAGE,
limit=5,
time_interval=10,
linked_limits=[LinkedLimitWeightPair(ALL_ENDPOINTS_LIMIT)],
),
# NOTE: We're only providing one (1) digest at a time currently.
# https://vertex-protocol.gitbook.io/docs/developer-resources/api/websocket-rest-api/executes/cancel-orders
RateLimit(
limit_id=CANCEL_ORDERS_METHOD,
limit=600,
time_interval=1,
linked_limits=[LinkedLimitWeightPair(ALL_ENDPOINTS_LIMIT)],
),
# NOTE: This isn't currently in use.
# https://vertex-protocol.gitbook.io/docs/developer-resources/api/websocket-rest-api/executes/cancel-product-orders
RateLimit(
limit_id=CANCEL_ALL_METHOD,
limit=2,
time_interval=1,
linked_limits=[LinkedLimitWeightPair(ALL_ENDPOINTS_LIMIT)],
),
]
"""
https://vertex-protocol.gitbook.io/docs/developer-resources/api/api-errors
"""
ERRORS: Dict[int, Any] = {
1000: {
"code": 1000,
"error_value": "RateLimit",
"description": "Too Many Requests: You have exceeded the rate limit. Please reduce your request frequency and try again later.",
"message": "",
},
1001: {
"code": 1001,
"error_value": "BlacklistedAddress",
"description": "This address has been blacklisted from accessing the sequencer due to a violation of the Terms of Service. If you believe this is an error, please contact the Vertex team for assistance.",
"message": "",
},
1002: {
"code": 1002,
"error_value": "BlockedLocation",
"description": "Access from your current location ({location}) is blocked. Please check your location and try again.",
"message": "",
},
1003: {
"code": 1003,
"error_value": "BlockedSubdivision",
"description": "Access from your current location ({location} - {subdivision}) is blocked. Please check your location and try again.",
"message": "",
},
}
|
8925217fd98f9e72e56ad8d004915f7d301d8a9d
|
bf8d344b17e2ff9b7e38ad9597d5ce0e3d4da062
|
/deploy/pptracking/python/mot/tracker/ocsort_tracker.py
|
02b1028e1a59a74357d3ee8aef2446902458ffd2
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleDetection
|
e7e0f40bef75a4e0b6dcbacfafa7eb1969e44961
|
bd83b98342b0a6bc8d8dcd5936233aeda1e32167
|
refs/heads/release/2.6
| 2023-08-31T07:04:15.357051
| 2023-08-18T02:24:45
| 2023-08-18T02:24:45
| 217,475,193
| 12,523
| 3,096
|
Apache-2.0
| 2023-09-10T10:05:56
| 2019-10-25T07:21:14
|
Python
|
UTF-8
|
Python
| false
| false
| 14,710
|
py
|
ocsort_tracker.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/noahcao/OC_SORT/blob/master/trackers/ocsort_tracker/ocsort.py
"""
import time
import numpy as np
from ..matching.ocsort_matching import associate, linear_assignment, iou_batch, associate_only_iou
from ..motion.ocsort_kalman_filter import OCSORTKalmanFilter
def k_previous_obs(observations, cur_age, k):
if len(observations) == 0:
return [-1, -1, -1, -1, -1]
for i in range(k):
dt = k - i
if cur_age - dt in observations:
return observations[cur_age - dt]
max_age = max(observations.keys())
return observations[max_age]
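# Behaviour of the lookup above: with k=3 and cur_age=10 (illustrative values) the loop
# checks ages 7, 8 and 9 in that order and returns the first one present in `observations`;
# if none are present, the newest observation (highest age key) is returned, and the
# [-1]*5 placeholder only when there are no observations at all.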
def convert_bbox_to_z(bbox):
"""
Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form
[x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is
the aspect ratio
"""
w = bbox[2] - bbox[0]
h = bbox[3] - bbox[1]
x = bbox[0] + w / 2.
y = bbox[1] + h / 2.
s = w * h # scale is just area
r = w / float(h + 1e-6)
return np.array([x, y, s, r]).reshape((4, 1))
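# Illustrative conversion (hypothetical box): [10, 20, 50, 40] has w=40, h=20,
# centre (30, 30), area s=800 and aspect ratio r≈2.0, so the result is the
# column vector [[30], [30], [800], [~2.0]].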
def convert_x_to_bbox(x, score=None):
"""
Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
[x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right
"""
w = np.sqrt(x[2] * x[3])
h = x[2] / w
    if score is None:
return np.array(
[x[0] - w / 2., x[1] - h / 2., x[0] + w / 2.,
x[1] + h / 2.]).reshape((1, 4))
else:
score = np.array([score])
return np.array([
x[0] - w / 2., x[1] - h / 2., x[0] + w / 2., x[1] + h / 2., score
]).reshape((1, 5))
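# Illustrative inverse of the conversion above: x = [30, 30, 800, 2.0] gives
# w = sqrt(s*r) = 40 and h = s/w = 20, i.e. [[10., 20., 50., 40.]]; when `score`
# is provided it is appended as a fifth column.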
def speed_direction(bbox1, bbox2):
cx1, cy1 = (bbox1[0] + bbox1[2]) / 2.0, (bbox1[1] + bbox1[3]) / 2.0
cx2, cy2 = (bbox2[0] + bbox2[2]) / 2.0, (bbox2[1] + bbox2[3]) / 2.0
speed = np.array([cy2 - cy1, cx2 - cx1])
norm = np.sqrt((cy2 - cy1)**2 + (cx2 - cx1)**2) + 1e-6
return speed / norm
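# Note: the returned direction is (dy, dx), i.e. (row, column) order, normalised to
# unit length. E.g. centres moving from (0, 0) to (3, 4) give speed ≈ [0.8, 0.6].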
class KalmanBoxTracker(object):
"""
This class represents the internal state of individual tracked objects observed as bbox.
Args:
bbox (np.array): bbox in [x1,y1,x2,y2,score] format.
delta_t (int): delta_t of previous observation
"""
count = 0
def __init__(self, bbox, delta_t=3):
self.kf = OCSORTKalmanFilter(dim_x=7, dim_z=4)
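        # 7-dim state [x, y, s, r, vx, vy, vs]: box centre, scale (area) and aspect
        # ratio plus velocities for x, y and s (r is modelled as constant); the
        # 4-dim measurement is [x, y, s, r] from convert_bbox_to_z.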
self.kf.F = np.array([[1., 0, 0, 0, 1., 0, 0], [0, 1., 0, 0, 0, 1., 0],
[0, 0, 1., 0, 0, 0, 1.], [0, 0, 0, 1., 0, 0, 0],
[0, 0, 0, 0, 1., 0, 0], [0, 0, 0, 0, 0, 1., 0],
[0, 0, 0, 0, 0, 0, 1.]])
self.kf.H = np.array([[1., 0, 0, 0, 0, 0, 0], [0, 1., 0, 0, 0, 0, 0],
[0, 0, 1., 0, 0, 0, 0], [0, 0, 0, 1., 0, 0, 0]])
self.kf.R[2:, 2:] *= 10.
self.kf.P[4:, 4:] *= 1000.
# give high uncertainty to the unobservable initial velocities
self.kf.P *= 10.
self.kf.Q[-1, -1] *= 0.01
self.kf.Q[4:, 4:] *= 0.01
self.score = bbox[4]
self.kf.x[:4] = convert_bbox_to_z(bbox)
self.time_since_update = 0
self.id = KalmanBoxTracker.count
KalmanBoxTracker.count += 1
self.history = []
self.hits = 0
self.hit_streak = 0
self.age = 0
"""
        NOTE: [-1,-1,-1,-1,-1] is a compromise placeholder for the "no observation" status; the same value is
        returned by k_previous_obs. It is ugly, but it lets the observation array be built in a fast and unified
        way (see k_observations = np.array([k_previous_obs(...)]) below), so we bear with it for now.
"""
self.last_observation = np.array([-1, -1, -1, -1, -1]) # placeholder
self.observations = dict()
self.history_observations = []
self.velocity = None
self.delta_t = delta_t
def update(self, bbox, angle_cost=False):
"""
Updates the state vector with observed bbox.
"""
if bbox is not None:
            if angle_cost and self.last_observation.sum() >= 0:  # a previous observation exists
previous_box = None
for i in range(self.delta_t):
dt = self.delta_t - i
if self.age - dt in self.observations:
previous_box = self.observations[self.age - dt]
break
if previous_box is None:
previous_box = self.last_observation
# """
# Estimate the track speed direction with observations \Delta t steps away
# """
self.velocity = speed_direction(previous_box, bbox)
"""
            Insert new observations. This is an ugly way to maintain both self.observations
and self.history_observations. Bear it for the moment.
"""
self.last_observation = bbox
self.observations[self.age] = bbox
self.history_observations.append(bbox)
self.time_since_update = 0
self.history = []
self.hits += 1
self.hit_streak += 1
self.kf.update(convert_bbox_to_z(bbox))
else:
self.kf.update(bbox)
def predict(self):
"""
Advances the state vector and returns the predicted bounding box estimate.
"""
if ((self.kf.x[6] + self.kf.x[2]) <= 0):
self.kf.x[6] *= 0.0
self.kf.predict()
self.age += 1
if (self.time_since_update > 0):
self.hit_streak = 0
self.time_since_update += 1
self.history.append(convert_x_to_bbox(self.kf.x, score=self.score))
return self.history[-1]
def get_state(self):
return convert_x_to_bbox(self.kf.x, score=self.score)
class OCSORTTracker(object):
"""
OCSORT tracker, support single class
Args:
det_thresh (float): threshold of detection score
max_age (int): maximum number of missed misses before a track is deleted
min_hits (int): minimum hits for associate
iou_threshold (float): iou threshold for associate
delta_t (int): delta_t of previous observation
inertia (float): vdc_weight of angle_diff_cost for associate
vertical_ratio (float): w/h, the vertical ratio of the bbox to filter
            bad results. If set <= 0, bboxes are not filtered; usually set
1.6 for pedestrian tracking.
min_box_area (int): min box area to filter out low quality boxes
        use_byte (bool): Whether to use ByteTracker, default False
        use_angle_cost (bool): Whether to use angle cost, default False
"""
def __init__(self,
det_thresh=0.6,
max_age=30,
min_hits=3,
iou_threshold=0.3,
delta_t=3,
inertia=0.2,
vertical_ratio=-1,
min_box_area=0,
use_byte=False,
use_angle_cost=False):
self.det_thresh = det_thresh
self.max_age = max_age
self.min_hits = min_hits
self.iou_threshold = iou_threshold
self.delta_t = delta_t
self.inertia = inertia
self.vertical_ratio = vertical_ratio
self.min_box_area = min_box_area
self.use_byte = use_byte
self.use_angle_cost = use_angle_cost
self.trackers = []
self.frame_count = 0
KalmanBoxTracker.count = 0
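        # Reset the class-level ID counter so track IDs restart for every new
        # OCSORTTracker instance (IDs are reported as trk.id + 1 in update()).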
def update(self, pred_dets, pred_embs=None):
"""
Args:
pred_dets (np.array): Detection results of the image, the shape is
[N, 6], means 'cls_id, score, x0, y0, x1, y1'.
pred_embs (np.array): Embedding results of the image, the shape is
[N, 128] or [N, 512], default as None.
Return:
tracking boxes (np.array): [M, 6], means 'x0, y0, x1, y1, score, id'.
"""
if pred_dets is None:
return np.empty((0, 6))
self.frame_count += 1
bboxes = pred_dets[:, 2:]
scores = pred_dets[:, 1:2]
dets = np.concatenate((bboxes, scores), axis=1)
scores = scores.squeeze(-1)
inds_low = scores > 0.1
inds_high = scores < self.det_thresh
inds_second = np.logical_and(inds_low, inds_high)
# self.det_thresh > score > 0.1, for second matching
dets_second = dets[inds_second] # detections for second matching
remain_inds = scores > self.det_thresh
dets = dets[remain_inds]
# get predicted locations from existing trackers.
trks = np.zeros((len(self.trackers), 5))
to_del = []
ret = []
for t, trk in enumerate(trks):
pos = self.trackers[t].predict()[0]
trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]
if np.any(np.isnan(pos)):
to_del.append(t)
trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
for t in reversed(to_del):
self.trackers.pop(t)
if self.use_angle_cost:
velocities = np.array([
trk.velocity if trk.velocity is not None else np.array((0, 0))
for trk in self.trackers
])
k_observations = np.array([
k_previous_obs(trk.observations, trk.age, self.delta_t)
for trk in self.trackers
])
last_boxes = np.array([trk.last_observation for trk in self.trackers])
"""
First round of association
"""
if self.use_angle_cost:
matched, unmatched_dets, unmatched_trks = associate(
dets, trks, self.iou_threshold, velocities, k_observations,
self.inertia)
else:
matched, unmatched_dets, unmatched_trks = associate_only_iou(
dets, trks, self.iou_threshold)
for m in matched:
self.trackers[m[1]].update(
dets[m[0], :], angle_cost=self.use_angle_cost)
"""
        Second round of association by OCR
"""
# BYTE association
if self.use_byte and len(dets_second) > 0 and unmatched_trks.shape[
0] > 0:
u_trks = trks[unmatched_trks]
iou_left = iou_batch(
dets_second,
u_trks) # iou between low score detections and unmatched tracks
iou_left = np.array(iou_left)
if iou_left.max() > self.iou_threshold:
"""
NOTE: by using a lower threshold, e.g., self.iou_threshold - 0.1, you may
get a higher performance especially on MOT17/MOT20 datasets. But we keep it
uniform here for simplicity
"""
matched_indices = linear_assignment(-iou_left)
to_remove_trk_indices = []
for m in matched_indices:
det_ind, trk_ind = m[0], unmatched_trks[m[1]]
if iou_left[m[0], m[1]] < self.iou_threshold:
continue
self.trackers[trk_ind].update(
dets_second[det_ind, :], angle_cost=self.use_angle_cost)
to_remove_trk_indices.append(trk_ind)
unmatched_trks = np.setdiff1d(unmatched_trks,
np.array(to_remove_trk_indices))
if unmatched_dets.shape[0] > 0 and unmatched_trks.shape[0] > 0:
left_dets = dets[unmatched_dets]
left_trks = last_boxes[unmatched_trks]
iou_left = iou_batch(left_dets, left_trks)
iou_left = np.array(iou_left)
if iou_left.max() > self.iou_threshold:
"""
NOTE: by using a lower threshold, e.g., self.iou_threshold - 0.1, you may
get a higher performance especially on MOT17/MOT20 datasets. But we keep it
uniform here for simplicity
"""
rematched_indices = linear_assignment(-iou_left)
to_remove_det_indices = []
to_remove_trk_indices = []
for m in rematched_indices:
det_ind, trk_ind = unmatched_dets[m[0]], unmatched_trks[m[
1]]
if iou_left[m[0], m[1]] < self.iou_threshold:
continue
self.trackers[trk_ind].update(
dets[det_ind, :], angle_cost=self.use_angle_cost)
to_remove_det_indices.append(det_ind)
to_remove_trk_indices.append(trk_ind)
unmatched_dets = np.setdiff1d(unmatched_dets,
np.array(to_remove_det_indices))
unmatched_trks = np.setdiff1d(unmatched_trks,
np.array(to_remove_trk_indices))
for m in unmatched_trks:
self.trackers[m].update(None)
# create and initialise new trackers for unmatched detections
for i in unmatched_dets:
trk = KalmanBoxTracker(dets[i, :], delta_t=self.delta_t)
self.trackers.append(trk)
i = len(self.trackers)
for trk in reversed(self.trackers):
if trk.last_observation.sum() < 0:
d = trk.get_state()[0]
else:
d = trk.last_observation # tlbr + score
if (trk.time_since_update < 1) and (
trk.hit_streak >= self.min_hits or
self.frame_count <= self.min_hits):
# +1 as MOT benchmark requires positive
ret.append(np.concatenate((d, [trk.id + 1])).reshape(1, -1))
i -= 1
# remove dead tracklet
if (trk.time_since_update > self.max_age):
self.trackers.pop(i)
if (len(ret) > 0):
return np.concatenate(ret)
return np.empty((0, 6))
|
cdfd2919b570869456051aecd1a5f6db5ea5f93c
|
c7541429df08b30070aa87eda6aff9e08b601f91
|
/ipware/tests/tests_ipv6.py
|
8ee8fc14bbea52f045dc72736f8c415ccacb9a12
|
[
"MIT"
] |
permissive
|
un33k/django-ipware
|
3489e251c7346fe98e41e685d658f5375314479a
|
930f3293eb11b2bfc221959434e8501c165a2b61
|
refs/heads/master
| 2023-09-04T12:42:49.904637
| 2023-03-08T02:07:32
| 2023-03-08T02:07:32
| 9,659,227
| 864
| 96
|
MIT
| 2023-09-12T16:21:49
| 2013-04-24T22:47:22
|
Python
|
UTF-8
|
Python
| false
| false
| 7,679
|
py
|
tests_ipv6.py
|
# -*- coding: utf-8 -*-
from django.http import HttpRequest
from django.test import TestCase
from ipware import get_client_ip
class IPv6TestCase(TestCase):
    """IPv6 address Test"""
def test_meta_none(self):
request = HttpRequest()
request.META = {}
ip, routable = get_client_ip(request)
self.assertIsNone(ip)
self.assertFalse(routable)
def test_meta_single(self):
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_FOR': '3ffe:1900:4545:3:200:f8ff:fe21:67cf, 74dc::02ba',
}
result = get_client_ip(request)
self.assertEqual(result, ("3ffe:1900:4545:3:200:f8ff:fe21:67cf", True))
def test_meta_multi(self):
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_FOR': '3ffe:1900:4545:3:200:f8ff:fe21:67cf, 74dc::02ba, 74dc::02bb',
'REMOTE_ADDR': '74dc::02bc',
}
result = get_client_ip(request)
self.assertEqual(result, ("3ffe:1900:4545:3:200:f8ff:fe21:67cf", True))
def test_meta_multi_precedence_order(self):
request = HttpRequest()
request.META = {
'X_FORWARDED_FOR': '74dc::02be, 74dc::02bf',
'HTTP_X_FORWARDED_FOR': '3ffe:1900:4545:3:200:f8ff:fe21:67cf, 74dc::02ba, 74dc::02bb',
'REMOTE_ADDR': '74dc::02bc',
}
result = get_client_ip(request)
self.assertEqual(result, ("3ffe:1900:4545:3:200:f8ff:fe21:67cf", True))
def test_meta_proxy_order_left_most(self):
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_FOR': '3ffe:1900:4545:3:200:f8ff:fe21:67cf, 74dc::02ba, 74dc::02bb',
}
result = get_client_ip(request, proxy_order='left-most')
self.assertEqual(result, ("3ffe:1900:4545:3:200:f8ff:fe21:67cf", True))
def test_meta_proxy_order_right_most(self):
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_FOR': '3ffe:1900:4545:3:200:f8ff:fe21:67cf, 74dc::02ba, 74dc::02bb',
}
result = get_client_ip(request, proxy_order='right-most')
self.assertEqual(result, ("74dc::02bb", True))
def test_meta_multi_precedence_private_first(self):
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_FOR': '2001:db8:, ::1',
'X_FORWARDED_FOR': '3ffe:1900:4545:3:200:f8ff:fe21:67cf, 74dc::02ba, 74dc::02bb',
'REMOTE_ADDR': '74dc::02bc',
}
result = get_client_ip(request)
self.assertEqual(result, ("3ffe:1900:4545:3:200:f8ff:fe21:67cf", True))
def test_meta_multi_precedence_invalid_first(self):
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_FOR': 'unknown, 2001:db8:, ::1',
'X_FORWARDED_FOR': '3ffe:1900:4545:3:200:f8ff:fe21:67cf, 74dc::02ba, 74dc::02bb',
'REMOTE_ADDR': '74dc::02bc',
}
result = get_client_ip(request)
self.assertEqual(result, ("3ffe:1900:4545:3:200:f8ff:fe21:67cf", True))
def test_meta_error_only(self):
request = HttpRequest()
request.META = {
'X_FORWARDED_FOR': 'unknown, 3ffe:1900:4545:3:200:f8ff:fe21:67cf, 74dc::02ba, 74dc::02bb',
}
result = get_client_ip(request)
self.assertEqual(result, (None, False))
def test_meta_error_first(self):
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_FOR': 'unknown, 3ffe:1900:4545:3:200:f8ff:fe21:67cf, 74dc::02ba, 74dc::02bb',
'X_FORWARDED_FOR': '3ffe:1900:4545:3:200:f8ff:fe21:67cf, 74dc::02ba, 74dc::02bb',
}
result = get_client_ip(request)
self.assertEqual(result, ("3ffe:1900:4545:3:200:f8ff:fe21:67cf", True))
def test_meta_singleton(self):
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_FOR': '3ffe:1900:4545:3:200:f8ff:fe21:67cf',
}
result = get_client_ip(request)
self.assertEqual(result, ("3ffe:1900:4545:3:200:f8ff:fe21:67cf", True))
def test_meta_singleton_proxy_count(self):
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_FOR': '3ffe:1900:4545:3:200:f8ff:fe21:67cf',
'HTTP_X_REAL_IP': '74dc::02ba',
}
result = get_client_ip(request, proxy_count=1)
self.assertEqual(result, (None, False))
def test_meta_singleton_proxy_count_private(self):
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_FOR': '::1',
'HTTP_X_REAL_IP': '3ffe:1900:4545:3:200:f8ff:fe21:67cf',
}
result = get_client_ip(request, proxy_count=1)
self.assertEqual(result, (None, False))
def test_meta_singleton_private_fallback(self):
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_FOR': '::1',
'HTTP_X_REAL_IP': '3ffe:1900:4545:3:200:f8ff:fe21:67cf',
}
result = get_client_ip(request)
self.assertEqual(result, ("3ffe:1900:4545:3:200:f8ff:fe21:67cf", True))
def test_meta_proxy_trusted_ips(self):
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_FOR': '3ffe:1900:4545:3:200:f8ff:fe21:67cf, 74dc::02ba, 74dc::02bb',
}
result = get_client_ip(request, proxy_trusted_ips=['74dc::02bb'])
self.assertEqual(result, ("3ffe:1900:4545:3:200:f8ff:fe21:67cf", True))
def test_meta_proxy_trusted_ips_proxy_count(self):
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_FOR': '3ffe:1900:4545:3:200:f8ff:fe21:67cf, 74dc::02ba, 74dc::02bb',
}
result = get_client_ip(request, proxy_count=2, proxy_trusted_ips=['74dc::02bb'])
self.assertEqual(result, ("3ffe:1900:4545:3:200:f8ff:fe21:67cf", True))
def test_meta_proxy_trusted_ips_proxy_count_less_error(self):
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_FOR': '3ffe:1900:4545:3:200:f8ff:fe21:67cf, 74dc::02bb',
}
result = get_client_ip(request, proxy_count=2, proxy_trusted_ips=['74dc::02bb'])
self.assertEqual(result, (None, False))
def test_meta_proxy_trusted_ips_proxy_count_more_error(self):
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_FOR': '3ffe:1900:4545:3:200:f8ff:fe21:67cf, 74dc::02ba, 74dc::02bb',
}
result = get_client_ip(request, proxy_count=1, proxy_trusted_ips=['74dc::02bb'])
self.assertEqual(result, (None, False))
def test_meta_proxy_trusted_ips_proxy_count_more_error_ignore_fallback(self):
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_FOR': '3ffe:1900:4545:3:200:f8ff:fe21:67cf, 74dc::02ba, 74dc::02bb',
'HTTP_X_REAL_IP': '74dc::02bb',
}
result = get_client_ip(request, proxy_count=1, proxy_trusted_ips=['74dc::02bb'])
self.assertEqual(result, (None, False))
class IPv6EncapsulationOfIPv4TestCase(TestCase):
"""IPv6 Encapsulation of IPv4 - IP address Test"""
def test_ipv6_encapsulation_of_ipv4_private(self):
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_FOR': '::ffff:127.0.0.1',
}
result = get_client_ip(request)
self.assertEqual(result, ('127.0.0.1', False))
def test_ipv6_encapsulation_of_ipv4_public(self):
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_FOR': '::ffff:177.139.233.139',
}
result = get_client_ip(request)
self.assertEqual(result, ('177.139.233.139', True))
|
943be2e5fdc9aceb18aa5c16bf5bc1c9525019d6
|
da1721d2783ea4d67ff4e73cee6eee71292f2ef7
|
/toontown/coghq/DistributedBanquetTable.py
|
4cf3c7303177ffc739ea66896d43aea5f004c73c
|
[
"BSD-3-Clause"
] |
permissive
|
open-toontown/open-toontown
|
bbdeb1b7bf0fb2861eba2df5483738c0112090ca
|
464c2d45f60551c31397bd03561582804e760b4a
|
refs/heads/develop
| 2023-07-07T01:34:31.959657
| 2023-05-30T23:49:10
| 2023-05-30T23:49:10
| 219,221,570
| 143
| 104
|
BSD-3-Clause
| 2023-09-11T09:52:34
| 2019-11-02T22:24:38
|
Python
|
UTF-8
|
Python
| false
| false
| 46,566
|
py
|
DistributedBanquetTable.py
|
import math
import random
from panda3d.core import NodePath, Point3, VBase4, TextNode, Vec3, deg2Rad, CollisionSegment, CollisionHandlerQueue, CollisionNode, BitMask32
from panda3d.direct import SmoothMover
from direct.fsm import FSM
from direct.distributed import DistributedObject
from direct.distributed.ClockDelta import globalClockDelta
from direct.directnotify import DirectNotifyGlobal
from direct.interval.IntervalGlobal import Sequence, ProjectileInterval, Parallel, LerpHprInterval, ActorInterval, Func, Wait, SoundInterval, LerpPosHprInterval, LerpScaleInterval
from direct.gui.DirectGui import DGG, DirectButton, DirectLabel, DirectWaitBar
from direct.task import Task
from toontown.suit import Suit
from toontown.suit import SuitDNA
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.coghq import BanquetTableBase
from toontown.coghq import DinerStatusIndicator
from toontown.battle import MovieUtil
class DistributedBanquetTable(DistributedObject.DistributedObject, FSM.FSM, BanquetTableBase.BanquetTableBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBanquetTable')
rotationsPerSeatIndex = [90,
90,
0,
0,
-90,
-90,
180,
180]
pitcherMinH = -360
pitcherMaxH = 360
rotateSpeed = 30
waterPowerSpeed = base.config.GetDouble('water-power-speed', 15)
waterPowerExponent = base.config.GetDouble('water-power-exponent', 0.75)
useNewAnimations = True
TugOfWarControls = False
OnlyUpArrow = True
if OnlyUpArrow:
BASELINE_KEY_RATE = 3
else:
BASELINE_KEY_RATE = 6
UPDATE_KEY_PRESS_RATE_TASK = 'BanquetTableUpdateKeyPressRateTask'
YELLOW_POWER_THRESHOLD = 0.75
RED_POWER_THRESHOLD = 0.97
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
FSM.FSM.__init__(self, 'DistributedBanquetTable')
self.boss = None
self.index = -1
self.diners = {}
self.dinerStatus = {}
self.serviceLocs = {}
self.chairLocators = {}
self.sitLocators = {}
self.activeIntervals = {}
self.dinerStatusIndicators = {}
self.preparedForPhaseFour = False
self.avId = 0
self.toon = None
self.pitcherSmoother = SmoothMover()
self.pitcherSmoother.setSmoothMode(SmoothMover.SMOn)
self.smoothStarted = 0
self.__broadcastPeriod = 0.2
self.changeSeq = 0
self.lastChangeSeq = 0
self.pitcherAdviceLabel = None
self.fireLength = 250
self.fireTrack = None
self.hitObject = None
self.setupPowerBar()
self.aimStart = None
self.toonPitcherPosition = Point3(0, -2, 0)
self.allowLocalRequestControl = True
self.fadeTrack = None
self.grabTrack = None
self.gotHitByBoss = False
self.keyTTL = []
self.keyRate = 0
self.buttons = [0, 1]
self.lastPowerFired = 0
self.moveSound = None
self.releaseTrack = None
return
def disable(self):
DistributedObject.DistributedObject.disable(self)
taskMgr.remove(self.triggerName)
taskMgr.remove(self.smoothName)
taskMgr.remove(self.watchControlsName)
taskMgr.remove(self.pitcherAdviceName)
taskMgr.remove(self.posHprBroadcastName)
taskMgr.remove(self.waterPowerTaskName)
if self.releaseTrack:
self.releaseTrack.finish()
self.releaseTrack = None
if self.fireTrack:
self.fireTrack.finish()
self.fireTrack = None
self.cleanupIntervals()
return
def delete(self):
DistributedObject.DistributedObject.delete(self)
self.boss = None
self.ignoreAll()
for indicator in list(self.dinerStatusIndicators.values()):
indicator.delete()
self.dinerStatusIndicators = {}
for diner in list(self.diners.values()):
diner.delete()
self.diners = {}
self.powerBar.destroy()
self.powerBar = None
self.pitcherMoveSfx.stop()
return
def announceGenerate(self):
DistributedObject.DistributedObject.announceGenerate(self)
self.loadAssets()
self.smoothName = self.uniqueName('pitcherSmooth')
self.pitcherAdviceName = self.uniqueName('pitcherAdvice')
self.posHprBroadcastName = self.uniqueName('pitcherBroadcast')
self.waterPowerTaskName = self.uniqueName('updateWaterPower')
self.triggerName = self.uniqueName('trigger')
self.watchControlsName = self.uniqueName('watchControls')
def setBossCogId(self, bossCogId):
self.bossCogId = bossCogId
self.boss = base.cr.doId2do[bossCogId]
self.boss.setTable(self, self.index)
def setIndex(self, index):
self.index = index
def setState(self, state, avId, extraInfo):
self.gotHitByBoss = extraInfo
if state == 'F':
self.demand('Off')
elif state == 'N':
self.demand('On')
elif state == 'I':
self.demand('Inactive')
elif state == 'R':
self.demand('Free')
elif state == 'C':
self.demand('Controlled', avId)
elif state == 'L':
self.demand('Flat', avId)
else:
self.notify.error('Invalid state from AI: %s' % state)
def setNumDiners(self, numDiners):
self.numDiners = numDiners
def setDinerInfo(self, hungryDurations, eatingDurations, dinerLevels):
self.dinerInfo = {}
for i in range(len(hungryDurations)):
hungryDur = hungryDurations[i]
eatingDur = eatingDurations[i]
dinerLevel = dinerLevels[i]
self.dinerInfo[i] = (hungryDur, eatingDur, dinerLevel)
def loadAssets(self):
self.tableGroup = loader.loadModel('phase_12/models/bossbotHQ/BanquetTableChairs')
tableLocator = self.boss.geom.find('**/TableLocator_%d' % (self.index + 1))
if tableLocator.isEmpty():
self.tableGroup.reparentTo(render)
self.tableGroup.setPos(0, 75, 0)
else:
self.tableGroup.reparentTo(tableLocator)
self.tableGeom = self.tableGroup.find('**/Geometry')
self.setupDiners()
self.setupChairCols()
self.squirtSfx = loader.loadSfx('phase_4/audio/sfx/AA_squirt_seltzer_miss.ogg')
self.hitBossSfx = loader.loadSfx('phase_5/audio/sfx/SA_watercooler_spray_only.ogg')
self.hitBossSoundInterval = SoundInterval(self.hitBossSfx, node=self.boss, volume=1.0)
self.serveFoodSfx = loader.loadSfx('phase_4/audio/sfx/MG_sfx_travel_game_bell_for_trolley.ogg')
self.pitcherMoveSfx = base.loader.loadSfx('phase_4/audio/sfx/MG_cannon_adjust.ogg')
def setupDiners(self):
for i in range(self.numDiners):
newDiner = self.createDiner(i)
self.diners[i] = newDiner
self.dinerStatus[i] = self.HUNGRY
def createDiner(self, i):
diner = Suit.Suit()
diner.dna = SuitDNA.SuitDNA()
level = self.dinerInfo[i][2]
level -= 4
diner.dna.newSuitRandom(level=level, dept='c')
diner.setDNA(diner.dna)
if self.useNewAnimations:
diner.loop('sit', fromFrame=i)
else:
diner.pose('landing', 0)
locator = self.tableGroup.find('**/chair_%d' % (i + 1))
locatorScale = locator.getNetTransform().getScale()[0]
correctHeadingNp = locator.attachNewNode('correctHeading')
self.chairLocators[i] = correctHeadingNp
heading = self.rotationsPerSeatIndex[i]
correctHeadingNp.setH(heading)
sitLocator = correctHeadingNp.attachNewNode('sitLocator')
base.sitLocator = sitLocator
pos = correctHeadingNp.getPos(render)
if SuitDNA.getSuitBodyType(diner.dna.name) == 'c':
sitLocator.setPos(0.5, 3.65, -3.75)
else:
sitLocator.setZ(-2.4)
sitLocator.setY(2.5)
sitLocator.setX(0.5)
self.sitLocators[i] = sitLocator
diner.setScale(1.0 / locatorScale)
diner.reparentTo(sitLocator)
newLoc = NodePath('serviceLoc-%d-%d' % (self.index, i))
newLoc.reparentTo(correctHeadingNp)
newLoc.setPos(0, 3.0, 1)
self.serviceLocs[i] = newLoc
base.serviceLoc = newLoc
head = diner.find('**/joint_head')
newIndicator = DinerStatusIndicator.DinerStatusIndicator(parent=head, pos=Point3(0, 0, 3.5), scale=5.0)
newIndicator.wrtReparentTo(diner)
self.dinerStatusIndicators[i] = newIndicator
return diner
def setupChairCols(self):
for i in range(self.numDiners):
chairCol = self.tableGroup.find('**/collision_chair_%d' % (i + 1))
colName = 'ChairCol-%d-%d' % (self.index, i)
chairCol.setTag('chairIndex', str(i))
chairCol.setName(colName)
chairCol.setCollideMask(ToontownGlobals.WallBitmask)
self.accept('enter' + colName, self.touchedChair)
def touchedChair(self, colEntry):
chairIndex = int(colEntry.getIntoNodePath().getTag('chairIndex'))
if chairIndex in self.dinerStatus:
status = self.dinerStatus[chairIndex]
if status in (self.HUNGRY, self.ANGRY):
self.boss.localToonTouchedChair(self.index, chairIndex)
def serveFood(self, food, chairIndex):
self.removeFoodModel(chairIndex)
serviceLoc = self.serviceLocs.get(chairIndex)
if not food or food.isEmpty():
foodModel = loader.loadModel('phase_12/models/bossbotHQ/canoffood')
foodModel.setScale(ToontownGlobals.BossbotFoodModelScale)
foodModel.reparentTo(serviceLoc)
else:
food.wrtReparentTo(serviceLoc)
tray = food.find('**/tray')
if not tray.isEmpty():
tray.hide()
ivalDuration = 1.5
foodMoveIval = Parallel(SoundInterval(self.serveFoodSfx, node=food), ProjectileInterval(food, duration=ivalDuration, startPos=food.getPos(serviceLoc), endPos=serviceLoc.getPos(serviceLoc)), LerpHprInterval(food, ivalDuration, Point3(0, -360, 0)))
intervalName = 'serveFood-%d-%d' % (self.index, chairIndex)
foodMoveIval.start()
self.activeIntervals[intervalName] = foodMoveIval
def setDinerStatus(self, chairIndex, status):
if chairIndex in self.dinerStatus:
oldStatus = self.dinerStatus[chairIndex]
self.dinerStatus[chairIndex] = status
if oldStatus != status:
if status == self.EATING:
self.changeDinerToEating(chairIndex)
elif status == self.HUNGRY:
self.changeDinerToHungry(chairIndex)
elif status == self.ANGRY:
self.changeDinerToAngry(chairIndex)
elif status == self.DEAD:
self.changeDinerToDead(chairIndex)
elif status == self.HIDDEN:
self.changeDinerToHidden(chairIndex)
def removeFoodModel(self, chairIndex):
serviceLoc = self.serviceLocs.get(chairIndex)
if serviceLoc:
for i in range(serviceLoc.getNumChildren()):
serviceLoc.getChild(0).removeNode()
def changeDinerToEating(self, chairIndex):
indicator = self.dinerStatusIndicators.get(chairIndex)
eatingDuration = self.dinerInfo[chairIndex][1]
if indicator:
indicator.request('Eating', eatingDuration)
diner = self.diners[chairIndex]
intervalName = 'eating-%d-%d' % (self.index, chairIndex)
eatInTime = 32.0 / 24.0
eatOutTime = 21.0 / 24.0
eatLoopTime = 19 / 24.0
rightHand = diner.getRightHand()
waitTime = 5
loopDuration = eatingDuration - eatInTime - eatOutTime - waitTime
serviceLoc = self.serviceLocs[chairIndex]
def foodAttach(self = self, diner = diner):
if not self.serviceLocs[chairIndex].getNumChildren():
return
foodModel = self.serviceLocs[chairIndex].getChild(0)
            foodModel.reparentTo(diner.getRightHand())
            foodModel.setHpr(Point3(0, -94, 0))
            foodModel.setPos(Point3(-0.15, -0.7, -0.4))
scaleAdj = 1
if SuitDNA.getSuitBodyType(diner.dna.name) == 'c':
scaleAdj = 0.6
                foodModel.setPos(Point3(0.1, -0.25, -0.31))
else:
scaleAdj = 0.8
                foodModel.setPos(Point3(-0.25, -0.85, -0.34))
oldScale = foodModel.getScale()
newScale = oldScale * scaleAdj
foodModel.setScale(newScale)
def foodDetach(self = self, diner = diner):
if not diner.getRightHand().getNumChildren():
return
foodModel = diner.getRightHand().getChild(0)
            foodModel.reparentTo(serviceLoc)
            foodModel.setPosHpr(0, 0, 0, 0, 0, 0)
scaleAdj = 1
if SuitDNA.getSuitBodyType(diner.dna.name) == 'c':
scaleAdj = 0.6
else:
                scaleAdj = 0.8
oldScale = foodModel.getScale()
newScale = oldScale / scaleAdj
foodModel.setScale(newScale)
eatIval = Sequence(ActorInterval(diner, 'sit', duration=waitTime), ActorInterval(diner, 'sit-eat-in', startFrame=0, endFrame=6), Func(foodAttach), ActorInterval(diner, 'sit-eat-in', startFrame=6, endFrame=32), ActorInterval(diner, 'sit-eat-loop', duration=loopDuration, loop=1), ActorInterval(diner, 'sit-eat-out', startFrame=0, endFrame=12), Func(foodDetach), ActorInterval(diner, 'sit-eat-out', startFrame=12, endFrame=21))
eatIval.start()
self.activeIntervals[intervalName] = eatIval
def changeDinerToHungry(self, chairIndex):
intervalName = 'eating-%d-%d' % (self.index, chairIndex)
if intervalName in self.activeIntervals:
self.activeIntervals[intervalName].finish()
self.removeFoodModel(chairIndex)
indicator = self.dinerStatusIndicators.get(chairIndex)
if indicator:
indicator.request('Hungry', self.dinerInfo[chairIndex][0])
diner = self.diners[chairIndex]
if random.choice([0, 1]):
diner.loop('sit-hungry-left')
else:
diner.loop('sit-hungry-right')
def changeDinerToAngry(self, chairIndex):
self.removeFoodModel(chairIndex)
indicator = self.dinerStatusIndicators.get(chairIndex)
if indicator:
indicator.request('Angry')
diner = self.diners[chairIndex]
diner.loop('sit-angry')
def changeDinerToDead(self, chairIndex):
def removeDeathSuit(suit, deathSuit):
if not deathSuit.isEmpty():
deathSuit.detachNode()
suit.cleanupLoseActor()
self.removeFoodModel(chairIndex)
indicator = self.dinerStatusIndicators.get(chairIndex)
if indicator:
indicator.request('Dead')
diner = self.diners[chairIndex]
deathSuit = diner
locator = self.tableGroup.find('**/chair_%d' % (chairIndex + 1))
deathSuit = diner.getLoseActor()
ival = Sequence(Func(self.notify.debug, 'before actorinterval sit-lose'), ActorInterval(diner, 'sit-lose'), Func(self.notify.debug, 'before deathSuit.setHpr'), Func(deathSuit.setHpr, diner.getHpr()), Func(self.notify.debug, 'before diner.hide'), Func(diner.hide), Func(self.notify.debug, 'before deathSuit.reparentTo'), Func(deathSuit.reparentTo, self.chairLocators[chairIndex]), Func(self.notify.debug, 'befor ActorInterval lose'), ActorInterval(deathSuit, 'lose', duration=MovieUtil.SUIT_LOSE_DURATION), Func(self.notify.debug, 'before remove deathsuit'), Func(removeDeathSuit, diner, deathSuit, name='remove-death-suit-%d-%d' % (chairIndex, self.index)), Func(self.notify.debug, 'diner.stash'), Func(diner.stash))
spinningSound = base.loader.loadSfx('phase_3.5/audio/sfx/Cog_Death.ogg')
deathSound = base.loader.loadSfx('phase_3.5/audio/sfx/ENC_cogfall_apart.ogg')
deathSoundTrack = Sequence(Wait(0.8), SoundInterval(spinningSound, duration=1.2, startTime=1.5, volume=0.2, node=deathSuit), SoundInterval(spinningSound, duration=3.0, startTime=0.6, volume=0.8, node=deathSuit), SoundInterval(deathSound, volume=0.32, node=deathSuit))
intervalName = 'dinerDie-%d-%d' % (self.index, chairIndex)
deathIval = Parallel(ival, deathSoundTrack)
deathIval.start()
self.activeIntervals[intervalName] = deathIval
def changeDinerToHidden(self, chairIndex):
self.removeFoodModel(chairIndex)
indicator = self.dinerStatusIndicators.get(chairIndex)
if indicator:
indicator.request('Inactive')
diner = self.diners[chairIndex]
diner.hide()
def setAllDinersToSitNeutral(self):
startFrame = 0
for diner in list(self.diners.values()):
if not diner.isHidden():
diner.loop('sit', fromFrame=startFrame)
startFrame += 1
def cleanupIntervals(self):
for interval in list(self.activeIntervals.values()):
interval.finish()
self.activeIntervals = {}
def clearInterval(self, name, finish = 1):
if name in self.activeIntervals:
ival = self.activeIntervals[name]
if finish:
ival.finish()
else:
ival.pause()
if name in self.activeIntervals:
del self.activeIntervals[name]
else:
self.notify.debug('interval: %s already cleared' % name)
def finishInterval(self, name):
if name in self.activeIntervals:
interval = self.activeIntervals[name]
interval.finish()
def getNotDeadInfo(self):
notDeadList = []
for i in range(self.numDiners):
if self.dinerStatus[i] != self.DEAD:
notDeadList.append((self.index, i, 12))
return notDeadList
def enterOn(self):
pass
def exitOn(self):
pass
def enterInactive(self):
for chairIndex in range(self.numDiners):
indicator = self.dinerStatusIndicators.get(chairIndex)
if indicator:
indicator.request('Inactive')
self.removeFoodModel(chairIndex)
def exitInactive(self):
pass
def enterFree(self):
self.resetPowerBar()
if self.fadeTrack:
self.fadeTrack.finish()
self.fadeTrack = None
self.prepareForPhaseFour()
if self.avId == localAvatar.doId:
self.tableGroup.setAlphaScale(0.3)
self.tableGroup.setTransparency(1)
taskMgr.doMethodLater(5, self.__allowDetect, self.triggerName)
self.fadeTrack = Sequence(Func(self.tableGroup.setTransparency, 1), self.tableGroup.colorScaleInterval(0.2, VBase4(1, 1, 1, 0.3)))
self.fadeTrack.start()
self.allowLocalRequestControl = False
else:
self.allowLocalRequestControl = True
self.avId = 0
return
def exitFree(self):
pass
def touchedTable(self, colEntry):
tableIndex = int(colEntry.getIntoNodePath().getTag('tableIndex'))
if self.state == 'Free' and self.avId == 0 and self.allowLocalRequestControl:
self.d_requestControl()
def prepareForPhaseFour(self):
if not self.preparedForPhaseFour:
for i in range(8):
chair = self.tableGroup.find('**/chair_%d' % (i + 1))
if not chair.isEmpty():
chair.hide()
colChairs = self.tableGroup.findAllMatches('**/ChairCol*')
for i in range(colChairs.getNumPaths()):
col = colChairs.getPath(i)
col.stash()
colChairs = self.tableGroup.findAllMatches('**/collision_chair*')
for i in range(colChairs.getNumPaths()):
col = colChairs.getPath(i)
col.stash()
tableCol = self.tableGroup.find('**/collision_table')
colName = 'TableCol-%d' % self.index
tableCol.setTag('tableIndex', str(self.index))
tableCol.setName(colName)
tableCol.setCollideMask(ToontownGlobals.WallBitmask | ToontownGlobals.BanquetTableBitmask)
self.accept('enter' + colName, self.touchedTable)
self.preparedForPhaseFour = True
self.waterPitcherModel = loader.loadModel('phase_12/models/bossbotHQ/tt_m_ara_bhq_seltzerBottle')
lampNode = self.tableGroup.find('**/lamp_med_5')
pos = lampNode.getPos(self.tableGroup)
lampNode.hide()
bottleLocator = self.tableGroup.find('**/bottle_locator')
pos = bottleLocator.getPos(self.tableGroup)
self.waterPitcherNode = self.tableGroup.attachNewNode('pitcherNode')
self.waterPitcherNode.setPos(pos)
self.waterPitcherModel.reparentTo(self.waterPitcherNode)
self.waterPitcherModel.ls()
self.nozzle = self.waterPitcherModel.find('**/nozzle_tip')
self.handLocator = self.waterPitcherModel.find('**/hand_locator')
self.handPos = self.handLocator.getPos()
def d_requestControl(self):
self.sendUpdate('requestControl')
def d_requestFree(self, gotHitByBoss):
self.sendUpdate('requestFree', [gotHitByBoss])
def enterControlled(self, avId):
self.prepareForPhaseFour()
self.avId = avId
toon = base.cr.doId2do.get(avId)
if not toon:
return
self.toon = toon
self.grabTrack = self.makeToonGrabInterval(toon)
self.notify.debug('grabTrack=%s' % self.grabTrack)
self.pitcherCamPos = Point3(0, -50, 40)
self.pitcherCamHpr = Point3(0, -21, 0)
if avId == localAvatar.doId:
self.boss.toMovieMode()
self.__enableControlInterface()
self.startPosHprBroadcast()
self.grabTrack = Sequence(self.grabTrack, Func(camera.wrtReparentTo, localAvatar), LerpPosHprInterval(camera, 1, self.pitcherCamPos, self.pitcherCamHpr), Func(self.boss.toCraneMode))
if self.TugOfWarControls:
self.__spawnUpdateKeyPressRateTask()
self.accept('exitCrane', self.gotBossZapped)
else:
self.startSmooth()
toon.stopSmooth()
self.grabTrack.start()
def exitControlled(self):
self.ignore('exitCrane')
if self.grabTrack:
self.grabTrack.finish()
self.grabTrack = None
nextState = self.getCurrentOrNextState()
self.notify.debug('nextState=%s' % nextState)
if nextState == 'Flat':
place = base.cr.playGame.getPlace()
self.notify.debug('%s' % place.fsm)
if self.avId == localAvatar.doId:
self.__disableControlInterface()
else:
if self.toon and not self.toon.isDisabled():
self.toon.loop('neutral')
self.toon.startSmooth()
self.releaseTrack = self.makeToonReleaseInterval(self.toon)
self.stopPosHprBroadcast()
self.stopSmooth()
if self.avId == localAvatar.doId:
localAvatar.wrtReparentTo(render)
self.__disableControlInterface()
camera.reparentTo(base.localAvatar)
camera.setPos(base.localAvatar.cameraPositions[0][0])
camera.setHpr(0, 0, 0)
self.goToFinalBattle()
self.safeBossToFinalBattleMode()
else:
toon = base.cr.doId2do.get(self.avId)
if toon:
toon.wrtReparentTo(render)
self.releaseTrack.start()
return
def safeBossToFinalBattleMode(self):
if self.boss:
self.boss.toFinalBattleMode()
def goToFinalBattle(self):
if self.cr:
place = self.cr.playGame.getPlace()
if place and hasattr(place, 'fsm'):
if place.fsm.getCurrentState().getName() == 'crane':
place.setState('finalBattle')
def makeToonGrabInterval(self, toon):
toon.pose('leverNeutral', 0)
toon.update()
rightHandPos = toon.rightHand.getPos(toon)
self.toonPitcherPosition = Point3(self.handPos[0] - rightHandPos[0], self.handPos[1] - rightHandPos[1], 0)
destZScale = rightHandPos[2] / self.handPos[2]
grabIval = Sequence(Func(toon.wrtReparentTo, self.waterPitcherNode), Func(toon.loop, 'neutral'), Parallel(ActorInterval(toon, 'jump'), Sequence(Wait(0.43), Parallel(ProjectileInterval(toon, duration=0.9, startPos=toon.getPos(self.waterPitcherNode), endPos=self.toonPitcherPosition), LerpHprInterval(toon, 0.9, Point3(0, 0, 0)), LerpScaleInterval(self.waterPitcherModel, 0.9, Point3(1, 1, destZScale))))), Func(toon.setPos, self.toonPitcherPosition), Func(toon.loop, 'leverNeutral'))
return grabIval
def makeToonReleaseInterval(self, toon):
temp1 = self.waterPitcherNode.attachNewNode('temp1')
temp1.setPos(self.toonPitcherPosition)
temp2 = self.waterPitcherNode.attachNewNode('temp2')
temp2.setPos(0, -10, -self.waterPitcherNode.getZ())
startPos = temp1.getPos(render)
endPos = temp2.getPos(render)
temp1.removeNode()
temp2.removeNode()
def getSlideToPos(toon = toon):
return render.getRelativePoint(toon, Point3(0, -10, 0))
if self.gotHitByBoss:
self.notify.debug('creating zap interval instead')
grabIval = Sequence(Func(toon.loop, 'neutral'), Func(toon.wrtReparentTo, render), Parallel(ActorInterval(toon, 'slip-backward'), toon.posInterval(0.5, getSlideToPos, fluid=1)))
else:
grabIval = Sequence(Func(toon.loop, 'neutral'), Func(toon.wrtReparentTo, render), Parallel(ActorInterval(toon, 'jump'), Sequence(Wait(0.43), ProjectileInterval(toon, duration=0.9, startPos=startPos, endPos=endPos))))
return grabIval
def b_clearSmoothing(self):
self.d_clearSmoothing()
self.clearSmoothing()
def d_clearSmoothing(self):
self.sendUpdate('clearSmoothing', [0])
def clearSmoothing(self, bogus = None):
self.pitcherSmoother.clearPositions(1)
def doSmoothTask(self, task):
self.pitcherSmoother.computeAndApplySmoothHpr(self.waterPitcherNode)
return Task.cont
def startSmooth(self):
if not self.smoothStarted:
taskName = self.smoothName
taskMgr.remove(taskName)
self.reloadPosition()
taskMgr.add(self.doSmoothTask, taskName)
self.smoothStarted = 1
def stopSmooth(self):
if self.smoothStarted:
taskName = self.smoothName
taskMgr.remove(taskName)
self.forceToTruePosition()
self.smoothStarted = 0
def __enableControlInterface(self):
gui = loader.loadModel('phase_3.5/models/gui/avatar_panel_gui')
self.closeButton = DirectButton(image=(gui.find('**/CloseBtn_UP'),
gui.find('**/CloseBtn_DN'),
gui.find('**/CloseBtn_Rllvr'),
gui.find('**/CloseBtn_UP')), relief=None, scale=2, text=TTLocalizer.BossbotPitcherLeave, text_scale=0.04, text_pos=(0, -0.07), text_fg=VBase4(1, 1, 1, 1), pos=(1.05, 0, -0.82), command=self.__exitPitcher)
self.accept('escape', self.__exitPitcher)
self.accept('control', self.__controlPressed)
self.accept('control-up', self.__controlReleased)
self.accept('InputState-forward', self.__upArrow)
self.accept('InputState-reverse', self.__downArrow)
self.accept('InputState-turnLeft', self.__leftArrow)
self.accept('InputState-turnRight', self.__rightArrow)
self.accept('arrow_up', self.__upArrowKeyPressed)
self.accept('arrow_down', self.__downArrowKeyPressed)
taskMgr.add(self.__watchControls, self.watchControlsName)
taskMgr.doMethodLater(5, self.__displayPitcherAdvice, self.pitcherAdviceName)
self.arrowVert = 0
self.arrowHorz = 0
self.powerBar.show()
return
def __disableControlInterface(self):
if self.closeButton:
self.closeButton.destroy()
self.closeButton = None
self.__cleanupPitcherAdvice()
self.ignore('escape')
self.ignore('control')
self.ignore('control-up')
self.ignore('InputState-forward')
self.ignore('InputState-reverse')
self.ignore('InputState-turnLeft')
self.ignore('InputState-turnRight')
self.ignore('arrow_up')
self.ignore('arrow_down')
self.arrowVert = 0
self.arrowHorz = 0
taskMgr.remove(self.watchControlsName)
taskMgr.remove(self.waterPowerTaskName)
self.resetPowerBar()
self.aimStart = None
self.powerBar.hide()
if self.TugOfWarControls:
self.__killUpdateKeyPressRateTask()
self.keyTTL = []
self.__setMoveSound(None)
return
def __displayPitcherAdvice(self, task):
        if self.pitcherAdviceLabel is None:
self.pitcherAdviceLabel = DirectLabel(text=TTLocalizer.BossbotPitcherAdvice, text_fg=VBase4(1, 1, 1, 1), text_align=TextNode.ACenter, relief=None, pos=(0, 0, 0.69), scale=0.1)
return
def __cleanupPitcherAdvice(self):
if self.pitcherAdviceLabel:
self.pitcherAdviceLabel.destroy()
self.pitcherAdviceLabel = None
taskMgr.remove(self.pitcherAdviceName)
return
def showExiting(self):
if self.closeButton:
self.closeButton.destroy()
self.closeButton = DirectLabel(relief=None, text=TTLocalizer.BossbotPitcherLeaving, pos=(1.05, 0, -0.88), text_pos=(0, 0), text_scale=0.06, text_fg=VBase4(1, 1, 1, 1))
self.__cleanupPitcherAdvice()
return
def __exitPitcher(self):
self.showExiting()
self.d_requestFree(False)
def __controlPressed(self):
self.__cleanupPitcherAdvice()
if self.TugOfWarControls:
if self.power:
self.aimStart = 1
self.__endFireWater()
elif self.state == 'Controlled':
self.__beginFireWater()
def __controlReleased(self):
if self.TugOfWarControls:
pass
elif self.state == 'Controlled':
self.__endFireWater()
def __upArrow(self, pressed):
self.__incrementChangeSeq()
self.__cleanupPitcherAdvice()
if pressed:
self.arrowVert = 1
elif self.arrowVert > 0:
self.arrowVert = 0
def __downArrow(self, pressed):
self.__incrementChangeSeq()
self.__cleanupPitcherAdvice()
if pressed:
self.arrowVert = -1
elif self.arrowVert < 0:
self.arrowVert = 0
def __rightArrow(self, pressed):
self.__incrementChangeSeq()
self.__cleanupPitcherAdvice()
if pressed:
self.arrowHorz = 1
elif self.arrowHorz > 0:
self.arrowHorz = 0
def __leftArrow(self, pressed):
self.__incrementChangeSeq()
self.__cleanupPitcherAdvice()
if pressed:
self.arrowHorz = -1
elif self.arrowHorz < 0:
self.arrowHorz = 0
def __incrementChangeSeq(self):
self.changeSeq = self.changeSeq + 1 & 255
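        # '+' binds tighter than '&', so this evaluates as (self.changeSeq + 1) & 255,
        # wrapping the sequence counter to 8 bits before it is broadcast.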
def stopPosHprBroadcast(self):
taskName = self.posHprBroadcastName
taskMgr.remove(taskName)
def startPosHprBroadcast(self):
taskName = self.posHprBroadcastName
self.b_clearSmoothing()
self.d_sendPitcherPos()
taskMgr.remove(taskName)
taskMgr.doMethodLater(self.__broadcastPeriod, self.__posHprBroadcast, taskName)
def __posHprBroadcast(self, task):
self.d_sendPitcherPos()
taskName = self.posHprBroadcastName
taskMgr.doMethodLater(self.__broadcastPeriod, self.__posHprBroadcast, taskName)
return Task.done
def d_sendPitcherPos(self):
timestamp = globalClockDelta.getFrameNetworkTime()
self.sendUpdate('setPitcherPos', [self.changeSeq, self.waterPitcherNode.getH(), timestamp])
def setPitcherPos(self, changeSeq, h, timestamp):
self.changeSeq = changeSeq
if self.smoothStarted:
now = globalClock.getFrameTime()
local = globalClockDelta.networkToLocalTime(timestamp, now)
self.pitcherSmoother.setH(h)
self.pitcherSmoother.setTimestamp(local)
self.pitcherSmoother.markPosition()
else:
self.waterPitcherNode.setH(h)
def __watchControls(self, task):
if self.arrowHorz:
self.__movePitcher(self.arrowHorz)
else:
self.__setMoveSound(None)
return Task.cont
def __movePitcher(self, xd):
dt = globalClock.getDt()
h = self.waterPitcherNode.getH() - xd * self.rotateSpeed * dt
h %= 360
self.notify.debug('rotSpeed=%.2f curH=%.2f xd =%.2f, dt = %.2f, h=%.2f' % (self.rotateSpeed,
self.waterPitcherNode.getH(),
xd,
dt,
h))
limitH = h
self.waterPitcherNode.setH(limitH)
if xd:
self.__setMoveSound(self.pitcherMoveSfx)
def reloadPosition(self):
self.pitcherSmoother.clearPositions(0)
self.pitcherSmoother.setHpr(self.waterPitcherNode.getHpr())
self.pitcherSmoother.setPhonyTimestamp()
def forceToTruePosition(self):
if self.pitcherSmoother.getLatestPosition():
self.pitcherSmoother.applySmoothHpr(self.waterPitcherNode)
self.pitcherSmoother.clearPositions(1)
def getSprayTrack(self, color, origin, target, dScaleUp, dHold, dScaleDown, horizScale = 1.0, vertScale = 1.0, parent = render):
track = Sequence()
SPRAY_LEN = 1.5
sprayProp = MovieUtil.globalPropPool.getProp('spray')
sprayScale = hidden.attachNewNode('spray-parent')
sprayRot = hidden.attachNewNode('spray-rotate')
spray = sprayRot
spray.setColor(color)
if color[3] < 1.0:
spray.setTransparency(1)
def showSpray(sprayScale, sprayRot, sprayProp, origin, target, parent):
if callable(origin):
origin = origin()
if callable(target):
target = target()
sprayRot.reparentTo(parent)
sprayRot.clearMat()
sprayScale.reparentTo(sprayRot)
sprayScale.clearMat()
sprayProp.reparentTo(sprayScale)
sprayProp.clearMat()
sprayRot.setPos(origin)
sprayRot.lookAt(Point3(target))
track.append(Func(showSpray, sprayScale, sprayRot, sprayProp, origin, target, parent))
def calcTargetScale(target = target, origin = origin, horizScale = horizScale, vertScale = vertScale):
if callable(target):
target = target()
if callable(origin):
origin = origin()
distance = Vec3(target - origin).length()
yScale = distance / SPRAY_LEN
targetScale = Point3(yScale * horizScale, yScale, yScale * vertScale)
return targetScale
track.append(LerpScaleInterval(sprayScale, dScaleUp, calcTargetScale, startScale=Point3(0.01, 0.01, 0.01)))
track.append(Func(self.checkHitObject))
track.append(Wait(dHold))
def prepareToShrinkSpray(spray, sprayProp, origin, target):
if callable(target):
target = target()
if callable(origin):
origin = origin()
sprayProp.setPos(Point3(0.0, -SPRAY_LEN, 0.0))
spray.setPos(target)
track.append(Func(prepareToShrinkSpray, spray, sprayProp, origin, target))
track.append(LerpScaleInterval(sprayScale, dScaleDown, Point3(0.01, 0.01, 0.01)))
def hideSpray(spray, sprayScale, sprayRot, sprayProp, propPool):
sprayProp.detachNode()
MovieUtil.removeProp(sprayProp)
sprayRot.removeNode()
sprayScale.removeNode()
track.append(Func(hideSpray, spray, sprayScale, sprayRot, sprayProp, MovieUtil.globalPropPool))
return track
def checkHitObject(self):
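# Only the local avatar that fired reports the hit; damage is 1, 2 or 3
# depending on how far the power meter had been charged when the water left.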
if not self.hitObject:
return
if self.avId != base.localAvatar.doId:
return
tag = self.hitObject.getNetTag('pieCode')
pieCode = int(tag)
if pieCode == ToontownGlobals.PieCodeBossCog:
self.hitBossSoundInterval.start()
self.sendUpdate('waterHitBoss', [self.index])
if self.TugOfWarControls:
damage = 1
if self.lastPowerFired < self.YELLOW_POWER_THRESHOLD:
damage = 1
elif self.lastPowerFired < self.RED_POWER_THRESHOLD:
damage = 2
else:
damage = 3
self.boss.d_hitBoss(damage)
else:
damage = 1
if self.lastPowerFired < self.YELLOW_POWER_THRESHOLD:
damage = 1
elif self.lastPowerFired < self.RED_POWER_THRESHOLD:
damage = 2
else:
damage = 3
self.boss.d_hitBoss(damage)
def waterHitBoss(self, tableIndex):
if self.index == tableIndex:
self.hitBossSoundInterval.start()
def setupPowerBar(self):
self.powerBar = DirectWaitBar(pos=(0.0, 0, -0.94), relief=DGG.SUNKEN, frameSize=(-2.0,
2.0,
-0.2,
0.2), borderWidth=(0.02, 0.02), scale=0.25, range=1, sortOrder=50, frameColor=(0.5, 0.5, 0.5, 0.5), barColor=(0.75, 0.75, 1.0, 0.8), text='', text_scale=0.26, text_fg=(1, 1, 1, 1), text_align=TextNode.ACenter, text_pos=(0, -0.05))
self.power = 0
self.powerBar['value'] = self.power
self.powerBar.hide()
def resetPowerBar(self):
self.power = 0
self.powerBar['value'] = self.power
self.powerBar['text'] = ''
self.keyTTL = []
def __beginFireWater(self):
if self.fireTrack and self.fireTrack.isPlaying():
return
if self.aimStart != None:
return
if not self.state == 'Controlled':
return
if not self.avId == localAvatar.doId:
return
time = globalClock.getFrameTime()
self.aimStart = time
messenger.send('wakeup')
taskMgr.add(self.__updateWaterPower, self.waterPowerTaskName)
return
def __endFireWater(self):
if self.aimStart == None:
return
if not self.state == 'Controlled':
return
if not self.avId == localAvatar.doId:
return
taskMgr.remove(self.waterPowerTaskName)
messenger.send('wakeup')
self.aimStart = None
origin = self.nozzle.getPos(render)
target = self.boss.getPos(render)
angle = deg2Rad(self.waterPitcherNode.getH() + 90)
x = math.cos(angle)
y = math.sin(angle)
fireVector = Point3(x, y, 0)
if self.power < 0.001:
self.power = 0.001
self.lastPowerFired = self.power
fireVector *= self.fireLength * self.power
target = origin + fireVector
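# Cast a collision segment along the aim vector; if anything is hit, the
# spray target is shortened to the first surface point found.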
segment = CollisionSegment(origin[0], origin[1], origin[2], target[0], target[1], target[2])
fromObject = render.attachNewNode(CollisionNode('pitcherColNode'))
fromObject.node().addSolid(segment)
fromObject.node().setFromCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.CameraBitmask | ToontownGlobals.FloorBitmask)
fromObject.node().setIntoCollideMask(BitMask32.allOff())
queue = CollisionHandlerQueue()
base.cTrav.addCollider(fromObject, queue)
base.cTrav.traverse(render)
queue.sortEntries()
self.hitObject = None
if queue.getNumEntries():
entry = queue.getEntry(0)
target = entry.getSurfacePoint(render)
self.hitObject = entry.getIntoNodePath()
base.cTrav.removeCollider(fromObject)
fromObject.removeNode()
self.d_firingWater(origin, target)
self.fireWater(origin, target)
self.resetPowerBar()
return
def __updateWaterPower(self, task):
if not self.powerBar:
print('### no power bar!!!')
return task.done
newPower = self.__getWaterPower(globalClock.getFrameTime())
self.power = newPower
self.powerBar['value'] = newPower
if self.power < self.YELLOW_POWER_THRESHOLD:
self.powerBar['barColor'] = VBase4(0.75, 0.75, 1.0, 0.8)
elif self.power < self.RED_POWER_THRESHOLD:
self.powerBar['barColor'] = VBase4(1.0, 1.0, 0.0, 0.8)
else:
self.powerBar['barColor'] = VBase4(1.0, 0.0, 0.0, 0.8)
return task.cont
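# Charge power follows an ease-out curve, 1 - (1 - t)^exponent, and wraps
# around (t % 1) if the button is held past a full charge cycle.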
def __getWaterPower(self, time):
elapsed = max(time - self.aimStart, 0.0)
t = elapsed / self.waterPowerSpeed
exponent = self.waterPowerExponent
if t > 1:
t = t % 1
power = 1 - math.pow(1 - t, exponent)
if power > 1.0:
power = 1.0
return power
def d_firingWater(self, origin, target):
self.sendUpdate('firingWater', [origin[0],
origin[1],
origin[2],
target[0],
target[1],
target[2]])
def firingWater(self, startX, startY, startZ, endX, endY, endZ):
origin = Point3(startX, startY, startZ)
target = Point3(endX, endY, endZ)
self.fireWater(origin, target)
def fireWater(self, origin, target):
color = VBase4(0.75, 0.75, 1, 0.8)
dScaleUp = 0.1
dHold = 0.3
dScaleDown = 0.1
horizScale = 0.1
vertScale = 0.1
sprayTrack = self.getSprayTrack(color, origin, target, dScaleUp, dHold, dScaleDown, horizScale, vertScale)
duration = self.squirtSfx.length()
if sprayTrack.getDuration() < duration:
duration = sprayTrack.getDuration()
soundTrack = SoundInterval(self.squirtSfx, node=self.waterPitcherModel, duration=duration)
self.fireTrack = Parallel(sprayTrack, soundTrack)
self.fireTrack.start()
def getPos(self, wrt = render):
return self.tableGroup.getPos(wrt)
def getLocator(self):
return self.tableGroup
def enterFlat(self, avId):
self.prepareForPhaseFour()
self.resetPowerBar()
self.notify.debug('enterFlat %d' % self.index)
if self.avId:
toon = base.cr.doId2do.get(self.avId)
if toon:
toon.wrtReparentTo(render)
toon.setZ(0)
self.tableGroup.setScale(1, 1, 0.01)
if self.avId and self.avId == localAvatar.doId:
localAvatar.b_squish(ToontownGlobals.BossCogDamageLevels[ToontownGlobals.BossCogMoveAttack])
def exitFlat(self):
self.tableGroup.setScale(1.0)
if self.avId:
toon = base.cr.doId2do.get(self.avId)
if toon:
if toon == localAvatar:
self.boss.toCraneMode()
toon.b_setAnimState('neutral')
toon.setAnimState('neutral')
toon.loop('leverNeutral')
def __allowDetect(self, task):
if self.fadeTrack:
self.fadeTrack.finish()
self.fadeTrack = Sequence(self.tableGroup.colorScaleInterval(0.2, VBase4(1, 1, 1, 1)), Func(self.tableGroup.clearColorScale), Func(self.tableGroup.clearTransparency))
self.fadeTrack.start()
self.allowLocalRequestControl = True
def gotBossZapped(self):
self.showExiting()
self.d_requestFree(True)
def __upArrowKeyPressed(self):
if self.TugOfWarControls:
self.__pressHandler(0)
def __downArrowKeyPressed(self):
if self.TugOfWarControls:
self.__pressHandler(1)
def __pressHandler(self, index):
if index == self.buttons[0]:
self.keyTTL.insert(0, 1.0)
if not self.OnlyUpArrow:
self.buttons.reverse()
def __spawnUpdateKeyPressRateTask(self):
taskMgr.remove(self.taskName(self.UPDATE_KEY_PRESS_RATE_TASK))
taskMgr.doMethodLater(0.1, self.__updateKeyPressRateTask, self.taskName(self.UPDATE_KEY_PRESS_RATE_TASK))
def __killUpdateKeyPressRateTask(self):
taskMgr.remove(self.taskName(self.UPDATE_KEY_PRESS_RATE_TASK))
def __updateKeyPressRateTask(self, task):
if self.state != 'Controlled':
return Task.done
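# keyTTL keeps one entry per recent arrow press, each fading out over one
# second; the surviving count is the press rate that nudges the power meter
# up or down relative to BASELINE_KEY_RATE.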
for i in range(len(self.keyTTL)):
self.keyTTL[i] -= 0.1
for i in range(len(self.keyTTL)):
if self.keyTTL[i] <= 0:
a = self.keyTTL[0:i]
del self.keyTTL
self.keyTTL = a
break
self.keyRate = len(self.keyTTL)
keyRateDiff = self.keyRate - self.BASELINE_KEY_RATE
diffPower = keyRateDiff / 300.0
if self.power < 1 and diffPower > 0:
diffPower = diffPower * math.pow(1 - self.power, 1.25)
newPower = self.power + diffPower
if newPower > 1:
newPower = 1
elif newPower < 0:
newPower = 0
self.notify.debug('diffPower=%.2f keyRate = %d, newPower=%.2f' % (diffPower, self.keyRate, newPower))
self.power = newPower
self.powerBar['value'] = newPower
if self.power < self.YELLOW_POWER_THRESHOLD:
self.powerBar['barColor'] = VBase4(0.75, 0.75, 1.0, 0.8)
elif self.power < self.RED_POWER_THRESHOLD:
self.powerBar['barColor'] = VBase4(1.0, 1.0, 0.0, 0.8)
else:
self.powerBar['barColor'] = VBase4(1.0, 0.0, 0.0, 0.8)
self.__spawnUpdateKeyPressRateTask()
return Task.done
def __setMoveSound(self, sfx):
if sfx != self.moveSound:
if self.moveSound:
self.moveSound.stop()
self.moveSound = sfx
if self.moveSound:
base.playSfx(self.moveSound, looping=1, volume=0.5)
|
b4c0882dae4e5fb0486033d0d8a287b6e55c6e69
|
2278989a22b2d230d238e3eb48ac9b94602b09ff
|
/Text/PresidioPIIAnonymization/powerskill/powerskill/__init__.py
|
c4adbb1b12281e5f53190ebcbfe544ce34185675
|
[
"MIT"
] |
permissive
|
Azure-Samples/azure-search-power-skills
|
8c7d4a67c6cb0328043aff9fd0077be179e76fde
|
30cb169b0752a9648aa2c58fb0534053acbb636d
|
refs/heads/main
| 2023-08-19T04:41:44.935129
| 2023-07-31T20:41:57
| 2023-07-31T20:41:57
| 191,627,575
| 184
| 154
|
MIT
| 2023-09-12T23:00:30
| 2019-06-12T18:51:45
|
C#
|
UTF-8
|
Python
| false
| false
| 59
|
py
|
__init__.py
|
from .presidio import Presidio
__all__ = [
"Presidio"
]
|
8e6fad7b5d70ec2a2a6fbbed43e540783ef5598f
|
14fcb8b3a1d1771e69f43fd6c10d1b930cc0c415
|
/flurs/model/user_knn.py
|
8af1cbd11f21b7bd44279d2d73c4bab0597e8cca
|
[
"MIT"
] |
permissive
|
takuti/flurs
|
43f76f5d0917eb576c2869aa078b28711e17032a
|
04b8f4c2539f962fecf2d8fef342123230c1f263
|
refs/heads/master
| 2022-03-21T14:53:18.547818
| 2022-02-08T14:03:06
| 2022-02-08T14:03:06
| 71,954,398
| 116
| 23
|
MIT
| 2022-02-08T14:03:08
| 2016-10-26T01:37:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,956
|
py
|
user_knn.py
|
from sklearn.base import BaseEstimator
import numpy as np
class UserKNN(BaseEstimator):
"""Incremental User-based Collaborative Filtering using k-Nearest-Neighbor (kNN).
Parameters
----------
k : int, default=5
Number of nearest neighbors.
References
----------
.. [1] M. Papagelis et al. **Incremental Collaborative Filtering for
Highly-Scalable Recommendation Algorithms**. In *Foundations of Intelligent
Systems*, pp. 553-561, Springer Berlin Heidelberg, 2005.
"""
def __init__(self, k=5):
# number of nearest neighbors
self.k = k
# user-item matrix
self.R = np.array([])
# user-user similarity matrix
self.S = np.array([])
# user-user similarity: S = B / (sqrt(C) * sqrt(D))
self.B = np.array([])
self.C = np.array([])
self.D = np.array([])
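# B, C and D accumulate the numerator and the two squared-deviation sums of
# the mean-centered cosine similarity, so S = B / (sqrt(C) * sqrt(D)) can be
# refreshed incrementally after every new or updated rating.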
def update_model(self, ua, ia, value):
prev_r = self.R[ua, ia]
new_submit = prev_r == 0
self.R[ua, ia] = value
prev_mean = self.users[ua]["mean"]
if new_submit:
self.users[ua]["count"] += 1
self.users[ua]["mean"] = (
self.R[ua, ia] / self.users[ua]["count"]
+ (self.users[ua]["count"] - 1) / self.users[ua]["count"] * prev_mean
)
else:
self.users[ua]["mean"] = (self.R[ua, ia] - prev_r) / (
self.users[ua]["count"] - 1
) + prev_mean
d = self.users[ua]["mean"] - prev_mean
for uy in range(self.n_user):
# skip myself
if uy == ua:
continue
e = f = g = 0.0
had_uy_rated_ia = self.R[uy, ia] != 0
if had_uy_rated_ia:
ua_normalized = self.R[ua, ia] - self.users[ua]["mean"]
uy_normalized = self.R[uy, ia] - self.users[uy]["mean"]
if new_submit:
e = ua_normalized * uy_normalized
f = ua_normalized ** 2
g = uy_normalized ** 2
else:
e = (self.R[ua, ia] - prev_r) * uy_normalized
f = (self.R[ua, ia] - prev_r) ** 2 + 2 * (
self.R[ua, ia] - prev_r
) * ua_normalized
g = 0.0
for ih in range(self.n_item):
# only for co-rated items
if self.R[ua, ih] != 0 and self.R[uy, ih] != 0:
e = e - d * (self.R[uy, ih] - self.users[uy]["mean"])
f = f + d ** 2 - 2 * d * (self.R[ua, ih] - prev_mean)
self.B[ua, uy] += e
self.C[ua, uy] += f
self.D[ua, uy] += g
# avoid zero division
idx = (self.C[ua, :] != 0) & (self.D[ua, :] != 0)
self.S[ua, idx] = self.B[ua, idx] / (
np.sqrt(self.C[ua, idx]) * np.sqrt(self.D[ua, idx])
)
|
71b524e87965f538f26e1bfee25e97dd4ddf895b
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/unifiprotect/__init__.py
|
174f60fd1352a53db01e88b20c7c46661f084daa
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 6,323
|
py
|
__init__.py
|
"""UniFi Protect Platform."""
from __future__ import annotations
import asyncio
from datetime import timedelta
import logging
from aiohttp.client_exceptions import ServerDisconnectedError
from pyunifiprotect.exceptions import ClientError, NotAuthorized
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers import (
config_validation as cv,
device_registry as dr,
issue_registry as ir,
)
from homeassistant.helpers.issue_registry import IssueSeverity
from homeassistant.helpers.typing import ConfigType
from .const import (
CONF_ALLOW_EA,
DEFAULT_SCAN_INTERVAL,
DEVICES_THAT_ADOPT,
DOMAIN,
MIN_REQUIRED_PROTECT_V,
OUTDATED_LOG_MESSAGE,
PLATFORMS,
)
from .data import ProtectData, async_ufp_instance_for_config_entry_ids
from .discovery import async_start_discovery
from .migrate import async_migrate_data
from .services import async_cleanup_services, async_setup_services
from .utils import (
_async_unifi_mac_from_hass,
async_create_api_client,
async_get_devices,
)
from .views import ThumbnailProxyView, VideoProxyView
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=DEFAULT_SCAN_INTERVAL)
CONFIG_SCHEMA = cv.config_entry_only_config_schema(DOMAIN)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the UniFi Protect."""
# Only start discovery once regardless of how many entries they have
async_start_discovery(hass)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up the UniFi Protect config entries."""
protect = async_create_api_client(hass, entry)
_LOGGER.debug("Connect to UniFi Protect")
data_service = ProtectData(hass, protect, SCAN_INTERVAL, entry)
try:
nvr_info = await protect.get_nvr()
except NotAuthorized as err:
raise ConfigEntryAuthFailed(err) from err
except (asyncio.TimeoutError, ClientError, ServerDisconnectedError) as err:
raise ConfigEntryNotReady from err
if nvr_info.version < MIN_REQUIRED_PROTECT_V:
_LOGGER.error(
OUTDATED_LOG_MESSAGE,
nvr_info.version,
MIN_REQUIRED_PROTECT_V,
)
return False
if entry.unique_id is None:
hass.config_entries.async_update_entry(entry, unique_id=nvr_info.mac)
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = data_service
entry.async_on_unload(entry.add_update_listener(_async_options_updated))
entry.async_on_unload(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, data_service.async_stop)
)
if (
not entry.options.get(CONF_ALLOW_EA, False)
and await nvr_info.get_is_prerelease()
):
ir.async_create_issue(
hass,
DOMAIN,
"ea_warning",
is_fixable=True,
is_persistent=True,
learn_more_url="https://www.home-assistant.io/integrations/unifiprotect#about-unifi-early-access",
severity=IssueSeverity.WARNING,
translation_key="ea_warning",
translation_placeholders={"version": str(nvr_info.version)},
data={"entry_id": entry.entry_id},
)
try:
await _async_setup_entry(hass, entry, data_service)
except Exception as err:
if await nvr_info.get_is_prerelease():
# If they are running a pre-release, it's quite common for setup
# to fail, so we want to create a repair issue for them so it's
# obvious what the problem is.
ir.async_create_issue(
hass,
DOMAIN,
f"ea_setup_failed_{nvr_info.version}",
is_fixable=False,
is_persistent=False,
learn_more_url="https://www.home-assistant.io/integrations/unifiprotect#about-unifi-early-access",
severity=IssueSeverity.ERROR,
translation_key="ea_setup_failed",
translation_placeholders={
"error": str(err),
"version": str(nvr_info.version),
},
)
ir.async_delete_issue(hass, DOMAIN, "ea_warning")
_LOGGER.exception("Error setting up UniFi Protect integration: %s", err)
raise
return True
async def _async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, data_service: ProtectData
) -> None:
await async_migrate_data(hass, entry, data_service.api)
await data_service.async_setup()
if not data_service.last_update_success:
raise ConfigEntryNotReady
await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
async_setup_services(hass)
hass.http.register_view(ThumbnailProxyView(hass))
hass.http.register_view(VideoProxyView(hass))
async def _async_options_updated(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Update options."""
await hass.config_entries.async_reload(entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload UniFi Protect config entry."""
if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
data: ProtectData = hass.data[DOMAIN][entry.entry_id]
await data.async_stop()
hass.data[DOMAIN].pop(entry.entry_id)
async_cleanup_services(hass)
return bool(unload_ok)
async def async_remove_config_entry_device(
hass: HomeAssistant, config_entry: ConfigEntry, device_entry: dr.DeviceEntry
) -> bool:
"""Remove ufp config entry from a device."""
unifi_macs = {
_async_unifi_mac_from_hass(connection[1])
for connection in device_entry.connections
if connection[0] == dr.CONNECTION_NETWORK_MAC
}
api = async_ufp_instance_for_config_entry_ids(hass, {config_entry.entry_id})
assert api is not None
if api.bootstrap.nvr.mac in unifi_macs:
return False
for device in async_get_devices(api.bootstrap, DEVICES_THAT_ADOPT):
if device.is_adopted_by_us and device.mac in unifi_macs:
return False
return True
|
cf7fe185d69ac6efc244ed4da2ed0e4033a4292d
|
4e212b61aeb142fca3b2ea801df76a9c37cf9d9d
|
/cuegui/cuegui/plugins/MonitorHostsPlugin.py
|
be40d74532fbfd2d1174832a8a84dad372fe8284
|
[
"Apache-2.0"
] |
permissive
|
AcademySoftwareFoundation/OpenCue
|
90f2c9e90370966a9d7488e7022d484805abce33
|
c1f335d22e59cdf75859aa14ecdfe43d9cb43e95
|
refs/heads/master
| 2023-08-25T21:53:58.408872
| 2023-08-09T15:47:45
| 2023-08-09T15:47:45
| 133,735,379
| 439
| 191
|
Apache-2.0
| 2023-09-14T00:46:43
| 2018-05-16T23:58:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,824
|
py
|
MonitorHostsPlugin.py
|
# Copyright Contributors to the OpenCue Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plugin for viewing the list of hosts and performing administrative tasks."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from qtpy import QtCore
from qtpy import QtWidgets
import cuegui.AbstractDockWidget
import cuegui.HostMonitor
import cuegui.ProcMonitor
PLUGIN_NAME = "Monitor Hosts"
PLUGIN_CATEGORY = "Cuecommander"
PLUGIN_DESCRIPTION = "An administrator interface to hosts and procs"
PLUGIN_REQUIRES = "CueCommander"
PLUGIN_PROVIDES = "HostMonitorDockWidget"
class HostMonitorDockWidget(cuegui.AbstractDockWidget.AbstractDockWidget):
"""Plugin for viewing the list of hosts and performing administrative tasks."""
def __init__(self, parent):
cuegui.AbstractDockWidget.AbstractDockWidget.__init__(self, parent, PLUGIN_NAME)
self.__monitorHosts = cuegui.HostMonitor.HostMonitor(self)
self.__monitorProcs = cuegui.ProcMonitor.ProcMonitor(self)
self.__splitter = QtWidgets.QSplitter(QtCore.Qt.Vertical)
self.layout().addWidget(self.__splitter)
self.__splitter.addWidget(self.__monitorHosts)
self.__splitter.addWidget(self.__monitorProcs)
self.pluginRegisterSettings([("splitterSize",
self.__splitter.sizes,
self.__splitter.setSizes),
("hostColumnVisibility",
self.__monitorHosts.getColumnVisibility,
self.__monitorHosts.setColumnVisibility),
("procColumnVisibility",
self.__monitorProcs.getColumnVisibility,
self.__monitorProcs.setColumnVisibility),
("hostColumnOrder",
self.__monitorHosts.getColumnOrder,
self.__monitorHosts.setColumnOrder),
("procColumnOrder",
self.__monitorProcs.getColumnOrder,
self.__monitorProcs.setColumnOrder)])
|
22c758d99497e1f3715059d906c0242e8d87ada0
|
1cce1a31045943a27ebfd265002bf0c0a9af8757
|
/japonicus/configIndicators.py
|
3c885f15329899b1a1718cddc16217dd28000f53
|
[
"MIT"
] |
permissive
|
Gab0/japonicus
|
e83ec8a0fe9a5d26ee21d8eaf4048137dc277dcd
|
af4aaf74f8b2195e1cdb512f00d33c5ad9868805
|
refs/heads/master
| 2023-08-04T19:26:41.523035
| 2019-11-08T22:44:15
| 2019-11-08T22:44:15
| 102,791,982
| 234
| 98
|
MIT
| 2023-07-06T21:14:08
| 2017-09-07T22:34:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,693
|
py
|
configIndicators.py
|
#!/bin/python
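# Indicator search space: scalar entries are fixed settings, while the
# two-element tuples appear to be (low, high) ranges to explore per parameter.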
cI = {
"ADX": {"active": True, "period": 14, "thresholds.up": 70, "thresholds.down": 50},
"ATR": {"active": True, "period": 14, "thresholds.up": 70, "thresholds.down": 50},
"PPO": {
"active": True,
"short": (6, 18), # short EMA
"long": (13, 39), # long EMA
"signal": (1, 18), # 100 * (shortEMA - longEMA / longEMA)
"thresholds.down": (-0.5, 0.), # trend thresholds
"thresholds.up": (0., 0.5), # trend thresholds
},
"TSI": {
"active": True,
"thresholds.up": (15, 35),
"thresholds.down": (-35, -15),
"short": (3, 12),
"long": (15, 35),
},
"LRC": {
"active": True,
"thresholds.up": (15, 35),
"thresholds.down": (-35, -15),
"depth": (3, 18),
},
"RSI": {
"active": True,
"interval": (7, 21), # weight
"thresholds.down": (15, 45), # trend thresholds
"thresholds.up": (45, 140), # trend thresholds
},
"SMMA": {
"active": True,
"weight": (7, 16),
"thresholds.up": (0, 0.1),
"thresholds.down": (-0.1, 0),
},
"DEMA": {
"active": True,
"short": (7, 15),
"long": (12, 35),
"thresholds.up": (0, 0.1),
"thresholds.down": (-0.1, 0),
},
"CCI": {
"active": True,
"consistant": (7, 21), # constant multiplier. 0.015 gets to around 70% fit
"history": (45, 135), # history size, make same or smaller than history
"thresholds.down": (-150, -50), # trend thresholds
"thresholds.up": (50, 150), # trend thresholds
"thresholds.persistence": (4, 10),
},
}
|
145e7283ab91a189a9eba04e6dc88bb3272d6eee
|
e7f2a8c466c14b9821e59740ed0407107e1254a4
|
/rasa/engine/storage/storage.py
|
a113c0efc609dbc90677a645b1bccd73e9f8083b
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"MIT"
] |
permissive
|
RasaHQ/rasa
|
4a31134308a9a4d8824fe7faef02526accdd0f19
|
50857610bdf0c26dc61f3203a6cbb4bcf193768c
|
refs/heads/main
| 2023-08-28T01:53:56.981600
| 2023-08-25T10:20:49
| 2023-08-25T10:20:49
| 70,908,208
| 13,167
| 3,739
|
Apache-2.0
| 2023-09-14T09:54:40
| 2016-10-14T12:27:49
|
Python
|
UTF-8
|
Python
| false
| false
| 6,923
|
py
|
storage.py
|
from __future__ import annotations
import abc
import logging
import typing
from contextlib import contextmanager
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import List, Tuple, Union, Text, Generator, Dict, Any, Optional
from packaging import version
from rasa.constants import MINIMUM_COMPATIBLE_VERSION
from rasa.exceptions import UnsupportedModelVersionError
from rasa.engine.storage.resource import Resource
from rasa.shared.core.domain import Domain
from rasa.shared.data import TrainingType
if typing.TYPE_CHECKING:
from rasa.engine.graph import GraphSchema, GraphModelConfiguration
logger = logging.getLogger(__name__)
class ModelStorage(abc.ABC):
"""Serves as storage backend for `GraphComponents` which need persistence."""
@classmethod
@abc.abstractmethod
def create(cls, storage_path: Path) -> ModelStorage:
"""Creates the storage.
Args:
storage_path: Directory which will contain the persisted graph components.
"""
...
@classmethod
@abc.abstractmethod
def from_model_archive(
cls, storage_path: Path, model_archive_path: Union[Text, Path]
) -> Tuple[ModelStorage, ModelMetadata]:
"""Unpacks a model archive and initializes a `ModelStorage`.
Args:
storage_path: Directory which will contain the persisted graph components.
model_archive_path: The path to the model archive.
Returns:
Initialized model storage, and metadata about the model.
Raises:
`UnsupportedModelVersionError` if the loaded metadata indicates that the model
has been created with an outdated Rasa version.
"""
...
@classmethod
@abc.abstractmethod
def metadata_from_archive(
cls, model_archive_path: Union[Text, Path]
) -> ModelMetadata:
"""Retrieves metadata from archive.
Args:
model_archive_path: The path to the model archive.
Returns:
Metadata about the model.
Raises:
`UnsupportedModelVersionError` if the loaded metadata indicates that the model
has been created with an outdated Rasa version.
"""
...
@contextmanager
@abc.abstractmethod
def write_to(self, resource: Resource) -> Generator[Path, None, None]:
"""Persists data for a given resource.
This `Resource` can then be accessed in dependent graph nodes via
`model_storage.read_from`.
Args:
resource: The resource which should be persisted.
Returns:
A directory which can be used to persist data for the given `Resource`.
"""
...
@contextmanager
@abc.abstractmethod
def read_from(self, resource: Resource) -> Generator[Path, None, None]:
"""Provides the data of a persisted `Resource`.
Args:
resource: The `Resource` whose persisted data should be accessed.
Returns:
A directory containing the data of the persisted `Resource`.
Raises:
ValueError: In case no persisted data for the given `Resource` exists.
"""
...
@abc.abstractmethod
def create_model_package(
self,
model_archive_path: Union[Text, Path],
model_configuration: GraphModelConfiguration,
domain: Domain,
) -> ModelMetadata:
"""Creates a model archive containing all data to load and run the model.
Args:
model_archive_path: The path to the archive which should be created.
model_configuration: The model configuration (schemas, language, etc.)
domain: The `Domain` which was used to train the model.
Returns:
The model metadata.
"""
...
@dataclass()
class ModelMetadata:
"""Describes a trained model."""
trained_at: datetime
rasa_open_source_version: Text
model_id: Text
assistant_id: Optional[Text]
domain: Domain
train_schema: GraphSchema
predict_schema: GraphSchema
project_fingerprint: Text
core_target: Optional[Text]
nlu_target: Text
language: Optional[Text]
spaces: Optional[List[Dict[Text, Any]]] = None
training_type: TrainingType = TrainingType.BOTH
def __post_init__(self) -> None:
"""Raises an exception when the metadata indicates an unsupported version.
Raises:
`UnsupportedModelVersionError` if the `rasa_open_source_version` is lower
than the minimum compatible version
"""
minimum_version = version.parse(MINIMUM_COMPATIBLE_VERSION)
model_version = version.parse(self.rasa_open_source_version)
if model_version < minimum_version:
raise UnsupportedModelVersionError(model_version=model_version)
def as_dict(self) -> Dict[Text, Any]:
"""Returns serializable version of the `ModelMetadata`."""
return {
"domain": self.domain.as_dict(),
"trained_at": self.trained_at.isoformat(),
"model_id": self.model_id,
"assistant_id": self.assistant_id,
"rasa_open_source_version": self.rasa_open_source_version,
"train_schema": self.train_schema.as_dict(),
"predict_schema": self.predict_schema.as_dict(),
"training_type": self.training_type.value,
"project_fingerprint": self.project_fingerprint,
"core_target": self.core_target,
"nlu_target": self.nlu_target,
"language": self.language,
"spaces": self.spaces,
}
@classmethod
def from_dict(cls, serialized: Dict[Text, Any]) -> ModelMetadata:
"""Loads `ModelMetadata` which has been serialized using `metadata.as_dict()`.
Args:
serialized: Serialized `ModelMetadata` (e.g. read from disk).
Returns:
Instantiated `ModelMetadata`.
"""
from rasa.engine.graph import GraphSchema
return ModelMetadata(
trained_at=datetime.fromisoformat(serialized["trained_at"]),
rasa_open_source_version=serialized["rasa_open_source_version"],
model_id=serialized["model_id"],
assistant_id=serialized.get("assistant_id"),
domain=Domain.from_dict(serialized["domain"]),
train_schema=GraphSchema.from_dict(serialized["train_schema"]),
predict_schema=GraphSchema.from_dict(serialized["predict_schema"]),
training_type=TrainingType(serialized["training_type"]),
project_fingerprint=serialized["project_fingerprint"],
core_target=serialized["core_target"],
nlu_target=serialized["nlu_target"],
language=serialized["language"],
# optional, since introduced later
spaces=serialized.get("spaces"),
)
|
5975f0decc3c3f6a53794e4dc089ef675c91c7d6
|
2a76ca8c01e7abe6ef64d030ecbb65e88641b278
|
/glumpy/api/matplotlib/axes.py
|
26b3e34f379756afd8ea9bcef55f4dd397ea2cd6
|
[] |
permissive
|
glumpy/glumpy
|
18bfc2d76b7a5fc126fbebddf2970d95238fc66b
|
75408635bd46e48ff10939e308a71eafdaff35e8
|
refs/heads/master
| 2023-09-03T11:48:52.087002
| 2023-04-20T15:23:59
| 2023-04-20T15:23:59
| 23,520,171
| 1,228
| 225
|
BSD-3-Clause
| 2023-07-07T07:25:18
| 2014-08-31T18:30:26
|
Python
|
UTF-8
|
Python
| false
| false
| 3,835
|
py
|
axes.py
|
# -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 Nicolas P. Rougier. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
import numpy as np
from glumpy import app, gloo, gl, transforms
from glumpy.graphics.collections import PointCollection
vertex = """
attribute vec2 position;
void main()
{
gl_Position = vec4(position,0,1);
<viewport.transform>;
}
"""
fragment = """
uniform vec4 color;
void main()
{
gl_FragColor = color;
<viewport.clipping>;
}
"""
class Axes(app.Viewport):
""" """
def __init__(self, rect=[0,0,1,1], facecolor=(1,1,1,1),
xscale = None, yscale = None, zscale = None,
projection = None, interface = None, aspect=None):
size = rect[2], rect[3]
position = rect[0]+size[0]/2, rect[1]+size[1]/2
anchor = 0.5, 0.5
app.Viewport.__init__(self, size, position, anchor, aspect)
xscale = xscale if xscale is not None else transforms.LinearScale()
yscale = yscale if yscale is not None else transforms.LinearScale()
zscale = zscale if zscale is not None else transforms.LinearScale()
projection = projection if projection is not None else transforms.IdentityProjection()
interface = interface if interface is not None else transforms.Position()
self._viewport = transforms.Viewport()
xscale = xscale('.x', name = 'xscale')
yscale = yscale('.y', name = 'yscale')
zscale = zscale('.z', name = 'zscale')
self._scale = transforms.Position(xscale, yscale, zscale)
self._projection = projection #transforms.IdentityProjection()
self._interface = interface(aspect=aspect)
self.program = gloo.Program(vertex, fragment, count=4)
self.program['position'] = [(-1,-1), (-1,+1), (+1,-1), (+1,+1)]
self.program['color'] = facecolor
self.program['viewport'] = self._viewport
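# Compose the full data-to-screen transform: scale, then projection, then the
# interface transform.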
self._transform = self._interface(self._projection(self._scale))
self.attach(self._transform)
self._drawables = []
def add_drawable(self, collection):
drawable = collection.view(transform=self._transform, viewport= self._viewport)
self._drawables.append(drawable)
@property
def viewport(self):
""" Viewport transform """
return self._viewport
@property
def transform(self):
""" Global transform """
return self._transform
@property
def scale(self):
""" Scale transform """
return self._scale
def add_axes(self, rect=[0,0,1,1], facecolor=(1,1,1,1),
xscale = None, yscale = None, zscale = None,
projection = None, interface = None, aspect=None):
axes = Axes(rect=rect,facecolor=facecolor, aspect=aspect,
xscale=xscale, yscale=yscale, zscale=zscale,
projection=projection, interface=interface)
self.add(axes)
return axes
def on_draw(self, dt):
self.program.draw(gl.GL_TRIANGLE_STRIP)
for drawable in self._drawables:
drawable.draw()
app.Viewport.on_draw(self,dt)
def on_resize(self, width, height):
if self.parent == None:
self._requested_size = width, height
self._compute_viewport()
self.dispatcher.dispatch_event("on_resize", self.size[0], self.size[1])
if self.viewport.is_attached:
# self._viewport.dispatch_event("on_resize", width, height)
self._viewport["global"] = self.root.extents
self._viewport["extents"] = self.extents
for child in self._children:
child.dispatch_event("on_resize", width, height)
|
911bfd4dab55682398fc8b914ab3c2bf1eaa471a
|
abdbe2c4081ae1d52e81d19a71101a760508d524
|
/scripts/recombination/simulation/makeRandomRecombinants.py
|
00621deee529ad71c353ffe451e20c9041657ec5
|
[
"MIT"
] |
permissive
|
yatisht/usher
|
70b501a3cf4d7f144164aa08682e1b57bcb2bcf4
|
2df81ee5108d6dc85dc7b3e8aaecf1c6a32fe312
|
refs/heads/master
| 2023-05-12T17:13:12.397649
| 2023-05-09T20:24:11
| 2023-05-09T20:24:11
| 296,144,053
| 121
| 37
|
MIT
| 2023-05-09T20:24:12
| 2020-09-16T20:44:57
|
C++
|
UTF-8
|
Python
| false
| false
| 15,510
|
py
|
makeRandomRecombinants.py
|
#!/usr/bin/env python3
# Name: Bryan Thornlow
# Date: 2/1/2018
# compareDatabases.py
import sys
import os
import datetime
import numpy
from numpy import random
import gzip
import math
import argparse
##########################
###### COMMAND LINE ######
##########################
class CommandLine(object):
"""Handles the input arguments from the command line. Manages
the argument parser."""
def __init__(self, inOpts=None):
'''
CommandLine constructor.
Implements a parser to interpret the command line input using argparse.
'''
self.parser = argparse.ArgumentParser()
self.parser.add_argument("-b", "--breakpoints", help="Number of breakpoints that each recombinant sample will have. Must be [1..4] (Default = 1).", default=1, type=int)
self.parser.add_argument("-s", "--samples", help="Number of recombinant samples to create (Default = 100).", default=100, type=int)
self.parser.add_argument("-c", "--copies", help="Number of identical copies to make for each recombinant sample (Default = 10).", default=10, type=int)
self.parser.add_argument("-m", "--commonMutations", help="Number of mutations to add to each copy, shared by all in a set. (Default = 0).", default=0, type=int)
self.parser.add_argument("-M", "--randomMutations", help="Number of mutations to add to each copy, randomly chosen for each copy. (Default = 0).", default=0, type=int)
self.parser.add_argument("-t", "--threshold", help="Minimum mutational distance for acceptor/donor samples (Default = 10).", default=10, type=int)
self.parser.add_argument("-f", "--fasta", help="Fasta file containing sequences for acceptor/donor samples. [Must include either -f or -d!]", default='')
self.parser.add_argument("-d", "--differences", help="File containing all mutations relative to reference for each sample (allNodeToMuts.py output by makeMutsFile.py) [Must include either -f or -d!]", default='')
self.parser.add_argument("-r", "--ref", help="Fasta file containing reference genome for use in creating VCF. (Default = 'wuhan.ref.fa').", default='wuhan.ref.fa')
self.parser.add_argument("-S", "--separate", help="If enabled, will produce one MSA as a .fasta file for each set of recombinants to the argument directory. If not enabled, will not produce these files.", default=False)
if inOpts is None:
self.args = self.parser.parse_args()
else:
self.args = self.parser.parse_args(inOpts)
if self.args.breakpoints < 1 or self.args.breakpoints > 4:
sys.stderr.write("Please retry with between 1 and 4 breakpoints.\n")
sys.exit(1)
if self.args.fasta == '' and self.args.differences == '':
sys.stderr.write("Please supply either a MSA via --fasta or a diff file via --differences.\n")
sys.exit(1)
##########################
##### MAIN FUNCTIONS #####
##########################
def makeExamples(myS, myB, myC, myD, myF, myT, mym, myM, myR, mySep):
### Read in reference sequence
posToRef = {}
with open(myR) as f:
for line in f:
l = line.strip()
if not l.startswith('>'):
myReference = l.upper()
for i in range(0,len(myReference)):
posToRef[i] = myReference[i]
else:
myRefName = l[1:]
sys.stderr.write("Finished reading in reference.\n")
### Read in either differences file or MSA to get our node sequences
nodeToDiffs = {}
if myD != '':
with open(myD) as f:
for line in f:
splitLine = (line.strip()).split('\t')
if len(splitLine) == 1:
nodeToDiffs[splitLine[0]] = []
else:
nodeToDiffs[splitLine[0]] = splitLine[1].split(',')
sampleToSeq = {}
if myF != '':
with open(myF) as f:
for line in f:
l = line.strip()
if l.startswith('>'):
mySample = l[1:]
else:
sampleToSeq[mySample] = l
sys.stderr.write("Finished reading in Diff/MSA input file.\n")
recSampleToLog = {}
recSampleToSeq = {}
recSampleToDiffBetweenBps = {}
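# Keep sampling donor/acceptor pairs until enough recombinants pass the
# minimum-distance threshold between breakpoint segments.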
while len(recSampleToSeq.keys()) < myS:
### Get samples for 2 sequences that will make up our recombinant
if myD == '':
samples = numpy.random.choice(list(sampleToSeq.keys()), size=2, replace=False)
mySampleName = 'RECOMB_'+str(myB)+'_'+str(len(recSampleToSeq))+'_'+str(samples[0])+'_'+str(samples[1])
s1 = samples[0]
s2 = samples[1]
elif myF == '':
samples = numpy.random.choice(list(nodeToDiffs.keys()), size=2, replace=False)
mySampleName = 'RECOMB_'+str(myB)+'_'+str(len(recSampleToSeq))+'_'+str(samples[0])+'_'+str(samples[1])
s1 = samples[0]
sampleToSeq[s1] = addMuts(myReference, nodeToDiffs[s1])
s2 = samples[1]
sampleToSeq[s2] = addMuts(myReference, nodeToDiffs[s2])
### Create recombinant sequence from our two samples
myTotalDiff = getDiff(sampleToSeq[s1],sampleToSeq[s2], 0)
if myB == 1:
bps = numpy.random.choice(sorted(list(posToRef.keys())),size=1, replace=False)
bp1 = bps[0]
mySeq = sampleToSeq[s1][:bp1]+sampleToSeq[s2][bp1:]
myDiff = minLen(getDiff(sampleToSeq[s1][:bp1], sampleToSeq[s2][:bp1], 0), getDiff(sampleToSeq[s1][bp1:], sampleToSeq[s2][bp1:], bp1))
print(myDiff, myT)
elif myB == 2:
bps = sorted(numpy.random.choice(sorted(list(posToRef.keys())),size=2, replace=False))
while bps[1]-bps[0] <= 1000:
bps = sorted(numpy.random.choice(sorted(list(posToRef.keys())),size=2, replace=False))
bp1 = bps[0]
bp2 = bps[1]
mySeq = sampleToSeq[s1][:bp1]+sampleToSeq[s2][bp1:bp2]+sampleToSeq[s1][bp2:]
myDiff = minLen(getDiff(sampleToSeq[s1][bp1:bp2], sampleToSeq[s2][bp1:bp2], bp1), getDiff(sampleToSeq[s1][:bp1]+sampleToSeq[s1][bp2:], sampleToSeq[s2][:bp1]+sampleToSeq[s2][bp2:], 0))
elif myB == 3:
bps = sorted(numpy.random.choice(sorted(list(posToRef.keys())),size=3, replace=False))
while bps[1]-bps[0] <= 1000 or bps[2]-bps[1] <= 1000:
bps = sorted(numpy.random.choice(sorted(list(posToRef.keys())),size=3, replace=False))
bp1 = bps[0]
bp2 = bps[1]
bp3 = bps[2]
mySeq = sampleToSeq[s1][:bp1]+sampleToSeq[s2][bp1:bp2]+sampleToSeq[s1][bp2:bp3]+sampleToSeq[s2][bp3:]
diff1 = getDiff(sampleToSeq[s1][bp1:bp2]+sampleToSeq[s1][bp3:], sampleToSeq[s2][bp1:bp2]+sampleToSeq[s2][bp3:], bp1)
diff2 = getDiff(sampleToSeq[s1][:bp1]+sampleToSeq[s1][bp2:bp3], sampleToSeq[s2][:bp1]+sampleToSeq[s2][bp2:bp3], 0)
myDiff = minLen(diff1, diff2)
elif myB == 4:
bps = sorted(numpy.random.choice(sorted(list(posToRef.keys())),size=4, replace=False))
while bps[1]-bps[0] <= 1000 or bps[2]-bps[1] <= 1000 or bps[3]-bps[2] <= 1000:
bps = sorted(numpy.random.choice(sorted(list(posToRef.keys())),size=4, replace=False))
bp1 = bps[0]
bp2 = bps[1]
bp3 = bps[2]
bp4 = bps[3]
mySeq = sampleToSeq[s1][:bp1]+sampleToSeq[s2][bp1:bp2]+sampleToSeq[s1][bp2:bp3]+sampleToSeq[s2][bp3:bp4]+sampleToSeq[s1][bp4:]
diff1 = getDiff(sampleToSeq[s1][bp1:bp2]+sampleToSeq[s1][bp3:bp4], sampleToSeq[s2][bp1:bp2]+sampleToSeq[s2][bp3:bp4], bp1)
diff2 = getDiff(sampleToSeq[s1][:bp1]+sampleToSeq[s1][bp2:bp3]+sampleToSeq[s1][bp4:], sampleToSeq[s2][:bp1]+sampleToSeq[s2][bp2:bp3]+sampleToSeq[s2][bp4:], 0)
myDiff = minLen(diff1, diff2)
### If the differences between the parents of our recombinant is above the threshold between all adjacent breakpoint pairs, keep it
if len(myDiff) >= myT:
recSampleToLog[mySampleName] = [joiner(samples), joiner(bps), len(myTotalDiff), len(myDiff)]
if mym > 0: ### Add common mutations here, prior to making copies
myMuts = []
for m in range(0, mym):
mySeq, myMut = addMut(mySeq, numpy.random.choice(sorted(list(posToRef.keys())),size=1, replace=False)[0])
myMuts.append(myMut)
recSampleToLog[mySampleName].append(joiner(myMuts))
recSampleToSeq[mySampleName] = mySeq
recSampleToDiffBetweenBps[mySampleName] = myDiff
sys.stderr.write("Generated "+str(len(recSampleToSeq.keys()))+" recombinant sequences.\n")
sys.stderr.write("Finished generating recombinant sequences.\n")
bpToHeader = {}
bpToHeader[1] = 'recombinant_sample\tparent1\tparent2\tgenetic_distance\tmutations_in_recomb_tract\tbp1\n'
bpToHeader[2] = 'recombinant_sample\tparent1\tparent2\tgenetic_distance\tmutations_in_recomb_tract\tbp1\tbp2\n'
bpToHeader[3] = 'recombinant_sample\tparent1\tparent2\tgenetic_distance\tmutations_in_recomb_tract\tbp1\tbp2\tbp3\n'
bpToHeader[4] = 'recombinant_sample\tparent1\tparent2\tgenetic_distance\tmutations_in_recomb_tract\tbp1\tbp2\tbp3\tbp4\n'
### Write our MSA, which we will use to create a VCF to add to the starting .pb via faToVcf and UShER
myOutMSA = '>'+myRefName+'\n'+myReference+'\n'
mySepMSAs = []
myOutLog = bpToHeader[myB]
myOutDiff = ''
for s in recSampleToSeq:
tempMSA = '>'+myRefName+'\n'+myReference+'\n' ### Fasta should have reference sequence first, for use with faToVcf
for x in range(0,myC):
if myM > 0:
myMuts = []
mySeq = recSampleToSeq[s]
for m in range(0, myM):
mySeq, myMut = addMut(mySeq, numpy.random.choice(sorted(list(posToRef.keys())),size=1, replace=False)[0])
myMuts.append(myMut)
myOutMSA += '>'+s+'_X'+str(x)+'\n'+mySeq+'\n'
tempMSA += '>'+s+'_X'+str(x)+'\n'+mySeq+'\n'
myOutLog += s+'_X'+str(x)+'\t'+doubleJoiner(recSampleToLog[s])+'\t'+joiner(myMuts)+'\n'
myOutDiff += s+'_X'+str(x)+'\t'+joinerC(recSampleToDiffBetweenBps[s])+'\n'
else:
myOutMSA += '>'+s+'_X'+str(x)+'\n'+recSampleToSeq[s]+'\n'
tempMSA += '>'+s+'_X'+str(x)+'\n'+recSampleToSeq[s]+'\n'
myOutLog += s+'_X'+str(x)+'\t'+doubleJoiner(recSampleToLog[s])+'\n'
myOutDiff += s+'_X'+str(x)+'\t'+joinerC(recSampleToDiffBetweenBps[s])+'\n'
mySepMSAs.append(tempMSA)
open('recombination_'+str(myB)+'_'+str(myC)+'_'+str(myM)+'.msa.fa','w').write(myOutMSA)
open('recombination_'+str(myB)+'_'+str(myC)+'_'+str(myM)+'.log','w').write(myOutLog)
open('recombination_'+str(myB)+'_'+str(myC)+'_'+str(myM)+'.differences.txt','w').write(myOutDiff)
myOutFaToVcf = ''
if mySep != False:
if not os.path.exists(mySep):
os.mkdir('./'+mySep)
for i in range(1,len(mySepMSAs)+1):
open('./'+mySep+'/recombinant_set_'+str(i)+'.fa','w').write(mySepMSAs[i-1])
myOutFaToVcf += 'faToVcf recombinant_set_'+str(i)+'.fa recombinant_set_'+str(i)+'.vcf\n'
open('./'+mySep+'/makeSetVCFs.sh','w').write(myOutFaToVcf)
##########################
#### HELPER FUNCTIONS ####
##########################
def addMut(seq, pos):
#print(pos, len(seq))
myReturn = []
for i in range(0,len(seq)):
if i != int(pos):
myReturn.append(seq[i])
else:
if seq[i] == 'A':
mySub = numpy.random.choice(['C','G','T'], size=1)[0]
elif seq[i] == 'C':
mySub = numpy.random.choice(['A','G','T'], size=1)[0]
elif seq[i] == 'G':
mySub = numpy.random.choice(['C','A','T'], size=1)[0]
elif seq[i] == 'T':
mySub = numpy.random.choice(['A','G','C'], size=1)[0]
else:
mySub = numpy.random.choice(['A','G','C','T'], size=1)[0]
myMut = seq[i]+str(pos)+mySub
myReturn.append(mySub)
return(''.join(myReturn), myMut)
def addMuts(ref, muts):
myReturn = []
for k in list(ref):
myReturn.append(k)
if len(muts) > 0:
for k in muts:
myCoord = int(k[1:-1])-1
if not myReturn[myCoord] == k[0]:
print(k, myReturn[myCoord])
myReturn[myCoord] = str(k[-1])
if not myReturn[myCoord] == k[-1]:
print(k, myReturn[myCoord])
return(''.join(myReturn))
def getDiff(s1, s2, add):
myReturn = []
for i in range(0,len(s1)):
if s1[i] != s2[i]:
myReturn.append(add+i)
return(myReturn)
def minLen(l1, l2):
if len(l1) <= len(l2):
return(l1)
else:
return(l2)
def doubleJoiner(myList):
myReturn = []
for k in myList:
if type(k) == list:
myReturn.append(joiner(k))
else:
myReturn.append(k)
return(joiner(myReturn))
def replaceSymbols(myEntry):
myEntry = myEntry.replace('|', '_')
myEntry = myEntry.replace('/', '_')
return(myEntry)
def joiner(entry):
newList = []
for k in entry:
newList.append(str(k))
return '\t'.join(newList)
def joinerU(entry):
newList = []
for k in entry:
newList.append(str(k))
return '_'.join(newList)
def joinerC(entry):
newList = []
for k in entry:
newList.append(str(k))
return(','.join(newList))
#########################
##### FUNCTION CALL #####
#########################
def main(myCommandLine=None):
"""
Initializes a CommandLine object, parses the provided
arguments and calls makeExamples with them.
"""
myCommandLine = CommandLine()
# Necessary files:
if myCommandLine.args.samples:
myS = myCommandLine.args.samples
else:
myS = 100
if myCommandLine.args.breakpoints:
myB = myCommandLine.args.breakpoints
else:
myB = 1
if myCommandLine.args.copies:
myC = myCommandLine.args.copies
else:
myC = 10
if str(myCommandLine.args.threshold):
myT = myCommandLine.args.threshold
else:
myT = 10
if myCommandLine.args.fasta:
myF = myCommandLine.args.fasta
else:
myF = ''
if myCommandLine.args.commonMutations:
mym = myCommandLine.args.commonMutations
else:
mym = 0
if myCommandLine.args.randomMutations:
myM = myCommandLine.args.randomMutations
else:
myM = 0
if myCommandLine.args.ref:
myR = myCommandLine.args.ref
else:
myR = 'wuhan.ref.fa'
if myCommandLine.args.separate:
mySep = myCommandLine.args.separate
else:
mySep = False
if myCommandLine.args.differences:
myD = myCommandLine.args.differences
else:
myD = ''
makeExamples(myS, myB, myC, myD, myF, myT, mym, myM, myR, mySep)
if __name__ == "__main__":
"""
Calls main when program is run by user.
"""
main();
raise SystemExit
if __name__ == "__main__":
"""
Calls main when program is run by user.
"""
main();
raise SystemExit
|
bbde9886165f01adc74a0c70d3b94868077c0486
|
e665502aadb0c97d611a6a929dd1976763ca171c
|
/modules/lift_ori.py
|
6f4430a4d217a7afb5f3a79d4645db5cb3cf1704
|
[] |
no_license
|
cvlab-epfl/tf-lift
|
b5da55596818809a44b9cb5a3c8a24655e84d1b0
|
5341909002e0a3269a115dc7f9ff7b5330961052
|
refs/heads/master
| 2022-12-25T19:42:27.275298
| 2020-09-30T22:16:25
| 2020-09-30T22:16:25
| 107,135,620
| 202
| 67
| null | 2020-04-13T15:54:52
| 2017-10-16T14:03:45
|
Python
|
UTF-8
|
Python
| false
| false
| 4,385
|
py
|
lift_ori.py
|
# lift_ori.py ---
#
# Filename: lift_ori.py
# Description: WRITEME
# Author: Kwang Moo Yi
# Maintainer: Kwang Moo Yi
# Created: Wed Jun 28 20:02:50 2017 (+0200)
# Version:
# Package-Requires: ()
# URL:
# Doc URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change Log:
#
#
#
# Copyright (C), EPFL Computer Vision Lab.
# Code:
import tensorflow as tf
from layers import batch_norm, conv_2d, fc, ghh, pool_max
from modules.bypass import bypass_ori
from utils import image_summary_nhwc
def process(inputs, bypass, name, skip, config, is_training):
"""WRITEME.
inputs: input to the network
bypass: gt to by used when trying to bypass
name: name of the siamese branch
skip: whether to apply the bypass information
"""
# let's look at the inputs that get fed into this layer
image_summary_nhwc(name + "-input", inputs)
if skip:
return bypass_ori(bypass)
# we always expect a dictionary as return value to be more explicit
res = {}
# now abuse cur_in so that we can simply copy paste
cur_in = inputs
# lets apply batch normalization on the input - we did not normalize the
# input range!
with tf.variable_scope("input-bn"):
if config.use_input_batch_norm:
cur_in = batch_norm(cur_in, training=is_training)
with tf.variable_scope("conv-act-pool-1"):
cur_in = conv_2d(cur_in, 5, 10, 1, "VALID")
if config.use_batch_norm:
cur_in = batch_norm(cur_in, training=is_training)
cur_in = tf.nn.relu(cur_in)
cur_in = pool_max(cur_in, 2, 2, "VALID")
with tf.variable_scope("conv-act-pool-2"):
cur_in = conv_2d(cur_in, 5, 20, 1, "VALID")
if config.use_batch_norm:
cur_in = batch_norm(cur_in, training=is_training)
cur_in = tf.nn.relu(cur_in)
cur_in = pool_max(cur_in, 2, 2, "VALID")
with tf.variable_scope("conv-act-pool-3"):
cur_in = conv_2d(cur_in, 3, 50, 1, "VALID")
if config.use_batch_norm:
cur_in = batch_norm(cur_in, training=is_training)
cur_in = tf.nn.relu(cur_in)
cur_in = pool_max(cur_in, 2, 2, "VALID")
# res["ori_out3"] = cur_in
with tf.variable_scope("fc-ghh-drop-4"):
nu = 100
ns = 4
nm = 4
cur_in = fc(cur_in, nu * ns * nm)
# cur_in = fc(cur_in, nu)
if config.use_batch_norm:
cur_in = batch_norm(cur_in, training=is_training)
if config.ori_activation == 'ghh':
cur_in = ghh(cur_in, ns, nm)
elif config.ori_activation == 'tanh':
cur_in = tf.nn.tanh(cur_in)
else:
raise RuntimeError("Bad orientation rectifier")
# cur_in = tf.nn.relu(cur_in)
if config.use_dropout_ori:
raise RuntimeError('Dropout not working properly!')
cur_in = tf.nn.dropout(
cur_in,
keep_prob=1.0 - (0.3 * tf.cast(is_training, tf.float32)),
)
# res["ori_out4"] = cur_in
with tf.variable_scope("fc-ghh-5"):
nu = 2
ns = 4
nm = 4
cur_in = fc(cur_in, nu * ns * nm)
# cur_in = fc(cur_in, nu)
if config.use_batch_norm:
cur_in = batch_norm(cur_in, training=is_training)
if config.ori_activation == 'ghh':
cur_in = ghh(cur_in, ns, nm)
elif config.ori_activation == 'tanh':
cur_in = tf.nn.tanh(cur_in)
else:
raise RuntimeError("Bad orientation rectifier")
# cur_in = tf.nn.relu(cur_in)
# res["ori_out5"] = cur_in
# with tf.variable_scope("fc-ghh-6"):
# cur_in = fc(cur_in, nu)
# res["ori_out6"] = cur_in
with tf.variable_scope("cs-norm"):
eps = 1e-10
# First, normalize according to the maximum of the two
cur_in_abs_max = tf.reduce_max(tf.abs(cur_in), axis=1, keep_dims=True)
cur_in = cur_in / tf.maximum(eps, cur_in_abs_max)
# Add an epsilon to avoid singularity
eps = 1e-3
cur_in += tf.to_float(cur_in >= 0) * eps - tf.to_float(cur_in < 0) * eps
# Now make norm one without worrying about div by zero
cur_in_norm = tf.sqrt(tf.reduce_sum(tf.square(
cur_in), axis=1, keep_dims=True))
cur_in /= cur_in_norm
res["cs"] = tf.reshape(cur_in, (-1, 2))
return res
#
# lift_ori.py ends here
|
499c539212c14e60b0adecc683c940442d641b4f
|
a5622dafafd782af153be2bc0bd19cb086fd07b2
|
/rest-service/manager_rest/storage/models_base.py
|
da99019d0a2e076e9ea54bb72274f2df40b07208
|
[
"Apache-2.0"
] |
permissive
|
cloudify-cosmo/cloudify-manager
|
8b2d226ad5a9dd8103d7690b2f8081bef24078e1
|
c0de6442e1d7653fad824d75e571802a74eee605
|
refs/heads/master
| 2023-09-06T09:11:51.753912
| 2023-09-04T08:01:58
| 2023-09-04T08:01:58
| 18,326,574
| 146
| 84
|
Apache-2.0
| 2023-09-04T08:02:00
| 2014-04-01T11:06:47
|
Python
|
UTF-8
|
Python
| false
| false
| 11,229
|
py
|
models_base.py
|
from datetime import datetime
import json
from typing import Any
from collections import OrderedDict
from alembic.util.sqla_compat import _literal_bindparam
from dateutil import parser as date_parser
from sqlalchemy import inspect
from flask_sqlalchemy import SQLAlchemy, BaseQuery
from flask_restful import fields as flask_fields
from sqlalchemy import MetaData
from sqlalchemy.ext.associationproxy import (ASSOCIATION_PROXY,
AssociationProxyInstance)
from sqlalchemy.ext.hybrid import HYBRID_PROPERTY
from sqlalchemy.orm.interfaces import NOT_EXTENSION
from cloudify.models_states import VisibilityState
from manager_rest.utils import classproperty, current_tenant
class DBQuery(BaseQuery):
def tenant(self, *tenants):
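# Restrict a single-entity query to rows that are globally visible or that
# belong to one of the given tenants (defaults to the current tenant).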
descrs = self.column_descriptions
if len(descrs) != 1:
raise RuntimeError(
f'Can only apply a tenant filter when querying for a single '
f'entity, but querying for {len(descrs)}')
model = descrs[0]['entity']
if not (getattr(model, 'is_resource', False)
or getattr(model, 'is_label', False)):
raise RuntimeError(
f'Can only apply a tenant filter to resources or labels, '
f'but got {model}')
if not tenants:
tenants = (current_tenant._get_current_object(), )
return self.filter(
db.or_(
model.visibility == VisibilityState.GLOBAL,
model._tenant_id.in_([t.id for t in tenants])
)
)
db = SQLAlchemy(query_class=DBQuery, metadata=MetaData(naming_convention={
# This is to generate migration scripts with constraint names
# using the same naming convention used by PostgreSQL by default
# http://stackoverflow.com/a/4108266/183066
'ix': '%(table_name)s_%(column_0_name)s_idx',
'uq': '%(table_name)s_%(column_0_name)s_key',
'ck': '%(table_name)s_%(column_0_name)s_check',
'fk': '%(table_name)s_%(column_0_name)s_fkey',
'pk': '%(table_name)s_pkey',
}))
# mypy gets really upset about subclassing those directly, but it's fine if
# we give them module-level names. Unfortunate.
TypeDecorator: Any = db.TypeDecorator
Model: Any = db.Model
Column: Any = db.Column
class UTCDateTime(TypeDecorator):
cache_ok = True
impl = db.DateTime
def process_result_value(self, value, engine):
# Adhering to the same norms used in the rest of the code
if value is not None:
# When the date has a microsecond value equal to 0,
# isoformat returns the time as 17:22:11 instead of
# 17:22:11.000, so we need to adjust the returned value
if value.microsecond:
return '{0}Z'.format(value.isoformat()[:-3])
else:
return '{0}.000Z'.format(value.isoformat())
def process_bind_param(self, value, dialect):
if value is None:
return None
if isinstance(value, str):
value = value.strip('Z')
return date_parser.parse(value)
else:
return value.replace(tzinfo=None)
class JSONString(TypeDecorator):
"""A json object stored as a string.
json encoding/decoding is handled by SQLAlchemy, so this type is database
agnostic and is not affected by differences in underlying JSON types
implementations.
"""
cache_ok = True
impl = db.Text
def process_bind_param(self, value, dialect):
"""Encode object to a string before inserting into database."""
try:
return json.dumps(value)
except TypeError:
if isinstance(value, _literal_bindparam):
# this is only ever _literal_bindparam in migrations
return value.value
raise
def process_result_value(self, value, engine):
"""Decode string to an object after selecting from database."""
if value is None:
return
return json.loads(value)
class CIColumn(Column):
"""A column for case insensitive string fields
"""
is_ci = True
def _get_extension_type(desc):
"""Return the extension_type of a SQLAlchemy descriptors.
This also handles proxy descriptors, looking up the extension type on
the proxied-to descriptor.
"""
if isinstance(desc, AssociationProxyInstance):
extension_type = desc.parent.extension_type
else:
extension_type = desc.extension_type
if extension_type is NOT_EXTENSION:
proxied_desc = getattr(desc, 'descriptor', None)
if proxied_desc is not None:
extension_type = proxied_desc.extension_type
return extension_type
class SQLModelBase(Model):
"""Abstract base class for all SQL models that allows [de]serialization
"""
# SQLAlchemy syntax
__abstract__ = True
# Does the class represent a resource (Blueprint, Deployment, etc.) or a
# management table (User, Tenant, etc.), as they are handled differently
is_resource = False
# Can this resource be attached to tenants
top_level_tenant = False
# Does this resource have a unique creator
top_level_creator = False
_sql_to_flask_type_map = {
'Integer': flask_fields.Integer,
'Text': flask_fields.String,
'String': flask_fields.String,
'Unicode': flask_fields.String,
'PickleType': flask_fields.Raw,
'UTCDateTime': flask_fields.String,
'Enum': flask_fields.String,
'Boolean': flask_fields.Boolean,
'ARRAY': flask_fields.Raw,
'JSONString': flask_fields.Raw,
'LargeBinary': flask_fields.Raw,
'Float': flask_fields.Float
}
def ensure_defaults(self):
"""Synchronously set the defaults for this model's properties.
Normally the python-side defaults are only applied when the
model is committed, but if you need them to be applied _right now_,
call this.
This will only apply scalar and callable defaults, of course it
cannot apply defaults that are db-side (eg. a selectable).
"""
for col_name, col in self.__table__.c.items():
if getattr(self, col_name, None) is None and col.default:
if col.default.is_scalar:
value = col.default.arg
elif col.default.is_callable:
value = col.default.arg(self)
setattr(self, col_name, value)
def to_dict(self, suppress_error=False):
"""Return a dict representation of the model
:param suppress_error: If set to True, sets `None` to attributes that
it's unable to retrieve (e.g., if a relationship wasn't established
yet, and so it's impossible to access a property through it)
"""
if suppress_error:
res = dict()
for field in self.resource_fields:
try:
field_value = getattr(self, field)
except AttributeError:
field_value = None
res[field] = field_value
else:
# Can't simply call here `self.to_response()` because inheriting
# class might override it, but we always need the same code here
res = {f: getattr(self, f) for f in self.resource_fields}
full_response = self.to_response()
# resource_availability is deprecated.
# For backwards compatibility - adding it to the response.
if 'resource_availability' in full_response:
res['resource_availability'] = \
full_response['resource_availability']
return res
def to_response(self, include=None, **kwargs):
include = include or self.resource_fields
return {
f: getattr(self, f) for f in self.resource_fields if f in include
}
@classproperty
def resource_fields(cls):
"""Return a mapping of available field names and their corresponding
flask types
"""
fields = dict()
columns = inspect(cls).columns
columns_dict = {col.name: col.type for col in columns
if not col.name.startswith('_')}
columns_dict.update(cls._get_orm_descriptors())
for field_name, field_type in columns_dict.items():
field_type_name = field_type.__class__.__name__
fields[field_name] = cls._sql_to_flask_type_map[field_type_name]
return fields
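    # Hedged illustration (editor's note): for a hypothetical model with an
    # `id` Text column and a `created_at` UTCDateTime column, resource_fields
    # would return {'id': flask_fields.String, 'created_at': flask_fields.String}.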
@classmethod
def _get_orm_descriptors(cls):
"""Return a dictionary with all ORM descriptor names as keys, and
their types (TEXT, DateTime, etc.) as values.
"""
# The descriptor needs to be invoked once (using __get__) in order
# to have access to its attributes (e.g. `remote_attr`)
all_descs = {name: desc.__get__(None, cls)
for name, desc in inspect(cls).all_orm_descriptors.items()
if not name.startswith('_')}
attrs_dict = dict()
for name, desc in all_descs.items():
extension_type = _get_extension_type(desc)
if extension_type is ASSOCIATION_PROXY:
# Association proxies must be followed to get their type
while not is_orm_attribute(desc.remote_attr):
desc = desc.remote_attr
# Get the type of the remote attribute
attrs_dict[name] = desc.remote_attr.expression.type
elif extension_type is HYBRID_PROPERTY:
attrs_dict[name] = desc.type
return attrs_dict
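    # Hedged illustration (editor's note): for a hypothetical association proxy
    # `tenant_name` that chains to Tenant.name (a Text column), the proxy is
    # followed until an ORM attribute is reached, so attrs_dict['tenant_name']
    # ends up as the Text type; a hybrid property contributes its `type` directly.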
def _get_identifier_dict(self):
"""A helper method that allows classes to override if in order to
change the default string representation
"""
return OrderedDict([('id', self.id)])
@classmethod
def unique_id(cls):
return 'id'
@classmethod
def default_sort_column(cls):
"""If no sort is requested, order by the column specified by this.
This is so that requests with pagination make sense even with no
sort requested by the user.
"""
return getattr(cls, cls.unique_id())
def __repr__(self):
"""Return a representation of the class, based on the ordered dict of
identifiers returned by `_get_identifier_dict`
"""
id_dict = self._get_identifier_dict()
class_name = self.__class__.__name__
_repr = ' '.join('{0}=`{1}`'.format(k, v) for k, v in id_dict.items())
return '<{0} {1}>'.format(class_name, _repr)
@classproperty
def is_label(cls):
return hasattr(cls, 'labeled_model')
def is_orm_attribute(item):
if isinstance(item, AssociationProxyInstance):
return False
if not hasattr(item, 'is_attribute'):
return False
return item.is_attribute
class CreatedAtMixin(object):
created_at = db.Column(UTCDateTime, nullable=False, index=True,
default=lambda: datetime.utcnow())
@classmethod
def default_sort_column(cls):
"""Models that have created_at, sort on it by default."""
return cls.created_at
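# Hedged sketch (editor's note, not part of the original module): assuming the
# flask-sqlalchemy `db` object used above, a concrete model might combine the
# mixins roughly like this (all names below are illustrative only):
#   class Secret(CreatedAtMixin, SQLModelBase):
#       __tablename__ = 'secrets'
#       id = db.Column(db.Text, primary_key=True)
#       value = db.Column(db.Text, nullable=False)
#       is_resource = True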
|
24226f5fa657b9a680ed27ca4b2a45c6424cc51c
|
850fb312d6cfa25546369b4950c47b04231dce8e
|
/src/gt4sd/frameworks/gflownet/train/core.py
|
12b0ecebf48061eb5b0919d633b4e167ae0251d4
|
[
"MIT"
] |
permissive
|
GT4SD/gt4sd-core
|
825418303547c36cf64575ac4f8711877fd7e16b
|
0b69b7d5b261f2f9af3984793c1295b9b80cd01a
|
refs/heads/main
| 2023-09-02T21:23:46.156469
| 2023-08-30T08:28:40
| 2023-08-30T08:28:40
| 458,309,249
| 239
| 50
|
MIT
| 2023-08-25T06:14:52
| 2022-02-11T19:06:58
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,208
|
py
|
core.py
|
#
# MIT License
#
# Copyright (c) 2022 GT4SD team
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""Train module implementation."""
import logging
from argparse import Namespace
from typing import Any, Dict
import sentencepiece as _sentencepiece
import torch as _torch
import tensorflow as _tensorflow
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from ..arg_parser.parser import parse_arguments_from_config
# from ..dataloader import build_dataset
from ..dataloader.data_module import GFlowNetDataModule
from ..dataloader.dataset import GFlowNetDataset, GFlowNetTask
# from ..envs import build_env_context
from ..envs.graph_building_env import GraphBuildingEnv, GraphBuildingEnvContext
from ..loss import ALGORITHM_FACTORY
from ..ml.models import MODEL_FACTORY
from ..ml.module import GFlowNetModule
# from ..train import build_task
# imports that have to be loaded before lightning to avoid segfaults
_sentencepiece
_tensorflow
_torch
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def train_gflownet(
configuration: Dict[str, Any],
dataset: GFlowNetDataset,
environment: GraphBuildingEnv,
context: GraphBuildingEnvContext,
task: GFlowNetTask,
) -> None:
"""Train a gflownet given a configuration, a dataset and a task.
    The default environment and context are compatible with small molecules.
Args:
configuration: a configuration dictionary.
dataset: a dataset compatible with lightning.
environment: an environment specifying the state space.
context: an environment context specifying how to combine states.
task: a task specifying the reward structure.
"""
arguments = Namespace(**configuration)
if arguments.algorithm in ALGORITHM_FACTORY:
algorithm = ALGORITHM_FACTORY[getattr(arguments, "algorithm")](
configuration=configuration,
environment=environment,
context=context,
)
else:
raise ValueError(f"Algorithm {arguments.algorithm} not supported.")
if arguments.model in MODEL_FACTORY:
model = MODEL_FACTORY[getattr(arguments, "model")](
configuration=configuration,
context=context,
)
else:
raise ValueError(f"Model {arguments.model} not supported.")
dm = GFlowNetDataModule(
configuration=configuration,
dataset=dataset,
environment=environment,
context=context,
task=task,
algorithm=algorithm,
model=model,
)
dm.prepare_data()
module = GFlowNetModule(
configuration=configuration,
dataset=dataset,
environment=environment,
context=context,
task=task,
algorithm=algorithm,
model=model,
)
tensorboard_logger = TensorBoardLogger(
"logs", name=getattr(arguments, "basename", "default")
)
checkpoint_callback = ModelCheckpoint(
save_top_k=-1,
)
trainer = pl.Trainer.from_argparse_args(
arguments,
profiler="simple",
logger=tensorboard_logger,
auto_lr_find=True,
log_every_n_steps=getattr(arguments, "trainer_log_every_n_steps", 50),
callbacks=[checkpoint_callback],
max_epochs=getattr(arguments, "epoch", 10),
check_val_every_n_epoch=getattr(arguments, "checkpoint_every_n_val_epochs", 5),
fast_dev_run=getattr(arguments, "development_mode", False),
strategy=getattr(arguments, "strategy", "ddp"),
)
trainer.fit(module, dm)
def train_gflownet_main(
configuration: Dict[str, Any],
dataset: GFlowNetDataset,
environment: GraphBuildingEnv,
context: GraphBuildingEnvContext,
task: GFlowNetTask,
) -> None:
"""Train a gflownet module parsing arguments from config and standard input."""
# add user configuration
configuration.update(vars(parse_arguments_from_config()))
# train gflownet
train_gflownet(
configuration=configuration,
dataset=dataset,
environment=environment,
context=context,
task=task,
)
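# Hedged usage sketch (editor's note): all names below are illustrative, and the
# "algorithm"/"model" keys must match entries in ALGORITHM_FACTORY/MODEL_FACTORY.
#   configuration = {"algorithm": "<algorithm-key>", "model": "<model-key>", "epoch": 5}
#   train_gflownet_main(
#       configuration=configuration,
#       dataset=my_dataset,              # a GFlowNetDataset
#       environment=GraphBuildingEnv(),
#       context=my_context,              # a GraphBuildingEnvContext
#       task=my_task,                    # a GFlowNetTask
#   )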
|
2e8692153e8631b8e0a381191beddedb83a9b760
|
5e601244fbf32ee5190fb5210a0cd334473a0abe
|
/projects/WindowsSystemOps/Services/pyAutoResetPrinterWin32.py
|
c9fdba47c8f7031abcb85ddd9dc5a51b53b36df7
|
[] |
no_license
|
DingGuodong/LinuxBashShellScriptForOps
|
69ebe45cf3f92b741a078b9b78c2600328ce9b9e
|
b2ca1e4c870626dd078d447e2d1479b08602bdf6
|
refs/heads/master
| 2023-08-21T20:53:40.617397
| 2023-07-17T01:41:05
| 2023-07-17T01:41:05
| 57,015,255
| 453
| 343
| null | 2023-02-16T01:29:23
| 2016-04-25T05:55:28
|
Python
|
UTF-8
|
Python
| false
| false
| 8,573
|
py
|
pyAutoResetPrinterWin32.py
|
#!/usr/bin/python
# encoding: utf-8
# -*- coding: utf-8 -*-
"""
Created by PyCharm.
File Name: LinuxBashShellScriptForOps:pyAutoResetPrinterWin32.py
Version: 0.0.1
Author: Guodong
Author Email: dgdenterprise@gmail.com
URL: https://github.com/DingGuodong/LinuxBashShellScriptForOps
Download URL: https://github.com/DingGuodong/LinuxBashShellScriptForOps/tarball/master
Create Date: 2018/10/10
Create Time: 10:44
Description: auto reset Spooler(Print Spooler) service when printer failure occurs
Long Description:
References: http://timgolden.me.uk/pywin32-docs/win32print.html
Prerequisites: pypiwin32: pip install pypiwin32
Optional: install 'pywin32'
Development Status: 3 - Alpha, 5 - Production/Stable
Environment: Console
Intended Audience: System Administrators, Developers, End Users/Desktop
License: Freeware, Freely Distributable
Natural Language: English, Chinese (Simplified)
Operating System: POSIX :: Linux, Microsoft :: Windows
Programming Language: Python :: 2.6
Programming Language: Python :: 2.7
Topic: Utilities
"""
import os
import sys
import time
from collections import Counter
from hashlib import md5
import win32print
import win32service
import win32serviceutil
def reset_printer():
"""
Note: administrator privilege is required
    this function does three things:
1. stop Print Spooler service
2. delete all job files
3. start Print Spooler service
:return:
"""
service_name = 'spooler'.capitalize()
win_dir = os.environ.get('windir', r'C:\Windows')
printer_path = r"System32\spool\PRINTERS"
path = os.path.join(win_dir, printer_path)
status_code_map = {
0: "UNKNOWN",
1: "STOPPED",
2: "START_PENDING",
3: "STOP_PENDING",
4: "RUNNING"
}
print "printer spool folder is: %s" % path
if os.path.exists(path):
if os.listdir(path):
print "reset printer spooler service in progress ..."
status_code = win32serviceutil.QueryServiceStatus(service_name)[1]
if status_code == win32service.SERVICE_RUNNING or status_code == win32service.SERVICE_START_PENDING:
print "stopping service {service}".format(service=service_name)
win32serviceutil.StopService(serviceName=service_name)
# waiting for service stop, in case of WindowsError exception
# 'WindowsError: [Error 32]' which means
# 'The process cannot access the file because it is being used by another process'.
running_flag = True
while running_flag:
print "waiting for service {service} stop.".format(service=service_name)
status_code = win32serviceutil.QueryServiceStatus(service_name)[1]
time.sleep(2)
if status_code == win32service.SERVICE_STOPPED:
running_flag = False
for top, dirs, nondirs in os.walk(path, followlinks=True):
for item in nondirs:
path_to_remove = os.path.join(top, item)
try:
os.remove(path_to_remove)
except WindowsError:
time.sleep(2)
r""" KNOWN ISSUE:
It will also can NOT remove some files in some Windows, such as 'Windows Server 2012'
Because file maybe used by a program named "Print Filter Pipeline Host",
"C:\Windows\System32\printfilterpipelinesvc.exe"
It will throw out 'WindowsError: [Error 32]' exception again.
"""
os.remove(path_to_remove)
except Exception as e:
print e
print e.args
print e.message
print "file removed: {file}".format(file=path_to_remove)
status_code = win32serviceutil.QueryServiceStatus(service_name)[1]
if status_code != win32service.SERVICE_RUNNING and status_code != win32service.SERVICE_START_PENDING:
print "starting service {service}".format(service=service_name)
win32serviceutil.StartService(serviceName=service_name)
else:
print "current printer spooler in good state, skipped."
else:
print "Error: {path} not found, system files broken!".format(path=path)
sys.exit(1)
status_code = win32serviceutil.QueryServiceStatus(service_name)[1]
if status_code == win32service.SERVICE_RUNNING or status_code == win32service.SERVICE_START_PENDING:
print "[OK] reset printer spooler service successfully!"
else:
print "current service code is {code}, and service state is {state}.".format(code=status_code,
state=status_code_map[status_code])
try:
print "trying start spooler service..."
win32serviceutil.StartService(serviceName=service_name)
status_code = win32serviceutil.QueryServiceStatus(service_name)[1]
if status_code == win32service.SERVICE_RUNNING or status_code == win32service.SERVICE_START_PENDING:
print "service {service} started.".format(service=service_name)
except Exception as e:
print e
print [msg for msg in e.args]
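# Hedged note (editor's sketch): reset_printer() requires administrator
# privileges; a minimal manual invocation from an elevated prompt could be:
#   python -c "import pyAutoResetPrinterWin32 as m; m.reset_printer()"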
def printer_watchdog():
print win32print.EnumPrinters(win32print.PRINTER_ENUM_LOCAL) # get local printers
print win32print.EnumPrinters(win32print.PRINTER_ENUM_CONNECTIONS) # get printers which other computer shared
default_printer_name = win32print.GetDefaultPrinter()
printer = win32print.OpenPrinter(default_printer_name)
print win32print.GetPrinter(printer)
jobs_list = list()
total_seconds = 60 * 5 # reset after 60*5 seconds, see 'known issue 2' in this file.
sleep_seconds = 10
times = total_seconds / sleep_seconds
current_times = 0
while True:
jobs = win32print.EnumJobs(printer, 0, 3, 1)
        # may raise pywintypes.error: (1722, 'EnumJobs', 'RPC 服务器不可用。' i.e. "The RPC server is unavailable."); that exception is ignored here
# 0 is location of first job,
# 3 is number of jobs to enumerate,
        # 1 is the job info level; it can be 1 (win32print.JOB_INFO_1), 2, or 3. Level 3 is reserved, and levels 1 and 2 can NOT get job status, :(
if len(jobs) >= 1:
for job in jobs:
filename = job.get('pDocument')
job_id = job.get('JobId', md5(filename).hexdigest())
job_status = job.get('Status', 0)
if job_status in [0x00000002, 0x00000004, 0x00000800]: # JOB_STATUS_ERROR
"""
Refers:
https://docs.microsoft.com/en-us/windows/desktop/printdocs/job-info-2
~\AppData\Local\Programs\Common\Microsoft\Visual C++ for Python\9.0\WinSDK\Include\WinSpool.h
"""
print "printer need to be reset, ... "
reset_printer()
                    jobs_list = []  # reset the list so the same job id is not counted again
current_times = 0
print "Current job: ", job_id, job.get('pUserName'), job.get('Submitted'), job.get(
'pMachineName'), filename, "[ %d/%d ]" % (times, current_times + 1)
jobs_list.append(job_id)
# if any([jid in jobs_list for jid in (jobs[0].get('JobId'), jobs[-1].get('JobId'))]):
# current_times += 1
if Counter(jobs_list).most_common(1)[0][1] > 1:
current_times += 1
if current_times > times:
""" KNOWN ISSUE 2:
It will reset when a document sends lots of pages to printer.
This script may reset printer before job finished which is not expected.
"""
print "printer need to be reset, ... "
reset_printer()
                    jobs_list = []  # reset the list so the same job id is not counted again
current_times = 0
else:
jobs_list = []
current_times = 0
print 'looks good, keep watching ...'
time.sleep(sleep_seconds)
if __name__ == '__main__':
printer_watchdog()
|