hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acedf43ae260d2b640b05876bf731151c4d43333 | 1,302 | py | Python | gitfs/utils/decorators/write_operation.py | josephwinston/gitfs | 6d5387f75696f432ef0702017a92130d090bb999 | [
"Apache-2.0"
] | null | null | null | gitfs/utils/decorators/write_operation.py | josephwinston/gitfs | 6d5387f75696f432ef0702017a92130d090bb999 | [
"Apache-2.0"
] | null | null | null | gitfs/utils/decorators/write_operation.py | josephwinston/gitfs | 6d5387f75696f432ef0702017a92130d090bb999 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 PressLabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from errno import EROFS
from functools import wraps
from fuse import FuseOSError
from gitfs.events import (sync_done, syncing, writers, push_successful,
fetch_successful)
from gitfs.log import log
def write_operation(f):
    """Decorator for FUSE write operations.

    Rejects the call with EROFS while either the `fetch_successful` or
    `push_successful` event is cleared (the filesystem is treated as
    read-only until both succeed again), counts the caller in the global
    `writers` tally for the duration of the call, and blocks until any
    in-progress sync has finished before running the wrapped operation.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        global writers

        # Both remote operations must currently be in a good state,
        # otherwise the filesystem behaves as read-only.
        if not (fetch_successful.is_set() and push_successful.is_set()):
            raise FuseOSError(EROFS)

        writers += 1
        if syncing.is_set():
            log.debug("WriteOperation: Wait until syncing is done")
            sync_done.wait()
        try:
            return f(*args, **kwargs)
        finally:
            # Always decrement, even if the wrapped operation raises.
            writers -= 1

    return wrapper
| 28.304348 | 74 | 0.677419 |
acedf519b7a637236e924e1350f1d915f323e8d7 | 14,979 | py | Python | lib/cherrypy/lib/static.py | nkvoronov/script.module.cherrypy | 2573349bb7bba9b46102387ef3105f75cc8a82f9 | [
"BSD-3-Clause"
] | null | null | null | lib/cherrypy/lib/static.py | nkvoronov/script.module.cherrypy | 2573349bb7bba9b46102387ef3105f75cc8a82f9 | [
"BSD-3-Clause"
] | null | null | null | lib/cherrypy/lib/static.py | nkvoronov/script.module.cherrypy | 2573349bb7bba9b46102387ef3105f75cc8a82f9 | [
"BSD-3-Clause"
] | null | null | null | import os
import re
import stat
import mimetypes
try:
from io import UnsupportedOperation
except ImportError:
UnsupportedOperation = object()
import cherrypy
from cherrypy._cpcompat import ntob, unquote
from cherrypy.lib import cptools, httputil, file_generator_limited
mimetypes.init()
# Register a few extension -> content-type mappings that the stdlib table
# may be missing (or that this server wants to force).
mimetypes.types_map['.dwg'] = 'image/x-dwg'
mimetypes.types_map['.ico'] = 'image/x-icon'
mimetypes.types_map['.bz2'] = 'application/x-bzip2'
mimetypes.types_map['.gz'] = 'application/x-gzip'
def serve_file(path, content_type=None, disposition=None, name=None,
               debug=False):
    """Set status, headers, and body in order to serve the given path.

    The Content-Type header will be set to the content_type arg, if provided.
    If not provided, the Content-Type will be guessed by the file extension
    of the 'path' argument.

    If disposition is not None, the Content-Disposition header will be set
    to "<disposition>; filename=<name>". If name is None, it will be set
    to the basename of path. If disposition is None, no Content-Disposition
    header will be written.

    Raises ValueError for a relative path and cherrypy.NotFound when the
    path cannot be stat'ed or refers to a directory.
    """
    response = cherrypy.serving.response

    # If path is relative, users should fix it by making path absolute.
    # That is, CherryPy should not guess where the application root is.
    # It certainly should *not* use cwd (since CP may be invoked from a
    # variety of paths). If using tools.staticdir, you can make your relative
    # paths become absolute by supplying a value for "tools.staticdir.root".
    if not os.path.isabs(path):
        msg = "'%s' is not an absolute path." % path
        if debug:
            cherrypy.log(msg, 'TOOLS.STATICFILE')
        raise ValueError(msg)

    try:
        st = os.stat(path)
    except (OSError, TypeError, ValueError):
        # OSError when file fails to stat
        # TypeError on Python 2 when there's a null byte
        # ValueError on Python 3 when there's a null byte
        if debug:
            cherrypy.log('os.stat(%r) failed' % path, 'TOOLS.STATIC')
        raise cherrypy.NotFound()

    # Check if path is a directory.
    if stat.S_ISDIR(st.st_mode):
        # Let the caller deal with it as they like.
        if debug:
            cherrypy.log('%r is a directory' % path, 'TOOLS.STATIC')
        raise cherrypy.NotFound()

    # Set the Last-Modified response header, so that
    # modified-since validation code can work.
    response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
    # May short-circuit the request with a 304 Not Modified response.
    cptools.validate_since()

    if content_type is None:
        # Set content-type based on filename extension
        ext = ''
        i = path.rfind('.')
        if i != -1:
            ext = path[i:].lower()
        content_type = mimetypes.types_map.get(ext, None)
    if content_type is not None:
        response.headers['Content-Type'] = content_type
    if debug:
        cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')

    cd = None
    if disposition is not None:
        if name is None:
            name = os.path.basename(path)
        cd = '%s; filename="%s"' % (disposition, name)
        response.headers['Content-Disposition'] = cd
    if debug:
        cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')

    # Set Content-Length and use an iterable (file object)
    # this way CP won't load the whole file in memory
    content_length = st.st_size
    fileobj = open(path, 'rb')
    return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
def serve_fileobj(fileobj, content_type=None, disposition=None, name=None,
                  debug=False, filesize=None):
    """Set status, headers, and body in order to serve the given file object.

    The Content-Type header will be set to the content_type arg, if provided.

    If disposition is not None, the Content-Disposition header will be set
    to "<disposition>; filename=<name>". If name is None, 'filename' will
    not be set. If disposition is None, no Content-Disposition header will
    be written.

    If fileobj has no usable fileno() (e.g. an in-memory stream), the
    caller-supplied 'filesize' is used as the Content-Length.

    CAUTION: If the request contains a 'Range' header, one or more seek()s will
    be performed on the file object. This may cause undesired behavior if
    the file object is not seekable. It could also produce undesired results
    if the caller set the read position of the file object prior to calling
    serve_fileobj(), expecting that the data would be served starting from that
    position.
    """
    response = cherrypy.serving.response

    try:
        st = os.fstat(fileobj.fileno())
    except AttributeError:
        # Not an OS-level file; fall back to the caller-supplied size.
        if debug:
            cherrypy.log('os has no fstat attribute', 'TOOLS.STATIC')
        content_length = filesize
    except UnsupportedOperation:
        # File-like object whose fileno() refuses to be used.
        content_length = None
    else:
        # Set the Last-Modified response header, so that
        # modified-since validation code can work.
        response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
        cptools.validate_since()
        content_length = st.st_size

    if content_type is not None:
        response.headers['Content-Type'] = content_type
    if debug:
        cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')

    cd = None
    if disposition is not None:
        if name is None:
            cd = disposition
        else:
            cd = '%s; filename="%s"' % (disposition, name)
        response.headers["Content-Disposition"] = cd
    if debug:
        cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')

    return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
def _serve_fileobj(fileobj, content_type, content_length, debug=False):
    """Internal. Set response.body to the given file object, perhaps ranged.

    Honors the request's 'Range' header for HTTP/1.1+ clients, producing a
    single-part 206 response, a multipart/byteranges 206 response, or a
    plain full-body response as appropriate.
    """
    response = cherrypy.serving.response

    # HTTP/1.0 didn't have Range/Accept-Ranges headers, or the 206 code
    request = cherrypy.serving.request
    if request.protocol >= (1, 1):
        response.headers['Accept-Ranges'] = 'bytes'
        r = httputil.get_ranges(request.headers.get('Range'), content_length)
        # Empty list means a Range header was present but unsatisfiable;
        # None (falsy) means no usable Range header at all.
        if r == []:
            response.headers['Content-Range'] = 'bytes */%s' % content_length
            message = ('Invalid Range (first-byte-pos greater than '
                       'Content-Length)')
            if debug:
                cherrypy.log(message, 'TOOLS.STATIC')
            raise cherrypy.HTTPError(416, message)

        if r:
            if len(r) == 1:
                # Return a single-part response.
                start, stop = r[0]
                if stop > content_length:
                    stop = content_length
                r_len = stop - start
                if debug:
                    cherrypy.log(
                        'Single part; start: %r, stop: %r' % (start, stop),
                        'TOOLS.STATIC')
                response.status = '206 Partial Content'
                response.headers['Content-Range'] = (
                    'bytes %s-%s/%s' % (start, stop - 1, content_length))
                response.headers['Content-Length'] = r_len
                fileobj.seek(start)
                response.body = file_generator_limited(fileobj, r_len)
            else:
                # Return a multipart/byteranges response.
                response.status = '206 Partial Content'
                try:
                    # Python 3
                    from email.generator import _make_boundary as make_boundary
                except ImportError:
                    # Python 2
                    from mimetools import choose_boundary as make_boundary
                boundary = make_boundary()
                ct = 'multipart/byteranges; boundary=%s' % boundary
                response.headers['Content-Type'] = ct
                if 'Content-Length' in response.headers:
                    # Delete Content-Length header so finalize() recalcs it.
                    del response.headers['Content-Length']

                def file_ranges():
                    # Generator yielding one MIME part per requested range.
                    # Apache compatibility:
                    yield ntob('\r\n')

                    for start, stop in r:
                        if debug:
                            cherrypy.log(
                                'Multipart; start: %r, stop: %r' % (
                                    start, stop),
                                'TOOLS.STATIC')
                        yield ntob('--' + boundary, 'ascii')
                        yield ntob('\r\nContent-type: %s' % content_type,
                                   'ascii')
                        yield ntob(
                            '\r\nContent-range: bytes %s-%s/%s\r\n\r\n' % (
                                start, stop - 1, content_length),
                            'ascii')
                        fileobj.seek(start)
                        gen = file_generator_limited(fileobj, stop - start)
                        for chunk in gen:
                            yield chunk
                        yield ntob('\r\n')
                    # Final boundary
                    yield ntob('--' + boundary + '--', 'ascii')

                    # Apache compatibility:
                    yield ntob('\r\n')
                response.body = file_ranges()
            return response.body
        else:
            if debug:
                cherrypy.log('No byteranges requested', 'TOOLS.STATIC')

    # Set Content-Length and use an iterable (file object)
    # this way CP won't load the whole file in memory
    response.headers['Content-Length'] = content_length
    response.body = fileobj
    return response.body
def serve_download(path, name=None):
    """Serve 'path' as an application/x-download attachment."""
    # This is such a common idiom I felt it deserved its own wrapper.
    return serve_file(
        path,
        content_type='application/x-download',
        disposition='attachment',
        name=name,
    )
def _attempt(filename, content_types, debug=False):
    """Try to serve *filename* as static content; return True on success."""
    if debug:
        cherrypy.log('Attempting %r (content_types %r)' %
                     (filename, content_types), 'TOOLS.STATICDIR')
    # A per-directory extension -> content-type mapping (if supplied)
    # overrides the mimetypes-based guess done inside serve_file().
    forced_type = None
    if content_types:
        ext = os.path.splitext(filename)[1]
        forced_type = content_types.get(ext[1:], None)
    try:
        serve_file(filename, content_type=forced_type, debug=debug)
        return True
    except cherrypy.NotFound:
        # If we didn't find the static file, continue handling the
        # request. We might find a dynamic handler instead.
        if debug:
            cherrypy.log('NotFound', 'TOOLS.STATICFILE')
        return False
def staticdir(section, dir, root='', match='', content_types=None, index='',
              debug=False):
    """Serve a static resource from the given (root +) dir.

    match
        If given, request.path_info will be searched for the given
        regular expression before attempting to serve static content.

    content_types
        If given, it should be a Python dictionary of
        {file-extension: content-type} pairs, where 'file-extension' is
        a string (e.g. "gif") and 'content-type' is the value to write
        out in the Content-Type response header (e.g. "image/gif").

    index
        If provided, it should be the (relative) name of a file to
        serve for directory requests. For example, if the dir argument is
        '/home/me', the Request-URI is 'myapp', and the index arg is
        'index.html', the file '/home/me/myapp/index.html' will be sought.

    Returns True when a static file was served, False otherwise.
    """
    request = cherrypy.serving.request
    if request.method not in ('GET', 'HEAD'):
        if debug:
            cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICDIR')
        return False

    if match and not re.search(match, request.path_info):
        if debug:
            cherrypy.log('request.path_info %r does not match pattern %r' %
                         (request.path_info, match), 'TOOLS.STATICDIR')
        return False

    # Allow the use of '~' to refer to a user's home directory.
    dir = os.path.expanduser(dir)

    # If dir is relative, make absolute using "root".
    if not os.path.isabs(dir):
        if not root:
            msg = 'Static dir requires an absolute dir (or root).'
            if debug:
                cherrypy.log(msg, 'TOOLS.STATICDIR')
            raise ValueError(msg)
        dir = os.path.join(root, dir)

    # Determine where we are in the object tree relative to 'section'
    # (where the static tool was defined).
    if section == 'global':
        section = '/'
    section = section.rstrip(r'\/')
    branch = request.path_info[len(section) + 1:]
    branch = unquote(branch.lstrip(r'\/'))

    # If branch is "", filename will end in a slash
    filename = os.path.join(dir, branch)
    if debug:
        cherrypy.log('Checking file %r to fulfill %r' %
                     (filename, request.path_info), 'TOOLS.STATICDIR')

    # There's a chance that the branch pulled from the URL might
    # have ".." or similar uplevel attacks in it. Check that the final
    # filename is a child of dir. A plain prefix test is not enough:
    # it would accept sibling paths such as '/static-secrets' when dir
    # is '/static', so require the next character to be a separator.
    ndir = os.path.normpath(dir)
    nfilename = os.path.normpath(filename)
    prefix = ndir if ndir.endswith(os.sep) else ndir + os.sep
    if nfilename != ndir and not nfilename.startswith(prefix):
        raise cherrypy.HTTPError(403)  # Forbidden

    # Forward the debug flag so _attempt's logging honors it
    # (the original dropped it here).
    handled = _attempt(filename, content_types, debug=debug)
    if not handled:
        # Check for an index file if a folder was requested.
        if index:
            handled = _attempt(os.path.join(filename, index), content_types,
                               debug=debug)
            if handled:
                request.is_index = filename[-1] in (r'\/')
    return handled
def staticfile(filename, root=None, match='', content_types=None, debug=False):
    """Serve a static resource from the given (root +) filename.

    match
        If given, request.path_info will be searched for the given
        regular expression before attempting to serve static content.

    content_types
        If given, it should be a Python dictionary of
        {file-extension: content-type} pairs, where 'file-extension' is
        a string (e.g. "gif") and 'content-type' is the value to write
        out in the Content-Type response header (e.g. "image/gif").

    Returns True when the file was served, False otherwise. Raises
    ValueError when filename is relative and no root was given.
    """
    request = cherrypy.serving.request
    if request.method not in ('GET', 'HEAD'):
        if debug:
            cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICFILE')
        return False

    if match and not re.search(match, request.path_info):
        if debug:
            cherrypy.log('request.path_info %r does not match pattern %r' %
                         (request.path_info, match), 'TOOLS.STATICFILE')
        return False

    # If filename is relative, make absolute using "root".
    if not os.path.isabs(filename):
        if not root:
            msg = "Static tool requires an absolute filename (got '%s')." % (
                filename,)
            if debug:
                cherrypy.log(msg, 'TOOLS.STATICFILE')
            raise ValueError(msg)
        filename = os.path.join(root, filename)

    return _attempt(filename, content_types, debug=debug)
| 39.212042 | 79 | 0.600374 |
acedf522920136e326ee66946a8b4b94b49805cc | 4,925 | py | Python | pb_max17043.py | pabou38/water-heater | e3297145f32df00cae56ad4047503e9556793cba | [
"MIT"
] | null | null | null | pb_max17043.py | pabou38/water-heater | e3297145f32df00cae56ad4047503e9556793cba | [
"MIT"
] | null | null | null | pb_max17043.py | pabou38/water-heater | e3297145f32df00cae56ad4047503e9556793cba | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
max17043 library for MicroPython
this is a lipo battery cells fuel gauge made by maxim
https://datasheets.maximintegrated.com/en/ds/MAX17043-MAX17044.pdf
small module by sparkfun
https://www.sparkfun.com/products/10617
based upon the max17043 library for arduino by lucadentella
https://github.com/lucadentella/ArduinoLib_MAX17043
Andre Peeters
2017/10/31
"""
import binascii
#from machine import Pin, I2C
from micropython import const # PABOU
REGISTER_VCELL = const(0X02)
REGISTER_SOC = const(0X04)
REGISTER_MODE = const(0X06)
REGISTER_VERSION = const(0X08)
REGISTER_CONFIG = const(0X0C)
REGISTER_COMMAND = const(0XFE)
class max17043:
    """
    Driver for the MAX17043 LiPo fuel gauge over I2C.

    Register map (module-level constants):

    REGISTER_VCELL = const(0X02)
    REGISTER_SOC = const(0X04)
    REGISTER_MODE = const(0X06)
    REGISTER_VERSION = const(0X08)
    REGISTER_CONFIG = const(0X0C)
    REGISTER_COMMAND = const(0XFE)
    """

    def __init__(self, i2c):
        """
        Wrap an already-initialised I2C bus and locate the gauge on it.

        The first address returned by i2c.scan() is assumed to be the
        MAX17043 -- ensure it is the only (or first) device on the bus.
        """
        self.i2c = i2c  # keep the bus accessible through the driver
        self.max17043Address = (i2c.scan())[0]
        print('max17043: i2c address ', self.max17043Address)

    def __str__(self):
        """
        string representation of the values
        """
        rs = "i2c address is {}\n".format(self.max17043Address)
        rs += "version is {}\n".format(self.getVersion())
        rs += "vcell is {} v\n".format(self.getVCell())
        rs += "soc is {} %\n".format(self.getSoc())
        rs += "compensatevalue is {}\n".format(self.getCompensateValue())
        rs += "alert threshold is {} %\n".format(self.getAlertThreshold())
        rs += "in alert is {}".format(self.inAlert())
        return rs

    def address(self):
        """
        return the i2c address of the gauge
        """
        return self.max17043Address

    def reset(self):
        """
        issue the software reset command via the COMMAND register
        """
        self.__writeRegister(REGISTER_COMMAND, binascii.unhexlify('0054'))

    def getVCell(self):
        """
        get the cell voltage in volts
        """
        # 12-bit value left-aligned across the two register bytes.
        buf = self.__readRegister(REGISTER_VCELL)
        return (buf[0] << 4 | buf[1] >> 4) / 1000.0

    def getSoc(self):
        """
        get the state of charge in percent
        """
        # High byte is whole percent, low byte is 1/256 percent.
        buf = self.__readRegister(REGISTER_SOC)
        return (buf[0] + (buf[1] / 256.0))

    def getVersion(self):
        """
        get the production version of the max17043 chip
        """
        buf = self.__readRegister(REGISTER_VERSION)
        return (buf[0] << 8) | (buf[1])

    def getCompensateValue(self):
        """
        get the RCOMP compensation byte (high byte of CONFIG)
        """
        return self.__readConfigRegister()[0]

    def getAlertThreshold(self):
        """
        get the alert threshold in percent (1-32)
        """
        return (32 - (self.__readConfigRegister()[1] & 0x1f))

    def setAlertThreshold(self, threshold):
        """
        set the alert threshold in percent; values are clamped to 1-32
        """
        # CONFIG stores (32 - threshold) in its low 5 bits. Clamp BEFORE
        # converting: the previous expression left 32 in the field for
        # threshold >= 32, which overflows 5 bits and would have set the
        # ALRT flag bit (0x20) instead of a threshold.
        threshold = min(max(int(threshold), 1), 32)
        self.threshold = 32 - threshold
        # readfrom_mem() returns an immutable bytes object on most ports,
        # so copy into a bytearray before patching the low config byte.
        buf = bytearray(self.__readConfigRegister())
        buf[1] = (buf[1] & 0xE0) | self.threshold
        self.__writeConfigRegister(buf)

    def inAlert(self):
        """
        non-zero when the ALRT flag is set in CONFIG
        """
        return (self.__readConfigRegister())[1] & 0x20

    def clearAlert(self):
        """
        clear the latched ALRT flag in CONFIG
        """
        # The flag must be written back to 0; merely reading CONFIG (as the
        # previous implementation did) leaves the alert latched.
        buf = bytearray(self.__readConfigRegister())
        buf[1] = buf[1] & 0xDF  # clear bit 0x20
        self.__writeConfigRegister(buf)

    def quickStart(self):
        """
        restart the fuel-gauge calculation via the MODE register
        """
        self.__writeRegister(REGISTER_MODE, binascii.unhexlify('4000'))

    def __readRegister(self, address):
        """
        read the 2-byte register at address, returns a 2-byte buffer
        """
        return self.i2c.readfrom_mem(self.max17043Address, address, 2)

    def __readConfigRegister(self):
        """
        read the 2-byte CONFIG register
        """
        return self.__readRegister(REGISTER_CONFIG)

    def __writeRegister(self, address, buf):
        """
        write buf to the register at address
        """
        self.i2c.writeto_mem(self.max17043Address, address, buf)

    def __writeConfigRegister(self, buf):
        """
        write buf to the CONFIG register
        """
        self.__writeRegister(REGISTER_CONFIG, buf)

    def deinit(self):
        """
        turn off the i2c peripheral
        """
        self.i2c.deinit()
| 27.513966 | 79 | 0.591675 |
acedf5a5daa4b95dbef2e4beb01ed1ffaa9c6fb0 | 1,018 | py | Python | rl_lap/configs/laprepr_config_gridworld.py | manfreddiaz/rl-laplacian | 034803adb5c20c3bb7822b18d675b762fdcc53dc | [
"MIT"
] | null | null | null | rl_lap/configs/laprepr_config_gridworld.py | manfreddiaz/rl-laplacian | 034803adb5c20c3bb7822b18d675b762fdcc53dc | [
"MIT"
] | null | null | null | rl_lap/configs/laprepr_config_gridworld.py | manfreddiaz/rl-laplacian | 034803adb5c20c3bb7822b18d675b762fdcc53dc | [
"MIT"
] | null | null | null | from ..agent import laprepr
from ..envs.gridworld import gridworld_envs
from . import networks
class Config(laprepr.LapReprConfig):
    """Laplacian-representation training config specialized for gridworld."""

    def _set_default_flags(self):
        # Extend the base defaults with gridworld-specific hyperparameters.
        super()._set_default_flags()
        flags = self._flags
        flags.d = 20                 # representation dimension (fed to ReprNetMLP below)
        flags.n_samples = 30000
        flags.batch_size = 128
        flags.discount = 0.9
        flags.w_neg = 5.0            # negative-pair loss weight
        flags.c_neg = 1.0
        flags.reg_neg = 0.0
        flags.replay_buffer_size = 100000
        flags.opt_args.name = 'Adam'
        flags.opt_args.lr = 0.001
        # train
        flags.log_dir = '/tmp/rl_laprepr/log'
        flags.total_train_steps = 100000
        flags.print_freq = 1000
        flags.save_freq = 10000

    def _obs_prepro(self, obs):
        # Keep only the agent's position from the full observation.
        return obs.agent.position

    def _env_factory(self):
        # Environment is selected by the env_id flag.
        return gridworld_envs.make(self._flags.env_id)

    def _model_factory(self):
        # 3-layer MLP encoder producing a d-dimensional representation.
        return networks.ReprNetMLP(
            self._obs_shape, n_layers=3, n_units=256,
            d=self._flags.d)
| 26.102564 | 57 | 0.617878 |
acedf5cb3f576a2ab17bc7d2b96ff9d7df32a0f0 | 5,364 | py | Python | homework/pa5/functions.py | zv80/Spring2019-CS212 | 87e2eb7a453c3f37e1c7182225c261f13225d328 | [
"Apache-2.0"
] | null | null | null | homework/pa5/functions.py | zv80/Spring2019-CS212 | 87e2eb7a453c3f37e1c7182225c261f13225d328 | [
"Apache-2.0"
] | null | null | null | homework/pa5/functions.py | zv80/Spring2019-CS212 | 87e2eb7a453c3f37e1c7182225c261f13225d328 | [
"Apache-2.0"
] | null | null | null | from collections import defaultdict
from re import findall
import sqlite3
def calculateEditDistance(first, second):
    """Return the Levenshtein (edit) distance between two strings.

    Classic dynamic-programming formulation: matrix[i][j] holds the cost of
    transforming the first i characters of `first` into the first j
    characters of `second`, using insert/delete/substitute at cost 1 each.
    """
    rows = len(first) + 1
    cols = len(second) + 1

    # Build the (rows x cols) matrix with the base cases filled in:
    # transforming a prefix to/from the empty string costs its length.
    matrix = [[0] * cols for _ in range(rows)]
    for j in range(cols):
        matrix[0][j] = j
    for i in range(rows):
        matrix[i][0] = i

    # Fill the rest of the matrix.
    for i in range(1, rows):
        for j in range(1, cols):
            substitution = matrix[i - 1][j - 1]
            # Substitution is free only when the characters already match.
            if first[i - 1] != second[j - 1]:
                substitution += 1
            matrix[i][j] = min(matrix[i - 1][j] + 1,   # deletion
                               matrix[i][j - 1] + 1,   # insertion
                               substitution)

    return matrix[rows - 1][cols - 1]
#-----------------------------------------------------------------------------#
def checkWordExists(file_word):
    """Return True when file_word is present in the Dictionary table."""
    connection = sqlite3.connect("wordDictionary.db")
    try:
        cursor = connection.execute(
            """SELECT word
               FROM Dictionary
               WHERE word = :find""", {'find': file_word})
        # fetchone() yields a row when the word exists, None otherwise.
        return cursor.fetchone() is not None
    finally:
        connection.close()
def classifyEditDistance(word_to_correct):
    """Return up to 10 (candidate, distance) suggestions for a misspelling.

    If word_to_correct is already in the dictionary, an empty list is
    returned. Candidates are ordered by ascending edit distance; within
    one distance they keep database row order.
    """
    # Known word: nothing to correct.
    if checkWordExists(word_to_correct):
        return []

    connection = sqlite3.connect("wordDictionary.db")
    try:
        word_cursor = connection.cursor()
        word_cursor.execute("SELECT * FROM Dictionary")
        rows = word_cursor.fetchall()
    finally:
        # Bug fix: the original never closed this connection.
        connection.close()

    # Bucket every dictionary word by its edit distance to the input.
    distances = defaultdict(list)
    for row in rows:
        candidate = row[0]
        edit = calculateEditDistance(word_to_correct, candidate)
        distances[edit].append(candidate)

    # Collect the 10 closest candidates, smallest distance first.
    possible_corrections = []
    for edit in sorted(distances):
        for candidate in distances[edit]:
            possible_corrections.append((candidate, edit))
            if len(possible_corrections) == 10:
                return possible_corrections
    return possible_corrections
def inputFile(input_file_name):
    """Interactively spell-check a text file and return the corrected tokens.

    Tokenizes the file into words and punctuation, asks the user to pick a
    correction (or type one) for every word that is not in the dictionary,
    and records each decision with dataFiles(). User-typed corrections are
    inserted into the Dictionary table for next time.
    """
    content = []
    with open(input_file_name, 'r') as fix_file:
        for line in fix_file:
            # separate special chars from word
            content += findall(r"[\w']+|[.,!?;\n\t]", line)
    for word in content:
        # NOTE(review): index() finds the FIRST occurrence, so a word that
        # appears more than once only ever gets its first position replaced
        # -- confirm this is intended.
        index = content.index(word)
        if word not in ['.', ',', '?', '!', ';', '\n', '\t']:  # check for punctuations
            corrections = classifyEditDistance(word)
            size = len(corrections)
        else:
            size = 0
        if size > 0:
            # Present a menu: option 1 = type the correct word yourself,
            # options 2..11 map onto the up-to-10 suggestions.
            print("\nUnknown Word: {0}".format(word), end='\n')
            print("1. None are correct.")
            for count, possible_words in enumerate(corrections, start=2):
                print("{0}. {1}".format(count, possible_words[0]), end='\n')
            choice = int(input("Enter a selection: "))
            connection = sqlite3.connect("wordDictionary.db")
            word_cursor = connection.cursor()
            # Re-prompt on 0 or anything above 11.
            # NOTE(review): negative selections slip through this check.
            while choice > 11 or choice == 0:
                choice = int(input("Enter a selection: "))
            if choice == 1:
                # User supplies the correct word; remember it in the DB.
                insert_word = input("Enter correct word: ")
                word_cursor.execute("INSERT OR IGNORE INTO Dictionary(word) VALUES(:new_word)", {'new_word': insert_word})
                connection.commit()
                content[index] = insert_word  # replace word with correct version
                dataFiles(corrections, word, insert_word)
            elif choice in range(2, 12):
                word_index = choice - 2
                content[index] = corrections[word_index][0]  # replace word with correct version
                dataFiles(corrections, word, None, word_index)
            connection.close()
    return content
def destinationFile(destination_file, content):
    """Write the corrected tokens to destination_file.

    A space is inserted between two consecutive tokens unless either of
    them is punctuation/whitespace, which must attach directly.
    """
    no_space_tokens = ('.', ',', '?', '!', ';', '\n', '\t')
    with open(destination_file, 'w') as fixed_file:
        for pos in range(len(content) - 1):
            token = content[pos]
            if token in no_space_tokens or content[pos + 1] in no_space_tokens:
                fixed_file.write(token)           # no separating space
            else:
                fixed_file.write(token + ' ')     # normal word boundary
        # Last token: trailing space never matters here.
        fixed_file.write(content[-1])
def dataFiles(corrections, word, input_word=None, position=None):
    """Record the suggestion list for *word* in '<word>.dat'.

    Each line is "candidate distance". A user-typed correction
    (input_word) is logged first with distance 0; a picked suggestion
    (position) is popped to the top of the list. The corrections list is
    emptied afterwards.
    """
    with open("{0}.dat".format(word), 'w') as dat:
        if input_word is not None:
            # User-supplied correction: by definition distance 0.
            dat.write("{0} 0\n".format(input_word))
        if position is not None:
            # Move the chosen suggestion to the front of the file.
            chosen = corrections.pop(position)
            dat.write("{0} {1}\n".format(*chosen))
        for entry in corrections:
            dat.write("{0} {1}\n".format(*entry))
    corrections.clear()  # caller's list is consumed by this call
acedf6298bdffe06ebc9b108643630a5980d8a82 | 1,721 | py | Python | jmeter_api/basics/controller/elements.py | dashawn888/jmeter_api | 1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd | [
"Apache-2.0"
] | 11 | 2020-03-22T13:30:21.000Z | 2021-12-25T06:23:44.000Z | jmeter_api/basics/controller/elements.py | dashawn888/jmeter_api | 1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd | [
"Apache-2.0"
] | 37 | 2019-12-18T13:12:50.000Z | 2022-02-10T10:52:37.000Z | jmeter_api/basics/controller/elements.py | dashawn888/jmeter_api | 1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd | [
"Apache-2.0"
] | 5 | 2019-12-06T10:55:56.000Z | 2020-06-01T19:32:32.000Z | from abc import ABC
from typing import Union
from jmeter_api.basics.element.elements import BasicElement
from jmeter_api.basics.pre_processor.elements import BasicPreProcessor
from jmeter_api.basics.post_processor.elements import BasicPostProcessor
from jmeter_api.basics.config.elements import BasicConfig
from jmeter_api.basics.sampler.elements import BasicSampler
from jmeter_api.basics.assertion.elements import BasicAssertion
from jmeter_api.basics.listener.elements import BasicListener
from jmeter_api.basics.sampler.elements import BasicSampler
from jmeter_api.basics.timer.elements import BasicTimer
from jmeter_api.basics.utils import IncludesElements
class BasicController(BasicElement, IncludesElements, ABC):
def __init__(self,
name: str = 'BasicController',
comments: str = '',
is_enabled: bool = True):
IncludesElements.__init__(self)
super().__init__(name=name,
comments=comments,
is_enabled=is_enabled)
def append(self, new_element: Union[BasicSampler, BasicTimer, BasicConfig, "BasicController",\
BasicPreProcessor, BasicPostProcessor, BasicAssertion, BasicListener]):
if not isinstance(new_element, (BasicSampler, BasicTimer, BasicConfig, BasicController,\
BasicPreProcessor, BasicPostProcessor, BasicAssertion, BasicListener)):
raise TypeError(
f'new_element must be BasicSampler, BasicTimer,\
BasicConfig or BasicController. {type(new_element)} was given')
self._elements.append(new_element)
return self
print.__call__()
| 45.289474 | 111 | 0.70889 |
acedf682a38ba23fa0984b31b4a2e45edef39364 | 445 | py | Python | stubs/micropython-esp32-1_14/ure.py | Josverl/micropython-stubs | 3c32403ba2b57375f311ac0d023cd529340efe62 | [
"MIT"
] | 38 | 2020-10-18T21:59:44.000Z | 2022-03-17T03:03:28.000Z | all-stubs/micropython-esp32-1_14/ure.py | ks-tec/Hydroponic | d9347f82698841d85c0a45908e8671b36c50ffce | [
"MIT"
] | 176 | 2020-10-18T14:31:03.000Z | 2022-03-30T23:22:39.000Z | all-stubs/micropython-esp32-1_14/ure.py | ks-tec/Hydroponic | d9347f82698841d85c0a45908e8671b36c50ffce | [
"MIT"
] | 6 | 2020-12-28T21:11:12.000Z | 2022-02-06T04:07:50.000Z | """
Module: 'ure' on micropython-esp32-1.14
"""
# MCU: {'ver': '1.14', 'port': 'esp32', 'arch': 'xtensawin', 'sysname': 'esp32', 'release': '1.14.0', 'name': 'micropython', 'mpy': 10757, 'version': '1.14.0', 'machine': 'ESP32 module (spiram) with ESP32', 'build': '', 'nodename': 'esp32', 'platform': 'esp32', 'family': 'micropython'}
# Stubber: 1.3.9
# Auto-generated firmware stubs (see the "Stubber" header above): these
# mirror the names exported by the on-device 'ure' module so editors and
# linters can resolve them; the real implementations live in the firmware.
def compile():
    pass


def match():
    pass


def search():
    pass


def sub():
    pass
| 24.722222 | 286 | 0.579775 |
acedf6a3e3a17bc6deded1ddcc213fa693c2457d | 892 | py | Python | odoo-14.0/addons/l10n_pt/__manifest__.py | Yomy1996/P1 | 59e24cdd5f7f82005fe15bd7a7ff54dd5364dd29 | [
"CC-BY-3.0"
] | null | null | null | odoo-14.0/addons/l10n_pt/__manifest__.py | Yomy1996/P1 | 59e24cdd5f7f82005fe15bd7a7ff54dd5364dd29 | [
"CC-BY-3.0"
] | null | null | null | odoo-14.0/addons/l10n_pt/__manifest__.py | Yomy1996/P1 | 59e24cdd5f7f82005fe15bd7a7ff54dd5364dd29 | [
"CC-BY-3.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (C) 2012 Thinkopen Solutions, Lda. All Rights Reserved
# http://www.thinkopensolutions.com.
# Odoo module manifest: a single bare dict describing the addon.
{
    'name': 'Portugal - Accounting',
    'version': '0.011',
    'author': 'ThinkOpen Solutions',
    'website': 'http://www.thinkopensolutions.com/',
    'category': 'Accounting/Localizations/Account Charts',
    # Runtime string shown in the app store UI (kept in Portuguese).
    'description': 'Plano de contas SNC para Portugal',
    'depends': ['base',
                'account',
                'base_vat',
                ],
    # XML data files loaded at module install, in list order.
    'data': [
        'data/l10n_pt_chart_data.xml',
        'data/account_chart_template_data.xml',
        'data/account_fiscal_position_template_data.xml',
        'data/account_data.xml',
        'data/account_tax_data.xml',
        'data/account_chart_template_configure_data.xml',
    ],
}
| 33.037037 | 74 | 0.608744 |
acedf6cf49475e5c1b031892c90a37d7e18929b5 | 240 | py | Python | taco/util/default_root.py | MinerGreggy/taco-blockchain | 4f8e9c9d7df2181c81b247e35bdb5ad4ff99b19d | [
"Apache-2.0"
] | 18 | 2021-07-14T09:56:37.000Z | 2022-02-09T04:32:58.000Z | taco/util/default_root.py | MinerGreggy/taco-blockchain | 4f8e9c9d7df2181c81b247e35bdb5ad4ff99b19d | [
"Apache-2.0"
] | 9 | 2021-07-14T15:48:28.000Z | 2021-10-10T02:32:59.000Z | taco/util/default_root.py | MinerGreggy/taco-blockchain | 4f8e9c9d7df2181c81b247e35bdb5ad4ff99b19d | [
"Apache-2.0"
] | 10 | 2021-07-18T03:22:43.000Z | 2022-03-15T08:40:06.000Z | import os
from pathlib import Path
# Default data directory for the taco node; overridable via TACO_ROOT.
DEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv("TACO_ROOT", "~/.taco/mainnet"))).resolve()

# Separate location for key material; overridable via TACO_KEYS_ROOT.
DEFAULT_KEYS_ROOT_PATH = Path(os.path.expanduser(os.getenv("TACO_KEYS_ROOT", "~/.taco_keys"))).resolve()
| 34.285714 | 104 | 0.754167 |
acedf74dfc2f108a3b14c31f4aeccda1507cf118 | 1,591 | py | Python | includes/anchor_positive_examples.py | fendouai/2018_mldm-deep_metric_learing | e82bdb430fe035734139b902620ffbcb893c471d | [
"Apache-2.0"
] | 18 | 2019-10-03T19:25:06.000Z | 2019-10-16T02:06:47.000Z | includes/anchor_positive_examples.py | stefanthaler/2018_mldm-deep_metric_learing | 57e71f3d3d4c16833351d438f0c1e96387fe7587 | [
"Apache-2.0"
] | null | null | null | includes/anchor_positive_examples.py | stefanthaler/2018_mldm-deep_metric_learing | 57e71f3d3d4c16833351d438f0c1e96387fe7587 | [
"Apache-2.0"
] | 2 | 2019-03-29T01:44:57.000Z | 2019-04-03T08:20:18.000Z | # returns all euclidean distances for which either the labels are the same or the jaccard distance is below the positive threshold
def anchor_positive_examples(pw_label_equality, labels_in_batch, pw_jaccard_distances, pw_euclidean_distances, jd_pos_threshold):
    """Select the pairwise euclidean distances of anchor-positive pairs.

    A pair (i, j), i != j, counts as positive when either both examples are
    labeled and share the same label, or example i is unlabeled and the
    jaccard distance between the pair is below ``jd_pos_threshold``.

    Args (assumed to be square [batch, batch] tensors except the scalar
    threshold -- TODO confirm against the caller):
        pw_label_equality: int tensor, 1 where labels of a pair are equal.
        labels_in_batch: bool tensor, True where a labeled example exists.
        pw_jaccard_distances: float tensor of pairwise jaccard distances.
        pw_euclidean_distances: float tensor of pairwise euclidean distances.
        jd_pos_threshold: scalar jaccard-distance threshold for positives.

    Returns:
        (positive_ed, pos_because_of_labels, pos_because_of_jd) where
        positive_ed holds the euclidean distance for positive pairs and -1
        elsewhere, and the two boolean tensors record why a pair is positive.
    """
    batch_size = tf.shape(pw_label_equality)[0]
    labels_not_in_batch = tf.logical_not(labels_in_batch) # labels_in_batch is a bad name; it means "we have a label for this example".
    # positive conditions
    labels_match = tf.not_equal(pw_label_equality, tf.eye(batch_size, dtype=tf.int32)) # exclude equality between same elements
    pw_ji_for_pos = tf.add(pw_jaccard_distances, tf.eye(batch_size)*1.5) # jaccard distance is between 0 and 1 - adding 1.5 on the diagonal excludes self-pairs
    sequences_have_pos_jd = tf.less(x=pw_ji_for_pos, y=jd_pos_threshold, name="jd_pos_cond") # True where the pair is closer than the positive threshold
    # it's either an anchor-positive example because the jaccard distance is smaller than the threshold or because the labels are the same.
    pos_because_of_labels = tf.logical_and(labels_in_batch, labels_match)
    pos_because_of_jd = tf.logical_and(labels_not_in_batch, sequences_have_pos_jd)
    pos_cond = tf.logical_or(pos_because_of_labels, pos_because_of_jd)
    # exclude example itself from positive / negative - euclidean distance to between two identical vectors should always be 0
    positive_ed = tf.where(condition=pos_cond , x=pw_euclidean_distances, y=tf.ones_like(pw_euclidean_distances)*-1) # -1 means non positive
    return positive_ed, pos_because_of_labels, pos_because_of_jd
acedf80d032e2059dbdb29b615f0a97930d58019 | 2,457 | py | Python | pychemia/utils/serializer.py | petavazohi/PyChemia | e779389418771c25c830aed360773c63bb069372 | [
"MIT"
] | 67 | 2015-01-31T07:44:55.000Z | 2022-03-21T21:43:34.000Z | pychemia/utils/serializer.py | petavazohi/PyChemia | e779389418771c25c830aed360773c63bb069372 | [
"MIT"
] | 13 | 2016-06-03T19:07:51.000Z | 2022-03-31T04:20:40.000Z | pychemia/utils/serializer.py | petavazohi/PyChemia | e779389418771c25c830aed360773c63bb069372 | [
"MIT"
] | 37 | 2015-01-22T15:37:23.000Z | 2022-03-21T15:38:10.000Z |
import json
from abc import ABCMeta, abstractmethod
import numpy as np
from pychemia.utils.computing import deep_unicode
from numbers import Integral, Real
class PyChemiaJsonable(metaclass=ABCMeta):
    """
    Abstract base class specifying how to convert objects from/to dictionaries.
    PyChemiaJsonable objects must implement a to_dict property and, if the
    default is not suitable, a from_dict class method.

    NOTE: the Python 2 ``__metaclass__ = ABCMeta`` attribute was a silent
    no-op on Python 3, so abstractness was not enforced; the class-keyword
    form used here restores it.
    """

    @property
    @abstractmethod
    def to_dict(self):
        """
        A JSON-serializable dict representation of the object.
        """

    @classmethod
    def from_dict(cls, json_dict):
        """
        Recreate an object from its serialized form.

        The default implementation passes the dictionary entries as keyword
        arguments to the class constructor, so it works for any subclass
        whose ``__init__`` parameters match the keys produced by ``to_dict``.

        :param json_dict: (dict) serialized form of the object
        :return: a new instance of ``cls``
        """
        # Previously this built a source string and eval()'d it, which was
        # unsafe and also broken: str(cls) yields "<class '...'>", never
        # valid syntax.  Calling the constructor directly is equivalent.
        return cls(**json_dict)

    @property
    def to_json(self):
        """
        Returns a json string representation of the object.
        """
        # Serialize the dict form: the instance itself is not JSON-encodable.
        return json.dumps(self.to_dict)

    def save_to_file(self, filename):
        """
        Writes the json representation to a file.

        :param filename: (str) Filename for the json that will be created
        """
        with open(filename, "w") as f:
            json.dump(self.to_dict, f)
def generic_serializer(value):
    """
    Recursively convert ``value`` into plain JSON-serializable Python types.

    dicts and iterables are converted element-wise, numeric scalars
    (including numpy scalars) become ``int``/``float``, strings and ``None``
    pass through unchanged.

    :param value: the value to serialize
    :return: an equivalent structure built only of dict/list/str/int/float/None
    :raises ValueError: if the value cannot be serialized
    """
    value = deep_unicode(value)
    if value is None:
        return None
    elif isinstance(value, str):
        # Must be tested before the generic-iterable branch: str is iterable
        # in Python 3 and would otherwise recurse forever, one character at
        # a time.
        return value
    elif isinstance(value, dict):
        return {key: generic_serializer(value[key]) for key in value}
    elif hasattr(value, '__iter__'):
        return [generic_serializer(element) for element in value]
    elif isinstance(value, Integral):
        # Also covers numpy integer scalars, which register as Integral.
        return int(value)
    elif isinstance(value, Real):
        # Also covers numpy floating scalars, which register as Real.
        return float(value)
    elif isinstance(value, np.integer):
        return int(value)
    elif isinstance(value, np.floating):
        # np.float was removed in NumPy 1.24; np.floating is the scalar base
        # class and is safe on all NumPy versions.
        return float(value)
    else:
        raise ValueError("Could not serialize this: %s of type: %s" % (value, type(value)))
acedf80fb8642c7da012fe37a18066d17f35a6b4 | 3,781 | py | Python | build/debian_system_builder/debian_system_fbcode_builder_config.py | chaytanyasinha/openr | 32550704261a7ac1f069c5fad811de8bff7ecae5 | [
"MIT"
] | 1 | 2019-11-04T19:57:33.000Z | 2019-11-04T19:57:33.000Z | build/debian_system_builder/debian_system_fbcode_builder_config.py | chaytanyasinha/openr | 32550704261a7ac1f069c5fad811de8bff7ecae5 | [
"MIT"
] | null | null | null | build/debian_system_builder/debian_system_fbcode_builder_config.py | chaytanyasinha/openr | 32550704261a7ac1f069c5fad811de8bff7ecae5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2014-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import debian_specs.fbzmq as fbzmq
import debian_specs.python_fbthrift as python_fbthrift
import debian_specs.python_fbzmq as python_fbzmq
import specs.fbthrift as fbthrift
import specs.folly as folly
import specs.re2 as re2
from shell_quoting import ShellQuoted, path_join
"fbcode_builder steps to build & test Openr"
def fbcode_builder_spec(builder):
    """Return the fbcode_builder spec that builds, installs and tests Openr.

    The spec is a dict with the projects Openr depends on and the ordered
    build steps (libnl from source with a patch applied, then the cmake
    build of openr itself, its python modules, and the ctest run).
    """
    builder.add_option("thom311/libnl:git_hash", "libnl3_2_25")
    builder.add_option("openr/build:cmake_defines", {"ADD_ROOT_TESTS": "OFF"})

    # libnl needs a patch before building; when running outside of shipit
    # (no local checkout), download the patch from GitHub first.
    maybe_curl_patch = []
    patch = path_join(
        builder.option("projects_dir"),
        "../shipit_projects/openr/build/fix-route-obj-attr-list.patch",
    )
    if not builder.has_option("shipit_project_dir"):
        maybe_curl_patch = [
            builder.run(
                ShellQuoted(
                    "curl -O https://raw.githubusercontent.com/facebook/openr/master/"
                    "build/fix-route-obj-attr-list.patch"
                )
            )
        ]
        patch = "fix-route-obj-attr-list.patch"
    # Classic autotools build for libnl: patch, autogen, configure, install.
    libnl_build_commands = maybe_curl_patch + [
        builder.run(ShellQuoted("git apply {p}").format(p=patch)),
        builder.run(ShellQuoted("./autogen.sh")),
        builder.configure(),
        builder.make_and_install(),
    ]

    return {
        "depends_on": [folly, fbthrift, python_fbthrift, fbzmq, python_fbzmq, re2],
        "steps": [
            builder.github_project_workdir("thom311/libnl", "."),
            builder.step("Build and install thom311/libnl", libnl_build_commands),
            builder.fb_github_project_workdir("openr/build", "facebook"),
            builder.step(
                "Build and install openr/build",
                [
                    builder.cmake_configure("openr/build"),
                    # we need the pythonpath to find the thrift compiler
                    builder.run(
                        ShellQuoted(
                            'PYTHONPATH="$PYTHONPATH:"{p}/lib/python2.7/site-packages '
                            "make -j {n}"
                        ).format(
                            p=builder.option("prefix"),
                            n=builder.option("make_parallelism"),
                        )
                    ),
                    builder.run(ShellQuoted("sudo make install")),
                    builder.run(ShellQuoted("sudo ldconfig")),
                ],
            ),
            builder.step(
                "Install OpenR python modules",
                [
                    builder.workdir(
                        path_join(builder.option("projects_dir"), "openr/openr/py")
                    ),
                    builder.run(
                        ShellQuoted(
                            "sudo pip install cffi future pathlib 'networkx==2.2'"
                        )
                    ),
                    builder.run(ShellQuoted("sudo python setup.py build")),
                    builder.run(ShellQuoted("sudo python setup.py install")),
                ],
            ),
            builder.step(
                "Run openr tests",
                [
                    builder.workdir(
                        path_join(builder.option("projects_dir"), "openr/build")
                    ),
                    builder.run(ShellQuoted("CTEST_OUTPUT_ON_FAILURE=TRUE make test")),
                ],
            ),
        ],
    }
# Entry point consumed by the fbcode_builder driver: names the GitHub project
# and the function that produces its build spec.
config = {
    "github_project": "facebook/openr",
    "fbcode_builder_spec": fbcode_builder_spec,
}
| 35.669811 | 87 | 0.539804 |
acedfa74368e8239dc4d4b1b028989a785497f74 | 5,166 | py | Python | intask_api/projects/tests/tests_project_users_api.py | KirovVerst/intask | 4bdec6f49fa2873cca1354d7d3967973f5bcadc3 | [
"MIT"
] | null | null | null | intask_api/projects/tests/tests_project_users_api.py | KirovVerst/intask | 4bdec6f49fa2873cca1354d7d3967973f5bcadc3 | [
"MIT"
] | 7 | 2016-08-17T23:08:31.000Z | 2022-03-02T02:23:08.000Z | intask_api/projects/tests/tests_project_users_api.py | KirovVerst/intask | 4bdec6f49fa2873cca1354d7d3967973f5bcadc3 | [
"MIT"
] | null | null | null | from rest_framework.test import APIClient, APITestCase
from rest_framework import status
from intask_api.projects.models import Project
from django.contrib.auth.models import User
# TODO: test changing datetimes
class ProjectUsersTest(APITestCase):
    """Exercises /api/v1/projects/<id>/users/ for every role.

    Roles covered per endpoint: anonymous (no auth), project header (owner),
    ordinary project member, and an authenticated user outside the project.
    Fixture passwords are all "password".
    """
    fixtures = ['users.json', 'projects.json']

    def setUp(self):
        # Pick an arbitrary fixture project; all URLs are built against it.
        self.project = Project.objects.first()
        self.members = self.project.users.all()
        self.base_url = "/api/v1/projects/{0}/users/".format(self.project.id)
        # header_client: logged in as the project owner ("header").
        self.header = self.project.header
        self.header_client = APIClient()
        self.header_client.login(username=self.header.username, password="password")
        # member_client: logged in as a member who is not the header.
        self.member = self.members.exclude(id=self.project.header.id).first()
        self.member_client = APIClient()
        self.member_client.login(username=self.member.username, password="password")
        self.member_url = self.base_url + '{0}/'.format(self.member.id)
        # not member client: an authenticated user outside the project.
        self.no_member = User.objects.all().exclude(id__in=self.members.values_list('id', flat=True)).first()
        self.no_member_client = APIClient()
        self.no_member_client.login(username=self.no_member.username, password="password")

    def test_anonymous_list_users(self):
        """Anonymous users may not list project members."""
        # anonymous
        r = self.client.get(self.base_url)
        self.assertEqual(r.status_code, status.HTTP_403_FORBIDDEN)

    def test_member_list_users(self):
        """A member sees exactly the project's member set."""
        data = self.member_client.get(self.base_url).json()
        project_pks = set(map(lambda x: x['id'], data))
        self.assertEqual(set(self.members.values_list('id', flat=True)), project_pks)

    def test_not_member_list_users(self):
        """Authenticated outsiders may not list project members."""
        # not member
        r = self.no_member_client.get(self.base_url)
        self.assertEqual(r.status_code, status.HTTP_403_FORBIDDEN)

    def test_anonymous_get_member(self):
        """Detail view: allowed for header and members, forbidden otherwise."""
        # anonymous gets member
        r = self.client.get(self.member_url)
        self.assertEqual(r.status_code, status.HTTP_403_FORBIDDEN)
        # header gets member
        r = self.header_client.get(self.member_url)
        self.assertEqual(r.status_code, status.HTTP_200_OK)
        self.assertEqual(r.json()['id'], self.member.id)
        # member gets member
        r = self.member_client.get(self.member_url)
        self.assertEqual(r.status_code, status.HTTP_200_OK)
        self.assertEqual(r.json()['id'], self.member.id)
        # member gets another member
        another_member = self.project.users.all().exclude(id=self.member.id).first()
        r = self.member_client.get(self.base_url + '{0}/'.format(another_member.id))
        self.assertEqual(r.status_code, status.HTTP_200_OK)
        self.assertEqual(r.json()['id'], another_member.id)
        # not member gets member
        r = self.no_member_client.get(self.member_url)
        self.assertEqual(r.status_code, status.HTTP_403_FORBIDDEN)

    def test_add_new_member(self):
        """Only the header may add a user (by email) to the project."""
        # anonymous
        data = dict(email=self.no_member.email)
        r = self.client.post(self.base_url, data=data)
        self.assertEqual(r.status_code, status.HTTP_403_FORBIDDEN)
        # member
        r = self.member_client.post(self.base_url, data=data)
        self.assertEqual(r.status_code, status.HTTP_403_FORBIDDEN)
        # no member
        r = self.no_member_client.post(self.base_url, data=data)
        self.assertEqual(r.status_code, status.HTTP_403_FORBIDDEN)
        # header
        r = self.header_client.post(self.base_url, data)
        self.assertEqual(r.status_code, status.HTTP_200_OK)

    def test_add_current_member(self):
        """Re-adding an existing member succeeds without duplicating them."""
        data = dict(email=self.member.email)
        r = self.header_client.post(self.base_url, data)
        self.assertEqual(r.status_code, status.HTTP_200_OK)
        member_pks = self.project.users.all().values_list('id', flat=True)
        self.assertSetEqual(set(self.members.values_list('id', flat=True)), set(member_pks))

    def test_delete_member(self):
        """Only the header may remove a member from the project."""
        # anonymous
        r = self.client.delete(self.member_url)
        self.assertEqual(r.status_code, status.HTTP_403_FORBIDDEN)
        # member
        another_member = self.project.users.all().exclude(id=self.member.id).first()
        r = self.member_client.delete(self.base_url + '{0}/'.format(another_member.id))
        self.assertEqual(r.status_code, status.HTTP_403_FORBIDDEN)
        # no member
        r = self.no_member_client.delete(self.member_url)
        self.assertEqual(r.status_code, status.HTTP_403_FORBIDDEN)
        # header
        r = self.header_client.delete(self.member_url)
        self.assertEqual(r.status_code, status.HTTP_204_NO_CONTENT)

    def test_delete_header(self):
        """The header cannot remove themselves from their own project."""
        # header is deleting himself from project
        header_url = self.base_url + '{0}/'.format(self.header.id)
        r = self.header_client.delete(header_url)
        self.assertEqual(r.status_code, status.HTTP_400_BAD_REQUEST)

    def test_delete_no_member(self):
        """Deleting a user who is not in the project yields 404."""
        no_member_url = self.base_url + '{0}/'.format(self.no_member.id)
        r = self.header_client.delete(no_member_url)
        self.assertEqual(r.status_code, status.HTTP_404_NOT_FOUND)
acedfa927be20707a51a828089a1b5e5d43b3341 | 117 | py | Python | oldfiles/newmerge.py | rupin/pdfmerger | fee19523e88362d215f1a29cdab0d140f4c9385c | [
"MIT"
] | null | null | null | oldfiles/newmerge.py | rupin/pdfmerger | fee19523e88362d215f1a29cdab0d140f4c9385c | [
"MIT"
] | null | null | null | oldfiles/newmerge.py | rupin/pdfmerger | fee19523e88362d215f1a29cdab0d140f4c9385c | [
"MIT"
] | null | null | null | from PyPDF2 import PdfFileReader
# Open the source PDF.  The original bound the reader to `resultfile` but then
# used `resultFile`, which always raised NameError; the name is now consistent.
fileName = "abc.pdf"
resultFile = PdfFileReader(fileName)
# NOTE(review): createBlankPage is a PdfFileWriter API, not PdfFileReader —
# presumably a writer was intended here; confirm against the PyPDF2 version
# in use before relying on this call.
resultFile.createBlankPage()
| 19.5 | 34 | 0.846154 |
acedfb33986e0a87d16b41ed4b57dd8e31199523 | 21,822 | py | Python | ios/build/bots/scripts/test_apps.py | Ron423c/chromium | 2edf7b980065b648f8b2a6e52193d83832fe36b7 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 575 | 2015-06-18T23:58:20.000Z | 2022-03-23T09:32:39.000Z | ios/build/bots/scripts/test_apps.py | Ron423c/chromium | 2edf7b980065b648f8b2a6e52193d83832fe36b7 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | ios/build/bots/scripts/test_apps.py | iridium-browser/iridium-browser | 907e31cf5ce5ad14d832796e3a7c11e496828959 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 52 | 2015-07-14T10:40:50.000Z | 2022-03-15T01:11:49.000Z | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test apps for running tests using xcodebuild."""
import os
import plistlib
import subprocess
import time
import shard_util
import test_runner
OUTPUT_DISABLED_TESTS_TEST_ARG = '--write-compiled-tests-json-to-writable-path'
#TODO(crbug.com/1046911): Remove usage of KIF filters.
def get_kif_test_filter(tests, invert=False):
  """Returns the KIF test filter to filter the given test cases.

  Args:
    tests: List of test cases to filter.
    invert: Whether to invert the filter or not. Inverted, the filter will
      match everything except the given test cases.

  Returns:
    A string which can be supplied to GKIF_SCENARIO_FILTER.
  """
  # KIF filters are pipe-separated case names with the "KIF." prefix dropped:
  # NAME:a|b|c matches KIF.a, KIF.b, KIF.c; a leading "-" negates the match.
  stripped_cases = [case.split('KIF.', 1)[-1] for case in tests]
  sign = '-' if invert else ''
  return '%sNAME:%s' % (sign, '|'.join(stripped_cases))
def get_gtest_filter(tests, invert=False):
  """Returns the GTest filter to filter the given test cases.

  Args:
    tests: List of test cases to filter.
    invert: Whether to invert the filter or not. Inverted, the filter will
      match everything except the given test cases.

  Returns:
    A string which can be supplied to --gtest_filter.
  """
  # GTest filters are colon-separated: a:b:c matches a, b and c, while
  # -a:b:c matches everything except a, b and c.
  joined = ':'.join(tests)
  if invert:
    return '-' + joined
  return joined
def get_bundle_id(app_path):
  """Get bundle identifier for app.

  Args:
    app_path: (str) A path to app.
  """
  # Ask PlistBuddy for the CFBundleIdentifier entry in the app's Info.plist
  # and strip the trailing newline the tool appends to its output.
  info_plist = os.path.join(app_path, 'Info.plist')
  cmd = ['/usr/libexec/PlistBuddy', '-c', 'Print:CFBundleIdentifier', info_plist]
  return subprocess.check_output(cmd).rstrip()
class GTestsApp(object):
  """Gtests app to run.

  Stores data about egtests:
    test_app: full path to an app.
  """

  def __init__(self,
               test_app,
               included_tests=None,
               excluded_tests=None,
               test_args=None,
               env_vars=None,
               release=False,
               host_app_path=None,
               inserted_libs=None):
    """Initialize Egtests.

    Args:
      test_app: (str) full path to egtests app.
      included_tests: (list) Specific tests to run
         E.g.
          [ 'TestCaseClass1/testMethod1', 'TestCaseClass2/testMethod2']
      excluded_tests: (list) Specific tests not to run
         E.g.
          [ 'TestCaseClass1', 'TestCaseClass2/testMethod2']
      test_args: List of strings to pass as arguments to the test when
        launching.
      env_vars: List of environment variables to pass to the test itself.
      release: (bool) Whether the app is release build.
      host_app_path: (str) full path to the host app, for hosted test bundles.
      inserted_libs: List of libraries to insert when running the test.

    Raises:
      AppNotFoundError: If the given app does not exist
    """
    if not os.path.exists(test_app):
      raise test_runner.AppNotFoundError(test_app)
    self.test_app_path = test_app
    self.project_path = os.path.dirname(self.test_app_path)
    self.test_args = test_args or []
    self.env_vars = {}
    for env_var in env_vars or []:
      # "KEY=value" strings become dict entries; a bare "KEY" maps to None.
      env_var = env_var.split('=', 1)
      self.env_vars[env_var[0]] = None if len(env_var) == 1 else env_var[1]
    self.included_tests = included_tests or []
    self.excluded_tests = excluded_tests or []
    self.disabled_tests = []
    # Module name is the app bundle's base name without extension.
    self.module_name = os.path.splitext(os.path.basename(test_app))[0]
    self.release = release
    self.host_app_path = host_app_path
    self.inserted_libs = inserted_libs or []

  def fill_xctest_run(self, out_dir):
    """Fills xctestrun file by egtests.

    Args:
      out_dir: (str) A path where xctestrun will store.

    Returns:
      A path to xctestrun file.
    """
    folder = os.path.abspath(os.path.join(out_dir, os.pardir))
    if not os.path.exists(folder):
      os.makedirs(folder)
    # Timestamped name keeps successive runs from clobbering each other.
    xctestrun = os.path.join(folder, 'run_%d.xctestrun' % int(time.time()))
    if not os.path.exists(xctestrun):
      with open(xctestrun, 'w'):
        pass
    # Creates a dict with data about egtests to run - fill all required fields:
    # egtests_module, egtest_app_path, egtests_xctest_path and
    # filtered tests if filter is specified.
    # Write data in temp xctest run file.
    # NOTE(review): plistlib.writePlist was removed in Python 3.9; this code
    # presumably still runs under an older interpreter — confirm before
    # upgrading, or migrate to plistlib.dump.
    plistlib.writePlist(self.fill_xctestrun_node(), xctestrun)
    return xctestrun

  def fill_xctestrun_node(self):
    """Fills only required nodes for egtests in xctestrun file.

    Returns:
      A node with filled required fields about egtests.
    """
    module = self.module_name + '_module'

    # If --run-with-custom-webkit is passed as a test arg, set up
    # DYLD_FRAMEWORK_PATH and DYLD_LIBRARY_PATH to load the custom webkit
    # modules.
    dyld_path = self.project_path
    if '--run-with-custom-webkit' in self.test_args:
      if self.host_app_path:
        webkit_path = os.path.join(self.host_app_path, 'WebKitFrameworks')
      else:
        webkit_path = os.path.join(self.test_app_path, 'WebKitFrameworks')
      dyld_path = dyld_path + ':' + webkit_path

    module_data = {
        'TestBundlePath': self.test_app_path,
        'TestHostPath': self.test_app_path,
        'TestHostBundleIdentifier': get_bundle_id(self.test_app_path),
        'TestingEnvironmentVariables': {
            'DYLD_LIBRARY_PATH':
                '%s:__PLATFORMS__/iPhoneSimulator.platform/Developer/Library' %
                dyld_path,
            'DYLD_FRAMEWORK_PATH':
                '%s:__PLATFORMS__/iPhoneSimulator.platform/'
                'Developer/Library/Frameworks' % dyld_path,
        }
    }

    if self.inserted_libs:
      module_data['TestingEnvironmentVariables'][
          'DYLD_INSERT_LIBRARIES'] = ':'.join(self.inserted_libs)

    xctestrun_data = {module: module_data}
    kif_filter = []
    gtest_filter = []

    if self.included_tests:
      kif_filter = get_kif_test_filter(self.included_tests, invert=False)
      gtest_filter = get_gtest_filter(self.included_tests, invert=False)
    elif self.excluded_tests:
      kif_filter = get_kif_test_filter(self.excluded_tests, invert=True)
      gtest_filter = get_gtest_filter(self.excluded_tests, invert=True)

    if kif_filter:
      # NOTE(review): this assigns the *gtest* filter string to the KIF
      # filter env var; looks like it should be kif_filter — confirm.
      self.env_vars['GKIF_SCENARIO_FILTER'] = gtest_filter
    if gtest_filter:
      # Removed previous gtest-filter if exists.
      self.test_args = [el for el in self.test_args
                        if not el.startswith('--gtest_filter=')]
      self.test_args.append('--gtest_filter=%s' % gtest_filter)

    if self.env_vars:
      xctestrun_data[module].update({'EnvironmentVariables': self.env_vars})
    if self.test_args:
      xctestrun_data[module].update({'CommandLineArguments': self.test_args})

    if self.excluded_tests:
      xctestrun_data[module].update({
          'SkipTestIdentifiers': self.excluded_tests
      })
    if self.included_tests:
      xctestrun_data[module].update({
          'OnlyTestIdentifiers': self.included_tests
      })
    return xctestrun_data

  def command(self, out_dir, destination, shards):
    """Returns the command that launches tests using xcodebuild.

    Format of command:
    xcodebuild test-without-building -xctestrun file.xctestrun \
      -parallel-testing-enabled YES -parallel-testing-worker-count %d% \
      [-destination "destination"] -resultBundlePath %output_path%

    Args:
      out_dir: (str) An output directory.
      destination: (str) A destination of running simulator.
      shards: (int) A number of shards.

    Returns:
      A list of strings forming the command to launch the test.
    """
    cmd = [
        'xcodebuild', 'test-without-building',
        '-xctestrun', self.fill_xctest_run(out_dir),
        '-destination', destination,
        '-resultBundlePath', out_dir
    ]
    if shards > 1:
      cmd += ['-parallel-testing-enabled', 'YES',
              '-parallel-testing-worker-count', str(shards)]
    return cmd

  def get_all_tests(self):
    """Gets all tests to run in this object."""
    # Method names that starts with test* and also are in *TestCase classes
    # but they are not test-methods.
    # TODO(crbug.com/982435): Rename not test methods with test-suffix.
    none_tests = ['ChromeTestCase/testServer', 'FindInPageTestCase/testURL']
    # TODO(crbug.com/1123681): Move all_tests to class var. Set all_tests,
    # disabled_tests values in initialization to avoid multiple calls to otool.
    all_tests = []
    # Only store the tests when there is the test arg.
    store_disabled_tests = OUTPUT_DISABLED_TESTS_TEST_ARG in self.test_args
    self.disabled_tests = []
    for test_class, test_method in shard_util.fetch_test_names(
        self.test_app_path,
        self.host_app_path,
        self.release,
        enabled_tests_only=False):
      test_name = '%s/%s' % (test_class, test_method)
      if (test_name not in none_tests and
          # included_tests contains the tests to execute, which may be a subset
          # of all tests b/c of the iOS test sharding logic in run.py. Filter by
          # self.included_tests if specified
          (test_class in self.included_tests if self.included_tests else True)):
        if test_method.startswith('test'):
          all_tests.append(test_name)
        elif store_disabled_tests:
          self.disabled_tests.append(test_name)
    return all_tests
class EgtestsApp(GTestsApp):
  """Egtests to run.

  Stores data about egtests:
    egtests_app: full path to egtests app.
    project_path: root project folder.
    module_name: egtests module name.
    included_tests: List of tests to run.
    excluded_tests: List of tests not to run.
  """

  def __init__(self,
               egtests_app,
               included_tests=None,
               excluded_tests=None,
               test_args=None,
               env_vars=None,
               release=False,
               host_app_path=None,
               inserted_libs=None):
    """Initialize Egtests.

    Args:
      egtests_app: (str) full path to egtests app.
      included_tests: (list) Specific tests to run
         E.g.
          [ 'TestCaseClass1/testMethod1', 'TestCaseClass2/testMethod2']
      excluded_tests: (list) Specific tests not to run
         E.g.
          [ 'TestCaseClass1', 'TestCaseClass2/testMethod2']
      test_args: List of strings to pass as arguments to the test when
        launching.
      env_vars: List of environment variables to pass to the test itself.
      release: (bool) Whether this is a release build.
      host_app_path: (str) full path to host app.
      inserted_libs: List of libraries to insert when running the test.

    Raises:
      AppNotFoundError: If the given app does not exist
    """
    # The XCTest bundle-inject dylib is always needed so the EG test bundle
    # gets loaded into the host process.
    inserted_libs = list(inserted_libs or [])
    inserted_libs.append('__PLATFORMS__/iPhoneSimulator.platform/Developer/'
                         'usr/lib/libXCTestBundleInject.dylib')
    super(EgtestsApp,
          self).__init__(egtests_app, included_tests, excluded_tests, test_args,
                         env_vars, release, host_app_path, inserted_libs)

  def _xctest_path(self):
    """Gets xctest-file from egtests/PlugIns folder.

    Returns:
      A path for xctest in the format of /PlugIns/file.xctest

    Raises:
      PlugInsNotFoundError: If no PlugIns folder found in egtests.app.
      XCTestPlugInNotFoundError: If no xctest-file found in PlugIns.
    """
    plugins_dir = os.path.join(self.test_app_path, 'PlugIns')
    if not os.path.exists(plugins_dir):
      raise test_runner.PlugInsNotFoundError(plugins_dir)
    plugin_xctest = None
    if os.path.exists(plugins_dir):
      for plugin in os.listdir(plugins_dir):
        if plugin.endswith('.xctest'):
          plugin_xctest = os.path.join(plugins_dir, plugin)
    if not plugin_xctest:
      raise test_runner.XCTestPlugInNotFoundError(plugin_xctest)
    # Return the path relative to the app bundle root.
    return plugin_xctest.replace(self.test_app_path, '')

  def fill_xctestrun_node(self):
    """Fills only required nodes for egtests in xctestrun file.

    Returns:
      A node with filled required fields about egtests.
    """
    xctestrun_data = super(EgtestsApp, self).fill_xctestrun_node()
    module_data = xctestrun_data[self.module_name + '_module']

    module_data['TestBundlePath'] = '__TESTHOST__/%s' % self._xctest_path()
    module_data['TestingEnvironmentVariables'][
        'XCInjectBundleInto'] = '__TESTHOST__/%s' % self.module_name

    if self.host_app_path:
      # Module data specific to EG2 tests
      module_data['IsUITestBundle'] = True
      module_data['IsXCTRunnerHostedTestBundle'] = True
      module_data['UITargetAppPath'] = '%s' % self.host_app_path
      # Special handling for Xcode10.2
      dependent_products = [
          module_data['UITargetAppPath'],
          module_data['TestBundlePath'],
          module_data['TestHostPath']
      ]
      module_data['DependentProductPaths'] = dependent_products
    # Module data specific to EG1 tests
    else:
      module_data['IsAppHostedTestBundle'] = True

    return xctestrun_data
class DeviceXCTestUnitTestsApp(GTestsApp):
  """XCTest hosted unit tests to run on devices.

  This is for the XCTest framework hosted unit tests running on devices.

  Stores data about tests:
    tests_app: full path to tests app.
    project_path: root project folder.
    module_name: egtests module name.
    included_tests: List of tests to run.
    excluded_tests: List of tests not to run.
  """

  def __init__(self,
               tests_app,
               included_tests=None,
               excluded_tests=None,
               test_args=None,
               env_vars=None,
               release=False):
    """Initialize the class.

    Args:
      tests_app: (str) full path to tests app.
      included_tests: (list) Specific tests to run
         E.g.
          [ 'TestCaseClass1/testMethod1', 'TestCaseClass2/testMethod2']
      excluded_tests: (list) Specific tests not to run
         E.g.
          [ 'TestCaseClass1', 'TestCaseClass2/testMethod2']
      test_args: List of strings to pass as arguments to the test when
        launching. Test arg to run as XCTest based unit test will be appended.
      env_vars: List of environment variables to pass to the test itself.
      release: (bool) Whether this is a release build.

    Raises:
      AppNotFoundError: If the given app does not exist
    """
    test_args = list(test_args or [])
    # Makes the app binary route its unit tests through the XCTest framework.
    test_args.append('--enable-run-ios-unittests-with-xctest')
    super(DeviceXCTestUnitTestsApp,
          self).__init__(tests_app, included_tests, excluded_tests, test_args,
                         env_vars, release, None)

  # TODO(crbug.com/1077277): Refactor class structure and remove duplicate code.
  def _xctest_path(self):
    """Gets xctest-file from egtests/PlugIns folder.

    Returns:
      A path for xctest in the format of /PlugIns/file.xctest

    Raises:
      PlugInsNotFoundError: If no PlugIns folder found in egtests.app.
      XCTestPlugInNotFoundError: If no xctest-file found in PlugIns.
    """
    plugins_dir = os.path.join(self.test_app_path, 'PlugIns')
    if not os.path.exists(plugins_dir):
      raise test_runner.PlugInsNotFoundError(plugins_dir)
    plugin_xctest = None
    if os.path.exists(plugins_dir):
      for plugin in os.listdir(plugins_dir):
        if plugin.endswith('.xctest'):
          plugin_xctest = os.path.join(plugins_dir, plugin)
    if not plugin_xctest:
      raise test_runner.XCTestPlugInNotFoundError(plugin_xctest)
    # Return the path relative to the app bundle root.
    return plugin_xctest.replace(self.test_app_path, '')

  def fill_xctestrun_node(self):
    """Fills only required nodes for XCTest hosted unit tests in xctestrun file.

    Returns:
      A node with filled required fields about tests.
    """
    xctestrun_data = {
        'TestTargetName': {
            'IsAppHostedTestBundle': True,
            'TestBundlePath': '__TESTHOST__/%s' % self._xctest_path(),
            'TestHostBundleIdentifier': get_bundle_id(self.test_app_path),
            'TestHostPath': '%s' % self.test_app_path,
            'TestingEnvironmentVariables': {
                'DYLD_INSERT_LIBRARIES':
                    '__TESTHOST__/Frameworks/libXCTestBundleInject.dylib',
                'DYLD_LIBRARY_PATH':
                    '__PLATFORMS__/iPhoneOS.platform/Developer/Library',
                'DYLD_FRAMEWORK_PATH':
                    '__PLATFORMS__/iPhoneOS.platform/Developer/'
                    'Library/Frameworks',
                'XCInjectBundleInto':
                    '__TESTHOST__/%s' % self.module_name
            }
        }
    }

    if self.env_vars:
      # Fixed: this previously updated the nonexistent attribute
      # |self.xctestrun_data|, raising AttributeError whenever env_vars were
      # supplied; the local dict that is returned must be updated instead.
      xctestrun_data['TestTargetName'].update(
          {'EnvironmentVariables': self.env_vars})

    gtest_filter = []
    if self.included_tests:
      gtest_filter = get_gtest_filter(self.included_tests, invert=False)
    elif self.excluded_tests:
      gtest_filter = get_gtest_filter(self.excluded_tests, invert=True)
    if gtest_filter:
      # Removed previous gtest-filter if exists.
      self.test_args = [
          el for el in self.test_args if not el.startswith('--gtest_filter=')
      ]
      self.test_args.append('--gtest_filter=%s' % gtest_filter)

    self.test_args.append('--gmock_verbose=error')

    xctestrun_data['TestTargetName'].update(
        {'CommandLineArguments': self.test_args})
    return xctestrun_data
class SimulatorXCTestUnitTestsApp(GTestsApp):
  """XCTest hosted unit tests to run on simulators.

  This is for the XCTest framework hosted unit tests running on simulators.

  Stores data about tests:
    tests_app: full path to tests app.
    project_path: root project folder.
    module_name: egtests module name.
    included_tests: List of tests to run.
    excluded_tests: List of tests not to run.
  """

  def __init__(self,
               tests_app,
               included_tests=None,
               excluded_tests=None,
               test_args=None,
               env_vars=None,
               release=False):
    """Initialize the class.

    Args:
      tests_app: (str) full path to tests app.
      included_tests: (list) Specific tests to run
         E.g.
          [ 'TestCaseClass1/testMethod1', 'TestCaseClass2/testMethod2']
      excluded_tests: (list) Specific tests not to run
         E.g.
          [ 'TestCaseClass1', 'TestCaseClass2/testMethod2']
      test_args: List of strings to pass as arguments to the test when
        launching. Test arg to run as XCTest based unit test will be appended.
      env_vars: List of environment variables to pass to the test itself.
      release: (bool) Whether this is a release build.

    Raises:
      AppNotFoundError: If the given app does not exist
    """
    test_args = list(test_args or [])
    # Makes the app binary route its unit tests through the XCTest framework.
    test_args.append('--enable-run-ios-unittests-with-xctest')
    super(SimulatorXCTestUnitTestsApp,
          self).__init__(tests_app, included_tests, excluded_tests, test_args,
                         env_vars, release, None)

  # TODO(crbug.com/1077277): Refactor class structure and remove duplicate code.
  def _xctest_path(self):
    """Gets xctest-file from egtests/PlugIns folder.

    Returns:
      A path for xctest in the format of /PlugIns/file.xctest

    Raises:
      PlugInsNotFoundError: If no PlugIns folder found in egtests.app.
      XCTestPlugInNotFoundError: If no xctest-file found in PlugIns.
    """
    plugins_dir = os.path.join(self.test_app_path, 'PlugIns')
    if not os.path.exists(plugins_dir):
      raise test_runner.PlugInsNotFoundError(plugins_dir)
    plugin_xctest = None
    if os.path.exists(plugins_dir):
      for plugin in os.listdir(plugins_dir):
        if plugin.endswith('.xctest'):
          plugin_xctest = os.path.join(plugins_dir, plugin)
    if not plugin_xctest:
      raise test_runner.XCTestPlugInNotFoundError(plugin_xctest)
    # Return the path relative to the app bundle root.
    return plugin_xctest.replace(self.test_app_path, '')

  def fill_xctestrun_node(self):
    """Fills only required nodes for XCTest hosted unit tests in xctestrun file.

    Returns:
      A node with filled required fields about tests.
    """
    xctestrun_data = {
        'TestTargetName': {
            'IsAppHostedTestBundle': True,
            'TestBundlePath': '__TESTHOST__/%s' % self._xctest_path(),
            'TestHostBundleIdentifier': get_bundle_id(self.test_app_path),
            'TestHostPath': '%s' % self.test_app_path,
            'TestingEnvironmentVariables': {
                'DYLD_INSERT_LIBRARIES':
                    '__PLATFORMS__/iPhoneSimulator.platform/Developer/usr/lib/'
                    'libXCTestBundleInject.dylib',
                'DYLD_LIBRARY_PATH':
                    '__PLATFORMS__/iPhoneSimulator.platform/Developer/Library',
                'DYLD_FRAMEWORK_PATH':
                    '__PLATFORMS__/iPhoneSimulator.platform/Developer/'
                    'Library/Frameworks',
                'XCInjectBundleInto':
                    '__TESTHOST__/%s' % self.module_name
            }
        }
    }

    if self.env_vars:
      # Fixed: this previously updated the nonexistent attribute
      # |self.xctestrun_data|, raising AttributeError whenever env_vars were
      # supplied; the local dict that is returned must be updated instead.
      xctestrun_data['TestTargetName'].update(
          {'EnvironmentVariables': self.env_vars})

    gtest_filter = []
    if self.included_tests:
      gtest_filter = get_gtest_filter(self.included_tests, invert=False)
    elif self.excluded_tests:
      gtest_filter = get_gtest_filter(self.excluded_tests, invert=True)
    if gtest_filter:
      # Removed previous gtest-filter if exists.
      self.test_args = [
          el for el in self.test_args if not el.startswith('--gtest_filter=')
      ]
      self.test_args.append('--gtest_filter=%s' % gtest_filter)

    self.test_args.append('--gmock_verbose=error')

    xctestrun_data['TestTargetName'].update(
        {'CommandLineArguments': self.test_args})
    return xctestrun_data
| 36.009901 | 80 | 0.6652 |
acedfb903b5f1e9580e657e0a495eacecd782e15 | 2,501 | py | Python | plugins/tricks.py | NUKnightLab/slask | 91e21b391f3d8ac94fd5a1b7335483f49a5278b4 | [
"MIT"
] | 3 | 2015-03-11T05:01:55.000Z | 2021-04-29T01:52:52.000Z | plugins/tricks.py | NUKnightLab/slask | 91e21b391f3d8ac94fd5a1b7335483f49a5278b4 | [
"MIT"
] | null | null | null | plugins/tricks.py | NUKnightLab/slask | 91e21b391f3d8ac94fd5a1b7335483f49a5278b4 | [
"MIT"
] | null | null | null | """Directly addressing @leelou, you can ask her to do a trick."""
import re
from random import choice
import logging
# U03J911F1 (presumably the bot's Slack user id — confirm against the workspace)
# Matches a direct mention followed by a command: "<@USERID> do something".
# Group 1 = user id, group 2 = the command text.
TRIGGER = re.compile(r'<@(.+)>\s+(.+)',re.IGNORECASE)
def on_message(msg, context):
    """Context has keys: 'client', 'config', 'hooks'"""
    match = TRIGGER.match(msg.get("text", ""))
    if not match:
        return None
    user_id, command = match.groups()
    user = context['client'].server.users.get(user_id, None)
    if not user:
        logging.warn("tricks doesn't know user id {}".format(user_id))
        return None
    logging.info('tricks @{}'.format(user['name']))
    # Only the bot user 'leelou' performs tricks; dispatch on the first
    # phrase pattern that matches the command.
    if user['name'] == 'leelou':
        for pattern, handler in PHRASES:
            if pattern.match(command):
                return handler(command)
    return None
def on_presence_change(msg, context):
    """Log (debug level) each user that Slack reports as becoming active."""
    if msg.get('presence') != 'active':
        return None
    user_id = msg.get('user', None)
    user_obj = None
    user_name = None
    if user_id:
        user_obj = context['client'].server.users.get(user_id, None)
    if user_obj:
        user_name = user_obj.get('name', None)
    logging.debug(u"Leelou sees {} ({})".format(user_id, user_name))
    return None
# Reaction GIF pools, one per trick. A handler picks one uniformly at random.
ROLL_OVER_GIFS = [
    'http://stream1.gifsoup.com/view4/2110073/french-bulldog-roll-over-o.gif',
    'http://www.beheadingboredom.com/wp-content/uploads/2013/04/cat-teaches-dog-trick.gif',
    'http://giphy.com/gifs/NnafYvjXZK9j2/html5',
]
SHAKE_GIFS = [
    'http://petapixel.com/assets/uploads/2011/07/dog2.jpg',
    'http://petapixel.com/assets/uploads/2013/10/shake3.jpg',
    'http://thumbs.dreamstime.com/x/wet-dog-shaking-6990844.jpg',
    'http://pad1.whstatic.com/images/thumb/c/c4/Teach-Your-Dog-to-Shake-Hands-Step-3.jpg/670px-Teach-Your-Dog-to-Shake-Hands-Step-3.jpg',
    'http://petsitterpatrol.com/wp-content/uploads/2012/04/high-five1-300x213.jpg',
    'http://giphy.com/gifs/KW6evDKuJkv4s/html5',
    'http://giphy.com/gifs/PWuPMJggRWDCM/html5',
]
PLAY_DEAD_GIFS = [
    'http://giphy.com/gifs/cute-sloth-playing-dead-y5owIXKzlPYA0',
    'http://giphy.com/gifs/NaSfr9cS4maLC/html5',
    'http://giphy.com/gifs/55HTlUXBKXGWA/html5',
    'http://giphy.com/gifs/ZptvPz6BWZM8U/html5',
]


def roll_over(command):
    """Return a random 'roll over' GIF URL."""
    return choice(ROLL_OVER_GIFS)


def shake(command):
    """Return a random 'shake' GIF URL."""
    return choice(SHAKE_GIFS)


def play_dead(command):
    """Return a random 'play dead' GIF URL."""
    return choice(PLAY_DEAD_GIFS)


# Dispatch table: (compiled pattern, handler). on_message uses the first match.
PHRASES = [
    (re.compile(r'^.*roll\s*over.*$', re.IGNORECASE), roll_over),
    (re.compile(r'^.*shake.*$', re.IGNORECASE), shake),
    (re.compile(r'^.*play ded.*$', re.IGNORECASE), play_dead),
]
| 42.389831 | 493 | 0.672931 |
acedfccfaa5d00efa52ef0fa0a0723a5f19a314f | 800 | gyp | Python | packages/node-firebird-native-api/binding.gyp | gsbelarus/node-firebird-drivers | 908af2ae47004553abb9dc2a5719a040d2bc0528 | [
"MIT"
] | 45 | 2018-06-13T01:01:31.000Z | 2022-03-26T16:37:55.000Z | packages/node-firebird-native-api/binding.gyp | gsbelarus/node-firebird-drivers | 908af2ae47004553abb9dc2a5719a040d2bc0528 | [
"MIT"
] | 65 | 2018-06-13T02:04:21.000Z | 2022-03-30T17:53:22.000Z | packages/node-firebird-native-api/binding.gyp | gsbelarus/node-firebird-drivers | 908af2ae47004553abb9dc2a5719a040d2bc0528 | [
"MIT"
] | 24 | 2018-07-23T12:48:51.000Z | 2022-03-30T17:00:42.000Z | {
"targets": [
{
"target_name": "addon",
"sources": [
"src/native/fb-native.cpp"
],
"include_dirs": [
"<!@(node -p \"require('node-addon-api').include\")"
],
"cflags!": [ "-fno-exceptions" ],
"cflags_cc!": [ "-fno-exceptions" ],
"defines": [
'NAPI_VERSION=4',
'NAPI_EXPERIMENTAL'
],
"configurations": {
"Release": {
"defines": [
"NDEBUG"
]
}
},
'conditions': [
[
'OS == "win"', {
"msvs_settings": {
"VCCLCompilerTool": {
"ExceptionHandling": 1,
"AdditionalOptions": ["/bigobj"]
}
},
"defines": [
"_HAS_EXCEPTIONS=1"
]
},
'OS == "mac"', {
"xcode_settings": {
"GCC_ENABLE_CPP_EXCEPTIONS": "YES"
}
}
]
]
}
]
}
| 17.021277 | 56 | 0.45 |
acedfd2a0f673b37811e65dbef1f0bf710a33d62 | 14,695 | py | Python | cyclopeps/tests/ctf_test/test_cal_energy.py | philliphelms/cyclopeps | f024d827a7412f4d9df10d6b9453c2692b1a74c3 | [
"MIT"
] | null | null | null | cyclopeps/tests/ctf_test/test_cal_energy.py | philliphelms/cyclopeps | f024d827a7412f4d9df10d6b9453c2692b1a74c3 | [
"MIT"
] | null | null | null | cyclopeps/tests/ctf_test/test_cal_energy.py | philliphelms/cyclopeps | f024d827a7412f4d9df10d6b9453c2692b1a74c3 | [
"MIT"
] | null | null | null | import unittest
from cyclopeps.tools.utils import *
import copy
from cyclopeps.tools.gen_ten import ones,einsum
class test_cal_energy(unittest.TestCase):
    """Regression tests for 2x2 PEPS norm/energy contraction (ctf backend).

    Each test builds a small PEPS, contracts the norm and the two-site
    operator expectation explicitly with einsum, and compares the results
    against the library routines ``PEPS.calc_norm`` / ``PEPS.calc_op``.
    """

    def test_energy_contraction_ones_z2(self):
        # Identity Hamiltonian on an all-ones Z2-symmetric PEPS: the summed
        # bond "energies" must reproduce the norm (4 bonds -> factor 4).
        mpiprint(0,'\n'+'='*50+'\nPeps Energy (Ham=Identity, peps=ones, Z2 symmetry) calculation\n'+'-'*50)
        # Create a PEPS
        from cyclopeps.tools.peps_tools import PEPS
        Nx = 2
        Ny = 2
        d = 2
        D = 3
        Zn = 2
        backend='ctf'
        peps = PEPS(Nx=Nx,
                    Ny=Ny,
                    d=d,
                    D=D,
                    chi=1000,
                    Zn=2,
                    backend=backend,
                    normalize=False)
        # Set all tensor values to 1
        for xind in range(Nx):
            for yind in range(Ny):
                peps[xind][yind].fill_all(1.)
        # Get the Hamiltonian
        from cyclopeps.ops.identity import return_op
        ham = return_op(Nx,Ny,sym='Z2',backend=backend)
        # Calculate initial norm
        norm0 = peps.calc_norm()*4.
        mpiprint(0,'Norm (routine) = {}'.format(norm0))
        # Perform the Exact energy calculation:
        # (boundary indices of the 2x2 lattice are size-1; remove_empty_ind
        # strips them after each contraction so only physical indices remain)
        bra = einsum('LDWCM,lMXcu->LDluWXCc',peps[0][0],peps[0][1]).remove_empty_ind(0).remove_empty_ind(0).remove_empty_ind(0).remove_empty_ind(0)
        bra = einsum('WXCc,CdYRm->dRWXYcm',bra,peps[1][0]).remove_empty_ind(0).remove_empty_ind(0)
        bra = einsum('WXYcm,cmZru->ruWXYZ',bra,peps[1][1]).remove_empty_ind(0).remove_empty_ind(0)
        norm1 = einsum('WXYZ,WXYZ->',bra,bra.conj())
        norm1 = norm1*4.
        mpiprint(0,'Norm (explicit) = {}'.format(norm1))
        tmp = einsum('WXYZ,wxYZ->WXwx',bra,bra.conj())
        E1 = einsum('WXwx,WXwx->',tmp,ham[0][0][0])
        tmp = einsum('WXYZ,wXyZ->WYwy',bra,bra.conj())
        E1 += einsum('WYwy,WYwy->',tmp,ham[1][0][0])
        tmp = einsum('WXYZ,WXyz->YZyz',bra,bra.conj())
        E1 += einsum('YZyz,YZyz->',tmp,ham[0][1][0])
        tmp = einsum('WXYZ,WxYz->XZxz',bra,bra.conj())
        E1 += einsum('XZxz,XZxz->',tmp,ham[1][1][0])
        mpiprint(0,'Explicitly computed energy (not normalized) = {}'.format(E1))
        # Contract Energy again
        E2 = peps.calc_op(ham,normalize=False)
        mpiprint(0,'Energy via peps Method (not normalized) = {}'.format(E2))
        self.assertTrue(abs((norm0-norm1)/norm0) < 1e-10)
        print('Check here {}, {}, {}, {}'.format(norm0,norm1,E1,E2))
        self.assertTrue(abs((norm0-E1)/norm0) < 1e-10)
        self.assertTrue(abs((norm0-E2)/norm0) < 1e-10)
        mpiprint(0,'Passed\n'+'='*50)

    def test_energy_contraction_ones(self):
        # Same identity/all-ones check as above, but without symmetry.
        mpiprint(0,'\n'+'='*50+'\nPeps Energy (Ham=Identity, peps=ones, no symmetry) calculation\n'+'-'*50)
        # Create a PEPS
        from cyclopeps.tools.peps_tools import PEPS
        Nx = 2
        Ny = 2
        d = 2
        D = 3
        backend='ctf'
        peps = PEPS(Nx=Nx,
                    Ny=Ny,
                    d=d,
                    D=D,
                    chi=1000,
                    Zn=None,
                    backend=backend,
                    normalize=False)
        # Set all tensor values to 1
        for xind in range(Nx):
            for yind in range(Ny):
                peps[xind][yind].fill_all(1.)
        # Get the Hamiltonian
        from cyclopeps.ops.identity import return_op
        ham = return_op(Nx,Ny,backend=backend)
        # Calculate initial norm
        norm0 = peps.calc_norm()*4.
        mpiprint(0,'Norm = {}'.format(norm0))
        # Perform the Exact energy calculation:
        bra = einsum('LDWCM,lMXcu->WXCc',peps[0][0],peps[0][1])
        bra = einsum('WXCc,CdYRm->WXYcm',bra,peps[1][0])
        bra = einsum('WXYcm,cmZru->WXYZ',bra,peps[1][1])
        norm1 = einsum('WXYZ,WXYZ->',bra,bra.conj())*4.
        tmp = einsum('WXYZ,wxYZ->WXwx',bra,bra.conj())
        E1 = einsum('WXwx,WXwx->',tmp,ham[0][0][0])
        tmp = einsum('WXYZ,wXyZ->WYwy',bra,bra.conj())
        E1 += einsum('WYwy,WYwy->',tmp,ham[1][0][0])
        tmp = einsum('WXYZ,WXyz->YZyz',bra,bra.conj())
        E1 += einsum('YZyz,YZyz->',tmp,ham[0][1][0])
        tmp = einsum('WXYZ,WxYz->XZxz',bra,bra.conj())
        E1 += einsum('XZxz,XZxz->',tmp,ham[1][1][0])
        mpiprint(0,'Explicitly computed energy (not normalized) = {}'.format(E1))
        # Contract Energy again
        E2 = peps.calc_op(ham,normalize=False)
        mpiprint(0,'Energy via peps Method (not normalized) = {}'.format(E2))
        self.assertTrue(abs((norm0-norm1)/norm0) < 1e-10)
        self.assertTrue(abs((norm0-E1)/norm0) < 1e-10)
        self.assertTrue(abs((norm0-E2)/norm0) < 1e-10)
        mpiprint(0,'Passed\n'+'='*50)

    def test_energy_contraction_heis_z2(self):
        # Heisenberg Hamiltonian on a random Z2 PEPS: library energy must
        # match the explicit einsum contraction (no norm identity here).
        mpiprint(0,'\n'+'='*50+'\nPeps Energy (Ham=Heisenberg, peps=random, Z2 symmetry) calculation\n'+'-'*50)
        # Create a PEPS
        from cyclopeps.tools.peps_tools import PEPS
        Nx = 2
        Ny = 2
        d = 2
        D = 3
        Zn = 2
        backend='ctf'
        peps = PEPS(Nx=Nx,
                    Ny=Ny,
                    d=d,
                    D=D,
                    chi=1000,
                    Zn=2,
                    backend=backend,
                    normalize=False)
        # Get the Hamiltonian
        from cyclopeps.ops.heis import return_op
        ham = return_op(Nx,Ny,sym='Z2',backend=backend)
        # Calculate initial norm
        norm0 = peps.calc_norm()
        mpiprint(0,'Norm (routine) = {}'.format(norm0))
        # Perform the Exact energy calculation:
        bra = einsum('LDWCM,lMXcu->LDluWXCc',peps[0][0],peps[0][1]).remove_empty_ind(0).remove_empty_ind(0).remove_empty_ind(0).remove_empty_ind(0)
        bra = einsum('WXCc,CdYRm->dRWXYcm',bra,peps[1][0]).remove_empty_ind(0).remove_empty_ind(0)
        bra = einsum('WXYcm,cmZru->ruWXYZ',bra,peps[1][1]).remove_empty_ind(0).remove_empty_ind(0)
        norm1 = einsum('WXYZ,WXYZ->',bra,bra.conj())
        norm1 = norm1
        mpiprint(0,'Norm (explicit) = {}'.format(norm1))
        #print(ham[0][0][0])
        tmp = einsum('WXYZ,wxYZ->WXwx',bra,bra.conj())
        E1 = einsum('WXwx,WXwx->',tmp,ham[0][0][0])
        tmp = einsum('WXYZ,wXyZ->WYwy',bra,bra.conj())
        E1 += einsum('WYwy,WYwy->',tmp,ham[1][0][0])
        tmp = einsum('WXYZ,WXyz->YZyz',bra,bra.conj())
        E1 += einsum('YZyz,YZyz->',tmp,ham[0][1][0])
        tmp = einsum('WXYZ,WxYz->XZxz',bra,bra.conj())
        E1 += einsum('XZxz,XZxz->',tmp,ham[1][1][0])
        E1 = E1
        mpiprint(0,'Explicitly computed energy (not normalized) = {}'.format(E1))
        # Contract Energy again
        E2 = peps.calc_op(ham,normalize=False)
        mpiprint(0,'Energy via peps Method (not normalized) = {}'.format(E2))
        self.assertTrue(abs((norm0-norm1)/norm0) < 1e-10)
        self.assertTrue(abs((E2-E1)/E1) < 1e-10)
        mpiprint(0,'Passed\n'+'='*50)

    def test_energy_contraction_z2(self):
        # Identity Hamiltonian on a random Z2 PEPS: energy == norm again.
        mpiprint(0,'\n'+'='*50+'\nPeps Energy (Ham=Identity, peps=random, Z2 symmetry) calculation\n'+'-'*50)
        # Create a PEPS
        from cyclopeps.tools.peps_tools import PEPS
        Nx = 2
        Ny = 2
        d = 2
        D = 3
        Zn = 2
        backend='ctf'
        peps = PEPS(Nx=Nx,
                    Ny=Ny,
                    d=d,
                    D=D,
                    chi=1000,
                    Zn=2,
                    backend=backend,
                    normalize=False)
        # Get the Hamiltonian
        from cyclopeps.ops.identity import return_op
        ham = return_op(Nx,Ny,sym='Z2',backend=backend)
        # Calculate initial norm
        norm0 = peps.calc_norm()*4.
        mpiprint(0,'Norm (routine) = {}'.format(norm0))
        # Perform the Exact energy calculation:
        bra = einsum('LDWCM,lMXcu->LDluWXCc',peps[0][0],peps[0][1]).remove_empty_ind(0).remove_empty_ind(0).remove_empty_ind(0).remove_empty_ind(0)
        bra = einsum('WXCc,CdYRm->dRWXYcm',bra,peps[1][0]).remove_empty_ind(0).remove_empty_ind(0)
        bra = einsum('WXYcm,cmZru->ruWXYZ',bra,peps[1][1]).remove_empty_ind(0).remove_empty_ind(0)
        norm1 = einsum('WXYZ,WXYZ->',bra,bra.conj())
        norm1 = norm1*4
        mpiprint(0,'Norm (explicit) = {}'.format(norm1))
        tmp = einsum('WXYZ,wxYZ->WXwx',bra,bra.conj())
        E1 = einsum('WXwx,WXwx->',tmp,ham[0][0][0])
        tmp = einsum('WXYZ,wXyZ->WYwy',bra,bra.conj())
        E1 += einsum('WYwy,WYwy->',tmp,ham[1][0][0])
        tmp = einsum('WXYZ,WXyz->YZyz',bra,bra.conj())
        E1 += einsum('YZyz,YZyz->',tmp,ham[0][1][0])
        tmp = einsum('WXYZ,WxYz->XZxz',bra,bra.conj())
        E1 += einsum('XZxz,XZxz->',tmp,ham[1][1][0])
        mpiprint(0,'Explicitly computed energy (not normalized) = {}'.format(E1))
        # Contract Energy again
        E2 = peps.calc_op(ham,normalize=False)
        mpiprint(0,'Energy via peps Method (not normalized) = {}'.format(E2))
        self.assertTrue(abs((norm0-norm1)/norm0) < 1e-10)
        self.assertTrue(abs((E2-E1)/E2) < 1e-10)
        mpiprint(0,'Passed\n'+'='*50)

    def test_energy_itf_contraction_ones(self):
        # Transverse-field Ising Hamiltonian on an all-ones PEPS: compare
        # library energy against the explicit contraction only.
        mpiprint(0,'\n'+'='*50+'\nPeps Energy (Ham=ITF, peps=ones, no symmetry) calculation\n'+'-'*50)
        # Create a PEPS
        from cyclopeps.tools.peps_tools import PEPS
        Nx = 2
        Ny = 2
        d = 2
        D = 3
        backend='ctf'
        peps = PEPS(Nx=Nx,Ny=Ny,d=d,D=D,chi=1000,normalize=False,backend=backend)
        # Set all tensor values to 1
        for xind in range(Nx):
            for yind in range(Ny):
                peps[xind][yind].fill_all(1.)
        # Get the Hamiltonian
        from cyclopeps.ops.itf import return_op
        ham = return_op(Nx,Ny,(1.,2.),backend=backend)
        # Calculate initial norm
        norm0 = peps.calc_norm()
        mpiprint(0,'Norm = {}'.format(norm0))
        # Perform the Exact energy calculation:
        bra = einsum('LDWCM,lMXcu->WXCc',peps[0][0],peps[0][1])
        bra = einsum('WXCc,CdYRm->WXYcm',bra,peps[1][0])
        bra = einsum('WXYcm,cmZru->WXYZ',bra,peps[1][1])
        norm1 = einsum('WXYZ,WXYZ->',bra,bra.conj())
        mpiprint(0,'Explicitly computed norm = {}'.format(norm1))
        tmp = einsum('WXYZ,wxYZ->WXwx',bra,bra.conj())
        E1 = einsum('WXwx,WXwx->',tmp,ham[0][0][0])
        tmp = einsum('WXYZ,wXyZ->WYwy',bra,bra.conj())
        E1 += einsum('WYwy,WYwy->',tmp,ham[1][0][0])
        tmp = einsum('WXYZ,WXyz->YZyz',bra,bra.conj())
        E1 += einsum('YZyz,YZyz->',tmp,ham[0][1][0])
        tmp = einsum('WXYZ,WxYz->XZxz',bra,bra.conj())
        E1 += einsum('XZxz,XZxz->',tmp,ham[1][1][0])
        mpiprint(0,'Explicitly computed energy (not normalized) = {}'.format(E1))
        # Contract Energy again
        E2 = peps.calc_op(ham,normalize=False)
        mpiprint(0,'Energy (routine) = {}'.format(E2))
        self.assertTrue(abs((E2-E1)/E1) < 1e-10)
        mpiprint(0,'Passed\n'+'='*50)

    def test_energy_contraction(self):
        # Identity Hamiltonian on a random PEPS (no symmetry): norm from
        # calc_norm, explicit contraction, and calc_op must all agree.
        mpiprint(0,'\n'+'='*50+'\nPeps Energy (Ham=Identity, peps=random, no symmetry) calculation\n'+'-'*50)
        # Create a PEPS
        from cyclopeps.tools.peps_tools import PEPS
        Nx = 2
        Ny = 2
        d = 2
        D = 3
        backend='ctf'
        peps = PEPS(Nx=Nx,Ny=Ny,d=d,D=D,chi=1000,backend=backend)
        # Get the Hamiltonian
        from cyclopeps.ops.identity import return_op
        ham = return_op(Nx,Ny,backend=backend)
        # Calculate initial norm
        norm0 = peps.calc_norm()*4.
        # Perform the Exact energy calculation:
        bra = einsum('LDWCM,lMXcu->WXCc',peps[0][0],peps[0][1])
        bra = einsum('WXCc,CdYRm->WXYcm',bra,peps[1][0])
        bra = einsum('WXYcm,cmZru->WXYZ',bra,peps[1][1])
        norm1 = einsum('WXYZ,WXYZ->',bra,bra.conj())*4.
        tmp = einsum('WXYZ,wxYZ->WXwx',bra,bra.conj())
        E1 = einsum('WXwx,WXwx->',tmp,ham[0][0][0])
        tmp = einsum('WXYZ,wXyZ->WYwy',bra,bra.conj())
        E1 += einsum('WYwy,WYwy->',tmp,ham[1][0][0])
        tmp = einsum('WXYZ,WXyz->YZyz',bra,bra.conj())
        E1 += einsum('YZyz,YZyz->',tmp,ham[0][1][0])
        tmp = einsum('WXYZ,WxYz->XZxz',bra,bra.conj())
        E1 += einsum('XZxz,XZxz->',tmp,ham[1][1][0])
        # Contract Energy again
        E2 = peps.calc_op(ham,normalize=False)
        self.assertTrue(abs((norm0-norm1)/norm0) < 1e-10)
        mpiprint(0,'Passed Norm1')
        self.assertTrue(abs((norm0-E1)/norm0) < 1e-10)
        mpiprint(0,'Passed E1')
        mpiprint(0,'Norm from calc_norm = {}'.format(norm0))
        mpiprint(0,'Norm from exact contraction {}'.format(norm1))
        mpiprint(0,'Norm from Energy calc op = {}'.format(E2))
        mpiprint(0,'Norm from Energy exact contraction {}'.format(E1))
        #mpiprint(0,norm1,E1,norm0,E2,abs((norm0-E2)/norm0))
        self.assertTrue(abs((norm0-E2)/norm0) < 1e-10)
        mpiprint(0,'Passed\n'+'='*50)

    def test_energy_itf_contraction(self):
        # ITF Hamiltonian with distinct bra/ket PEPS: exercises calc_op's
        # ket= argument (non-hermitian sandwich) against the explicit sum.
        mpiprint(0,'\n'+'='*50+'\nPeps Energy (Ham=ITF, peps=random, no symmetry) calculation\n'+'-'*50)
        # Create a PEPS
        from cyclopeps.tools.peps_tools import PEPS
        Nx = 2
        Ny = 2
        d = 2
        D = 3
        backend='ctf'
        peps = PEPS(Nx=Nx,Ny=Ny,d=d,D=D,chi=1000,backend=backend)
        peps2= PEPS(Nx=Nx,Ny=Ny,d=d,D=D,chi=1000,backend=backend)
        # Get the Hamiltonian
        from cyclopeps.ops.itf import return_op
        ham = return_op(Nx,Ny,(1.,2.),backend=backend)
        # Perform the Exact energy calculation:
        bra = einsum('LDWCM,lMXcu->WXCc',peps[0][0],peps[0][1])
        bra = einsum('WXCc,CdYRm->WXYcm',bra,peps[1][0])
        bra = einsum('WXYcm,cmZru->WXYZ',bra,peps[1][1])
        ket = einsum('LDWCM,lMXcu->WXCc',peps2[0][0],peps2[0][1])
        ket = einsum('WXCc,CdYRm->WXYcm',ket,peps2[1][0])
        ket = einsum('WXYcm,cmZru->WXYZ',ket,peps2[1][1])
        norm1 = einsum('WXYZ,WXYZ->',bra,ket)
        tmp = einsum('WXYZ,wxYZ->WXwx',bra,ket)
        E1 = einsum('WXwx,WXwx->',tmp,ham[0][0][0])
        tmp = einsum('WXYZ,wXyZ->WYwy',bra,ket)
        E2 = einsum('WYwy,WYwy->',tmp,ham[1][0][0])
        tmp = einsum('WXYZ,WXyz->YZyz',bra,ket)
        E3 = einsum('YZyz,YZyz->',tmp,ham[0][1][0])
        tmp = einsum('WXYZ,WxYz->XZxz',bra,ket)
        E4 = einsum('XZxz,XZxz->',tmp,ham[1][1][0])
        E1 = E1+E2+E3+E4
        # Contract Energy again
        E2 = peps.calc_op(ham,normalize=False,chi=1e100,ket=peps2)
        mpiprint(0,'Energy (exact) = {}'.format(E1))
        mpiprint(0,'Energy (routine) = {}'.format(E2))
        self.assertTrue(abs((E2-E1)/E1) < 1e-10)
        mpiprint(0,'Passed\n'+'='*50)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
| 44.665653 | 147 | 0.55638 |
acedfd8dd7155a1130b6f56173e9931a9a6a526c | 1,524 | py | Python | etl/parsers/etw/Microsoft_Antimalware_Protection.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | [
"Apache-2.0"
] | 104 | 2020-03-04T14:31:31.000Z | 2022-03-28T02:59:36.000Z | etl/parsers/etw/Microsoft_Antimalware_Protection.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | [
"Apache-2.0"
] | 7 | 2020-04-20T09:18:39.000Z | 2022-03-19T17:06:19.000Z | etl/parsers/etw/Microsoft_Antimalware_Protection.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | [
"Apache-2.0"
] | 16 | 2020-03-05T18:55:59.000Z | 2022-03-01T10:19:28.000Z | # -*- coding: utf-8 -*-
"""
Microsoft-Antimalware-Protection
GUID : e4b70372-261f-4c54-8fa6-a5a7914d73da
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("e4b70372-261f-4c54-8fa6-a5a7914d73da"), event_id=1, version=0)
class Microsoft_Antimalware_Protection_1_0(Etw):
    # Event id 1, version 0: payload is a single unsigned 32-bit value.
    pattern = Struct(
        "DwordData" / Int32ul
    )
@declare(guid=guid("e4b70372-261f-4c54-8fa6-a5a7914d73da"), event_id=2, version=0)
class Microsoft_Antimalware_Protection_2_0(Etw):
    # Event id 2, version 0: payload is a single unsigned 32-bit value.
    pattern = Struct(
        "DwordData" / Int32ul
    )
@declare(guid=guid("e4b70372-261f-4c54-8fa6-a5a7914d73da"), event_id=3, version=0)
class Microsoft_Antimalware_Protection_3_0(Etw):
    # Event id 3, version 0: payload is a single wide (UTF-16) string.
    pattern = Struct(
        "Description" / WString
    )
@declare(guid=guid("e4b70372-261f-4c54-8fa6-a5a7914d73da"), event_id=4, version=0)
class Microsoft_Antimalware_Protection_4_0(Etw):
    # Event id 4, version 0: payload is a single unsigned 32-bit value.
    pattern = Struct(
        "DwordData" / Int32ul
    )
@declare(guid=guid("e4b70372-261f-4c54-8fa6-a5a7914d73da"), event_id=6, version=0)
class Microsoft_Antimalware_Protection_6_0(Etw):
    # Event id 6, version 0: payload is a single unsigned 32-bit value.
    pattern = Struct(
        "DwordData" / Int32ul
    )
@declare(guid=guid("e4b70372-261f-4c54-8fa6-a5a7914d73da"), event_id=7, version=0)
class Microsoft_Antimalware_Protection_7_0(Etw):
    # Event id 7, version 0: payload is a single wide (UTF-16) string.
    pattern = Struct(
        "Description" / WString
    )
| 28.754717 | 123 | 0.726378 |
acedfe1087be94e1533d833f189d871ef08b526b | 9,293 | py | Python | train_boots.py | kgraczyk/objects_counting_dmap | cf249763c62d82cdc6a97536943f584a7ffce265 | [
"Apache-2.0"
] | null | null | null | train_boots.py | kgraczyk/objects_counting_dmap | cf249763c62d82cdc6a97536943f584a7ffce265 | [
"Apache-2.0"
] | null | null | null | train_boots.py | kgraczyk/objects_counting_dmap | cf249763c62d82cdc6a97536943f584a7ffce265 | [
"Apache-2.0"
] | null | null | null | """Main script used to train networks."""
import os
from typing import Union, Optional, List
import click
import torch
import numpy as np
from matplotlib import pyplot
from pathlib import Path
from data_loader import H5Dataset
from looper import Looper
from model import UNet, UNet2, FCRN_A
@click.command()
@click.option('-d', '--dataset_name',
              type=click.Choice(['cell', 'mall', 'ucsd','nocover']),
              required=True,
              help='Dataset to train model on (expect proper HDF5 files).')
@click.option('-n', '--network_architecture',
              type=click.Choice(['UNet','UNet2', 'FCRN_A']),
              required=True,
              help='Model to train.')
@click.option('-lr', '--learning_rate', default=1e-2,
              help='Initial learning rate (lr_scheduler is applied).')
@click.option('-e', '--epochs', default=150, help='Number of training epochs.')
@click.option('--batch_size', default=8,
              help='Batch size for both training and validation dataloaders.')
@click.option('-hf', '--horizontal_flip', default=0.0,
              help='The probability of horizontal flip for training dataset.')
@click.option('-vf', '--vertical_flip', default=0.0,
              help='The probability of horizontal flip for validation dataset.')
@click.option('--unet_filters', default=64,
              help='Number of filters for U-Net convolutional layers.')
@click.option('--convolutions', default=2,
              help='Number of layers in a convolutional block.')
@click.option('--plot', is_flag=True, help="Generate a live plot.")
def train_boots(dataset_name: str,
                network_architecture: str,
                learning_rate: float,
                epochs: int,
                batch_size: int,
                horizontal_flip: float,
                vertical_flip: float,
                unet_filters: int,
                convolutions: int,
                plot: bool):
    """Train chosen model on selected dataset.

    Bootstrap variant: trains 20 models, each on a random ~63% subsample of
    the training set, checkpoints the best epoch of each model, then reloads
    the best checkpoint and records true/predicted counts on the full train
    set and on the validation set. All artifacts (checkpoints, per-epoch
    histories, prediction CSVs) go to 'boots_results_<dataset_name>/'.
    """
    # use GPU if available
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(device)
    dataset = {}     # training and validation HDF5-based datasets
    dataloader = {}  # training and validation dataloaders
    # only UCSD dataset provides greyscale images instead of RGB
    input_channels = 1 if dataset_name == 'ucsd' else 3
    # Filename fragments encoding the hyper-parameters of this run.
    epochs__ = 'epochs='+str(epochs)
    batch_size__ = 'batch='+str(batch_size)
    horizontal_flip__ = 'hf='+str(horizontal_flip)
    vertical_flip__ = 'vf=' + str(vertical_flip)
    unet_filters__ = 'uf=' + str(unet_filters)
    convolutions__ = "conv"+str(convolutions)
    dirr = 'boots_results_'+dataset_name
    Path(dirr).mkdir(parents=True, exist_ok=True)
    # if plot flag is on, create a live plot (to be updated by Looper)
    if plot:
        pyplot.ion()
        fig, plots = pyplot.subplots(nrows=2, ncols=2)
    else:
        plots = [None] * 2
    for mode in ['train', 'valid']:
        # expected HDF5 files in dataset_name/(train | valid).h5
        data_path = os.path.join(dataset_name, f"{mode}.h5")
        # turn on flips only for training dataset
        dataset[mode] = H5Dataset(data_path,
                                  horizontal_flip if mode == 'train' else 0,
                                  vertical_flip if mode == 'train' else 0)
    #train_indices = torch.zeros_like(dataset[mode].shape[0])
    n_samples = len(dataset['train'])
    #print("******", n_samples)
    # ~63.2% is the expected fraction of distinct samples in a classic
    # bootstrap resample; here it is drawn without replacement.
    sampling_ratio = int(0.63*n_samples)
    results_train = []  # columns: true counts (once), then one prediction column per bootstrap
    results_test = []   # same layout for the validation set
    for i in range(20):
        # initialize a model based on chosen network_architecture
        network = {
            'UNet': UNet,
            'UNet2': UNet2,
            'FCRN_A': FCRN_A,
        }[network_architecture](input_filters=input_channels,
                                filters=unet_filters,
                                N=convolutions,p=0).to(device)
        network = torch.nn.DataParallel(network)
        # initialize loss, optimized and learning rate scheduler
        loss = torch.nn.MSELoss()
        optimizer = torch.optim.SGD(network.parameters(),
                                    lr=learning_rate,
                                    momentum=0.9,
                                    weight_decay=1e-5)
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                       step_size=20,
                                                       gamma=0.1)
        # Fresh random subsample of the training set for this bootstrap.
        ntrain = torch.randperm(n_samples)[:sampling_ratio]
        sampler=torch.utils.data.SubsetRandomSampler(ntrain)
        dataloader['train'] = torch.utils.data.DataLoader(dataset['train'],
                            batch_size=batch_size,sampler=sampler)
        # Second train loader over the FULL training set (used for scoring).
        dataloadertrain2 = torch.utils.data.DataLoader(dataset['train'],
                            batch_size=batch_size)
        dataloader['valid'] = torch.utils.data.DataLoader(dataset['valid'],
                             batch_size=1)
        # create training and validation Loopers to handle a single epoch
        train_looper = Looper(network, device, loss, optimizer,
                              dataloader['train'], len(dataset['train']), plots[0],False)
        train_looper.LOG=True
        # current best results (lowest mean absolute error on validation set)
        current_best = np.infty
        for epoch in range(epochs):
            print(f"Epoch {epoch + 1}\n")
            # run training epoch and update learning rate
            result = train_looper.run()
            lr_scheduler.step()
            # update checkpoint if new best is reached
            if result < current_best:
                current_best = result
                torch.save(network.state_dict(),
                           os.path.join(dirr,f'{dataset_name}_boot_i={i}_{network_architecture}_{epochs__}_{batch_size__}_{horizontal_flip__}_{vertical_flip__}_{unet_filters__}_{convolutions__}.pth'))
                hist = []
                #hist.append(valid_looper.history[-1])
                hist.append(train_looper.history[-1])
                #hist = np.array(hist)
                #print(hist)
                np.savetxt(os.path.join(dirr,f'hist_best_boot_{dataset_name}_{network_architecture}_i={i}_{epochs__}_{batch_size__}_{horizontal_flip__}_{vertical_flip__}_{unet_filters__}_{convolutions__}.csv')
                    ,hist, delimiter=',')
                print(f"\nNew best result: {result}")
            print("\n", "-"*80, "\n", sep='')
        if plot:
            fig.savefig(os.path.join(dirr,f'status_boot_i={i}_{dataset_name}_{network_architecture}_{epochs__}_{batch_size__}_{horizontal_flip__}_{vertical_flip__}_{unet_filters__}_{convolutions__}.png'))
        # Reload the best checkpoint of this bootstrap before scoring.
        network.load_state_dict(torch.load(os.path.join(dirr,f'{dataset_name}_boot_i={i}_{network_architecture}_{epochs__}_{batch_size__}_{horizontal_flip__}_{vertical_flip__}_{unet_filters__}_{convolutions__}.pth')))
        valid_looper = Looper(network, device, loss, optimizer,
                              dataloader['valid'], len(dataset['valid']), None, False,
                              validation=True)
        # NOTE(review): sibling Looper calls pass the DATASET length; here the
        # DataLoader length (number of batches) is passed — confirm which one
        # Looper expects for its size argument.
        train_looper2 = Looper(network, device, loss, optimizer,
                               dataloadertrain2, len(dataloadertrain2), None, False,
                               validation=True)
        valid_looper.LOG=False
        train_looper2.LOG=False
        # NOTE(review): MC flag is only disabled on the validation looper —
        # confirm this asymmetry is intentional.
        valid_looper.MC=False
        with torch.no_grad():
            valid_looper.run()
            train_looper2.run()
            # True counts are identical across bootstraps: store them once.
            if i==0:
                results_train.append(train_looper2.true_values)
                results_test.append(valid_looper.true_values)
            results_train.append(train_looper2.predicted_values)
            results_test.append(valid_looper.predicted_values)
        print(f"[Training done] Best result: {current_best}")
        hist = np.array(train_looper.history)
        np.savetxt(os.path.join(dirr,f'hist_train_boot_i={i}_{dataset_name}_{network_architecture}_{epochs__}_{batch_size__}_{horizontal_flip__}_{vertical_flip__}_{unet_filters__}_{convolutions__}.csv') ,hist,delimiter=',')
        hist = np.array(valid_looper.history)
        np.savetxt(os.path.join(dirr,f'hist_test_boot_i={i}_{dataset_name}_{network_architecture}_{epochs__}_{batch_size__}_{horizontal_flip__}_{vertical_flip__}_{unet_filters__}_{convolutions__}.csv') , hist,delimiter=',')
    # Persist the collected true/predicted counts (samples as rows).
    results_train=np.array(results_train)
    results_train = results_train.transpose()
    np.savetxt(os.path.join(dirr,f'predicted_train_best_boot_{dataset_name}_{network_architecture}_{epochs__}_{batch_size__}_{horizontal_flip__}_{vertical_flip__}_{unet_filters__}_{convolutions__}.csv')
        ,results_train, delimiter=',')
    results_test=np.array(results_test)
    results_test = results_test.transpose()
    np.savetxt(os.path.join(dirr,f'predicted_test_best_boot_{dataset_name}_{network_architecture}_{epochs__}_{batch_size__}_{horizontal_flip__}_{vertical_flip__}_{unet_filters__}_{convolutions__}.csv')
        ,results_test, delimiter=',')
# Entry point when invoked as a script (click parses the CLI options).
if __name__ == '__main__':
    train_boots()
| 40.758772 | 223 | 0.612074 |
acedfe4022c14046963f94a46e5489391e5ec6dc | 8,862 | py | Python | tfx/examples/airflow_workshop/setup/dags/taxi_pipeline.py | Mikehem/tfx | e803ea6778d8550ec77dcc92bc8172f1a3a90f38 | [
"Apache-2.0"
] | null | null | null | tfx/examples/airflow_workshop/setup/dags/taxi_pipeline.py | Mikehem/tfx | e803ea6778d8550ec77dcc92bc8172f1a3a90f38 | [
"Apache-2.0"
] | 1 | 2021-02-24T00:55:55.000Z | 2021-02-24T01:16:36.000Z | tfx/examples/airflow_workshop/setup/dags/taxi_pipeline.py | fanszoro/tfx | b1acab7bf89ec1364c96b9b4e2cc41594407b86c | [
"Apache-2.0"
] | null | null | null | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
# pylint: disable=unused-import
# pylint: disable=unused-argument
"""Chicago taxi example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from typing import List, Text
from tfx.components import CsvExampleGen
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.components.base import executor_spec
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.airflow.airflow_dag_runner import AirflowDagRunner
from tfx.orchestration.airflow.airflow_dag_runner import AirflowPipelineConfig
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
from tfx.utils.dsl_utils import external_input
# from tfx.components import StatisticsGen # Step 3
# from tfx.components import SchemaGen # Step 3
# from tfx.components import ExampleValidator # Step 3
# from tfx.components import Transform # Step 4
# from tfx.components import Trainer # Step 5
# from tfx.proto import trainer_pb2 # Step 5
# import tensorflow_model_analysis as tfma # Step 5
# from tfx.components import Evaluator # Step 6
# from tfx.components import ResolverNode # Step 6
# from tfx.dsl.experimental import latest_blessed_model_resolver # Step 6
# from tfx.components import Pusher # Step 7
# from tfx.proto import pusher_pb2 # Step 7
# Name used both for the pipeline itself and for its artifact sub-directories.
_pipeline_name = 'taxi'

# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
# NOTE(review): os.environ['HOME'] raises KeyError when HOME is unset
# (e.g. some Windows/CI shells) — os.path.expanduser('~') would be safer.
_taxi_root = os.path.join(os.environ['HOME'], 'airflow')
_data_root = os.path.join(_taxi_root, 'data', 'taxi_data')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_taxi_root, 'dags', 'taxi_utils_solution.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model', _pipeline_name)

# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(_taxi_root, 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')

# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]

# Airflow-specific configs; these will be passed directly to airflow
_airflow_config = {
    'schedule_interval': None,
    'start_date': datetime.datetime(2019, 1, 1),
}
# TODO(b/137289334): rename this as simple after DAG visualization is done.
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
    """Implements the chicago taxi pipeline with TFX.

    Only CsvExampleGen is currently active; the remaining components are
    deliberately commented out and are meant to be enabled incrementally
    (see the "# Step N" markers on each block).

    Args:
        pipeline_name: Name the pipeline is registered under.
        pipeline_root: Root directory for the pipeline's output artifacts.
        data_root: Directory containing the CSV input data.
        module_file: Path to the user module with Transform/Trainer functions.
        serving_model_dir: Directory the Pusher writes blessed models to.
        metadata_path: Path of the sqlite ML-metadata database file.
        beam_pipeline_args: Arguments forwarded to Beam-powered components.

    Returns:
        A configured ``pipeline.Pipeline`` with caching enabled.
    """
    examples = external_input(data_root)

    # Brings data into the pipeline or otherwise joins/converts training data.
    example_gen = CsvExampleGen(input=examples)

    # Computes statistics over data for visualization and example validation.
    # statistics_gen = StatisticsGen(examples=example_gen.outputs['examples']) # Step 3

    # Generates schema based on statistics files.
    # infer_schema = SchemaGen( # Step 3
    #     statistics=statistics_gen.outputs['statistics'], # Step 3
    #     infer_feature_shape=False) # Step 3

    # Performs anomaly detection based on statistics and data schema.
    # validate_stats = ExampleValidator( # Step 3
    #     statistics=statistics_gen.outputs['statistics'], # Step 3
    #     schema=infer_schema.outputs['schema']) # Step 3

    # Performs transformations and feature engineering in training and serving.
    # transform = Transform( # Step 4
    #     examples=example_gen.outputs['examples'], # Step 4
    #     schema=infer_schema.outputs['schema'], # Step 4
    #     module_file=module_file) # Step 4

    # Uses user-provided Python function that implements a model using TF-Learn.
    # trainer = Trainer( # Step 5
    #     module_file=module_file, # Step 5
    #     custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor), # Step 5
    #     examples=transform.outputs['transformed_examples'], # Step 5
    #     transform_graph=transform.outputs['transform_graph'], # Step 5
    #     schema=infer_schema.outputs['schema'], # Step 5
    #     train_args=trainer_pb2.TrainArgs(num_steps=10000), # Step 5
    #     eval_args=trainer_pb2.EvalArgs(num_steps=5000)) # Step 5

    # Get the latest blessed model for model validation.
    # model_resolver = ResolverNode( # Step 6
    #     instance_name='latest_blessed_model_resolver', # Step 6
    #     resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver, # Step 6
    #     model=Channel(type=Model), # Step 6
    #     model_blessing=Channel(type=ModelBlessing)) # Step 6

    # Uses TFMA to compute a evaluation statistics over features of a model and
    # perform quality validation of a candidate model (compared to a baseline).
    # eval_config = tfma.EvalConfig( # Step 6
    #     model_specs=[tfma.ModelSpec(label_key='tips')], # Step 6
    #     slicing_specs=[tfma.SlicingSpec()], # Step 6
    #     metrics_specs=[ # Step 6
    #         tfma.MetricsSpec(metrics=[ # Step 6
    #             tfma.MetricConfig( # Step 6
    #                 class_name='BinaryAccuracy', # Step 6
    #                 threshold=tfma.MetricThreshold( # Step 6
    #                     value_threshold=tfma.GenericValueThreshold( # Step 6
    #                         lower_bound={'value': 0.6}), # Step 6
    #                     change_threshold=tfma.GenericChangeThreshold( # Step 6
    #                         direction=tfma.MetricDirection.HIGHER_IS_BETTER, # Step 6
    #                         absolute={'value': -1e-10}))) # Step 6
    #         ]) # Step 6
    #     ]) # Step 6
    # model_analyzer = Evaluator( # Step 6
    #     examples=example_gen.outputs['examples'], # Step 6
    #     model=trainer.outputs['model'], # Step 6
    #     baseline_model=model_resolver.outputs['model'], # Step 6
    #     # Change threshold will be ignored if there is no baseline (first run). # Step 6
    #     eval_config=eval_config) # Step 6

    # Checks whether the model passed the validation steps and pushes the model
    # to a file destination if check passed.
    # pusher = Pusher( # Step 7
    #     model=trainer.outputs['model'], # Step 7
    #     model_blessing=model_analyzer.outputs['blessing'], # Step 7
    #     push_destination=pusher_pb2.PushDestination( # Step 7
    #         filesystem=pusher_pb2.PushDestination.Filesystem( # Step 7
    #             base_directory=serving_model_dir))) # Step 7

    return pipeline.Pipeline(
        pipeline_name=pipeline_name,
        pipeline_root=pipeline_root,
        components=[
            example_gen,
            # statistics_gen, # Step 3
            # infer_schema, # Step 3
            # validate_stats, # Step 3
            # transform, # Step 4
            # trainer, # Step 5
            # model_resolver, # Step 6
            # model_analyzer, # Step 6
            # pusher, # Step 7
        ],
        enable_cache=True,
        metadata_connection_config=metadata.sqlite_metadata_connection_config(
            metadata_path),
        beam_pipeline_args=beam_pipeline_args)
# 'DAG' below need to be kept for Airflow to detect dag.
# Airflow discovers DAGs by scanning module-level globals, so the object
# returned by the runner (presumably the built Airflow DAG — confirm against
# AirflowDagRunner.run's return value) must be bound to a module-level name.
DAG = AirflowDagRunner(AirflowPipelineConfig(_airflow_config)).run(
    _create_pipeline(
        pipeline_name=_pipeline_name,
        pipeline_root=_pipeline_root,
        data_root=_data_root,
        module_file=_module_file,
        serving_model_dir=_serving_model_dir,
        metadata_path=_metadata_path,
        beam_pipeline_args=_beam_pipeline_args))
| 42.811594 | 89 | 0.713609 |
acedfe6617a3cadc10fe6486b8c27a6b46166eea | 49,106 | py | Python | numba/core/typing/templates.py | numba/numba | 8e6fa5690fbe4138abf69263363be85987891e8b | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 6,620 | 2015-01-04T08:51:04.000Z | 2022-03-31T12:52:18.000Z | numba/core/typing/templates.py | numba/numba | 8e6fa5690fbe4138abf69263363be85987891e8b | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 6,457 | 2015-01-04T03:18:41.000Z | 2022-03-31T17:38:42.000Z | numba/core/typing/templates.py | numba/numba | 8e6fa5690fbe4138abf69263363be85987891e8b | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 930 | 2015-01-25T02:33:03.000Z | 2022-03-30T14:10:32.000Z | """
Define typing templates
"""
from abc import ABC, abstractmethod
import functools
import sys
import inspect
import os.path
from collections import namedtuple
from collections.abc import Sequence
from types import MethodType, FunctionType
import numba
from numba.core import types, utils
from numba.core.errors import (
TypingError,
InternalError,
InternalTargetMismatchError,
)
from numba.core.cpu_options import InlineOptions
# Info store for inliner callback functions (e.g. a user-supplied cost model):
# `func_ir`, `typemap` and `calltypes` come from type inference over the
# inlinee and `signature` is its resolved signature.
_inline_info = namedtuple('inline_info',
                          'func_ir typemap calltypes signature')
class Signature(object):
    """
    The signature of a function call or operation, i.e. its argument types
    and return type.
    """

    # XXX Perhaps the signature should be a BoundArguments, instead
    # of separate args and pysig...
    __slots__ = '_return_type', '_args', '_recvr', '_pysig'

    def __init__(self, return_type, args, recvr, pysig=None):
        # Normalize args to a tuple so signatures are hashable/immutable.
        if isinstance(args, list):
            args = tuple(args)
        self._return_type = return_type
        self._args = args
        self._recvr = recvr
        self._pysig = pysig

    @property
    def return_type(self):
        return self._return_type

    @property
    def args(self):
        return self._args

    @property
    def recvr(self):
        # The receiver ("self") type for bound-method signatures, else None.
        return self._recvr

    @property
    def pysig(self):
        # The matching inspect.Signature-style Python signature, if any.
        return self._pysig

    def replace(self, **kwargs):
        """Copy and replace the given attributes provided as keyword arguments.
        Returns an updated copy.
        """
        curstate = dict(return_type=self.return_type,
                        args=self.args,
                        recvr=self.recvr,
                        pysig=self.pysig)
        curstate.update(kwargs)
        return Signature(**curstate)

    def __getstate__(self):
        """
        Needed because of __slots__.
        """
        return self._return_type, self._args, self._recvr, self._pysig

    def __setstate__(self, state):
        """
        Needed because of __slots__.
        """
        self._return_type, self._args, self._recvr, self._pysig = state

    def __hash__(self):
        # Only args and return type participate in the hash; signatures
        # differing only in recvr/pysig hash alike but still compare
        # unequal via __eq__, which is a valid (if looser) hash contract.
        return hash((self.args, self.return_type))

    def __eq__(self, other):
        if isinstance(other, Signature):
            return (self.args == other.args and
                    self.return_type == other.return_type and
                    self.recvr == other.recvr and
                    self.pysig == other.pysig)
        # BUG FIX: this method previously fell through and implicitly
        # returned None for non-Signature operands, making `sig == x`
        # evaluate to None instead of False.  Returning NotImplemented
        # defers to the reflected operation, per the data model.
        return NotImplemented

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return "%s -> %s" % (self.args, self.return_type)

    @property
    def is_method(self):
        """
        Whether this signature represents a bound method or a regular
        function.
        """
        return self.recvr is not None

    def as_method(self):
        """
        Convert this signature to a bound method signature.
        """
        if self.recvr is not None:
            return self
        # The first argument becomes the receiver.
        sig = signature(self.return_type, *self.args[1:],
                        recvr=self.args[0])

        # Adjust the python signature
        params = list(self.pysig.parameters.values())[1:]
        sig = sig.replace(
            pysig=utils.pySignature(
                parameters=params,
                return_annotation=self.pysig.return_annotation,
            ),
        )
        return sig

    def as_function(self):
        """
        Convert this signature to a regular function signature.
        """
        if self.recvr is None:
            return self
        sig = signature(self.return_type, *((self.recvr,) + self.args))
        return sig

    def as_type(self):
        """
        Convert this signature to a first-class function type.
        """
        return types.FunctionType(self)

    def __unliteral__(self):
        # Strip literal types from both the return type and the arguments.
        return signature(types.unliteral(self.return_type),
                         *map(types.unliteral, self.args))

    def dump(self, tab=''):
        """Print a human-readable dump of this signature (debug aid)."""
        c = self.as_type()._code
        print(f'{tab}DUMP {type(self).__name__} [type code: {c}]')
        print(f'{tab}  Argument types:')
        for a in self.args:
            a.dump(tab=tab + '  | ')
        print(f'{tab}  Return type:')
        self.return_type.dump(tab=tab + '  | ')
        print(f'{tab}END DUMP')

    def is_precise(self):
        """Whether all argument types and the return type are precise."""
        for atype in self.args:
            if not atype.is_precise():
                return False
        return self.return_type.is_precise()
def make_concrete_template(name, key, signatures):
    """
    Build a ConcreteTemplate subclass called *name* for *key*, whose
    candidate cases are the given *signatures*.
    """
    members = dict(key=key, cases=list(signatures))
    return type(name, (ConcreteTemplate,), members)
def make_callable_template(key, typer, recvr=None):
    """
    Create a callable template with the given key and typer function.
    """
    def generic(self):
        return typer

    template_name = "%s_CallableTemplate" % (key,)
    members = dict(key=key, generic=generic, recvr=recvr)
    return type(template_name, (CallableTemplate,), members)
def signature(return_type, *args, **kws):
    """Shorthand constructor for a Signature; the only supported keyword
    argument is ``recvr`` (receiver type for bound-method signatures).
    """
    receiver = kws.pop('recvr', None)
    assert not kws
    return Signature(return_type, args, recvr=receiver)
def fold_arguments(pysig, args, kws, normal_handler, default_handler,
                   stararg_handler):
    """
    Given the signature *pysig*, explicit *args* and *kws*, resolve
    omitted arguments and keyword arguments. A tuple of positional
    arguments is returned.
    Various handlers allow to process arguments:
    - normal_handler(index, param, value) is called for normal arguments
    - default_handler(index, param, default) is called for omitted arguments
    - stararg_handler(index, param, values) is called for a "*args" argument

    Keyword-only arguments are expected to be passed as the trailing
    entries of *args* (one per keyword-only parameter, in declaration
    order).
    """
    if isinstance(kws, Sequence):
        # Normalize a sequence of (name, value) pairs into a dict
        kws = dict(kws)

    # deal with kwonly args
    params = pysig.parameters
    kwonly = []
    for name, p in params.items():
        if p.kind == p.KEYWORD_ONLY:
            kwonly.append(name)
    if kwonly:
        bind_args = args[:-len(kwonly)]
    else:
        bind_args = args
    bind_kws = kws.copy()
    if kwonly:
        for idx, n in enumerate(kwonly):
            # BUG FIX: the keyword-only values are the trailing
            # len(kwonly) entries of *args*, so they must be indexed
            # relative to the end of the positional arguments.  The
            # previous `args[len(kwonly) + idx]` was only correct when
            # the number of positional arguments happened to equal the
            # number of keyword-only arguments.
            bind_kws[n] = args[len(bind_args) + idx]

    # now bind
    ba = pysig.bind(*bind_args, **bind_kws)
    for i, param in enumerate(pysig.parameters.values()):
        name = param.name
        default = param.default
        if param.kind == param.VAR_POSITIONAL:
            # stararg may be omitted, in which case its "default" value
            # is simply the empty tuple
            if name in ba.arguments:
                argval = ba.arguments[name]
                # NOTE: avoid wrapping the tuple type for stararg in another
                #       tuple.
                if (len(argval) == 1 and
                        isinstance(argval[0], (types.StarArgTuple,
                                               types.StarArgUniTuple))):
                    argval = tuple(argval[0])
            else:
                argval = ()
            out = stararg_handler(i, param, argval)

            ba.arguments[name] = out
        elif name in ba.arguments:
            # Non-stararg, present
            ba.arguments[name] = normal_handler(i, param, ba.arguments[name])
        else:
            # Non-stararg, omitted
            assert default is not param.empty
            ba.arguments[name] = default_handler(i, param, default)
    # Collect args in the right order
    args = tuple(ba.arguments[param.name]
                 for param in pysig.parameters.values())
    return args
class FunctionTemplate(ABC):
    """Abstract base class for all typing templates.

    A template knows how to resolve a call signature for a given callable
    *key* and how to report source information for error messages.
    """
    # Set to true to disable unsafe cast.
    # subclass overide-able
    unsafe_casting = True
    # Set to true to require exact match without casting.
    # subclass overide-able
    exact_match_required = False
    # Set to true to prefer literal arguments.
    # Useful for definitions that specialize on literal but also support
    # non-literals.
    # subclass overide-able
    prefer_literal = False
    # metadata (shared class-level default; subclasses supply their own dict)
    metadata = {}

    def __init__(self, context):
        # The typing context this template resolves against.
        self.context = context

    def _select(self, cases, args, kws):
        # Resolve the best matching signature among *cases* for the given
        # argument types, honoring the casting options declared above.
        options = {
            'unsafe_casting': self.unsafe_casting,
            'exact_match_required': self.exact_match_required,
        }
        selected = self.context.resolve_overload(self.key, cases, args, kws,
                                                 **options)
        return selected

    def get_impl_key(self, sig):
        """
        Return the key for looking up the implementation for the given
        signature on the target context.
        """
        # Lookup the key on the class, to avoid binding it with `self`.
        key = type(self).key
        # BUG FIX: unwrap bound methods via `__func__`.  The previous code
        # used the Python 2 attributes `im_self`/`im_func`, which do not
        # exist on Python 3 method objects and made this branch raise
        # AttributeError whenever `key` was a bound method.
        if isinstance(key, MethodType):
            key = key.__func__
        return key

    @classmethod
    def get_source_code_info(cls, impl):
        """
        Gets the source information about function impl.
        Returns:
            code - str: source code as a string
            firstlineno - int: the first line number of the function impl
            path - str: the path to file containing impl
        if any of the above are not available something generic is returned
        """
        try:
            code, firstlineno = inspect.getsourcelines(impl)
        except OSError:  # missing source, probably a string
            code = "None available (built from string?)"
            firstlineno = 0
        path = inspect.getsourcefile(impl)
        if path is None:
            path = "<unknown> (built from string?)"
        return code, firstlineno, path

    @abstractmethod
    def get_template_info(self):
        """
        Returns a dictionary with information specific to the template that will
        govern how error messages are displayed to users. The dictionary must
        be of the form:
        info = {
            'kind': "unknown", # str: The kind of template, e.g. "Overload"
            'name': "unknown", # str: The name of the source function
            'sig': "unknown",  # str: The signature(s) of the source function
            'filename': "unknown", # str: The filename of the source function
            'lines': ("start", "end"), # tuple(int, int): The start and
                                         end line of the source function.
            'docstring': "unknown" # str: The docstring of the source function
        }
        """
        pass

    def __str__(self):
        # e.g. "<OverloadTemplate_foo path/to/file.py:42>"
        info = self.get_template_info()
        srcinfo = f"{info['filename']}:{info['lines'][0]}"
        return f"<{self.__class__.__name__} {srcinfo}>"

    __repr__ = __str__
class AbstractTemplate(FunctionTemplate):
    """
    Defines method ``generic(self, args, kws)`` which compute a possible
    signature base on input types. The signature does not have to match the
    input types. It is compared against the input types afterwards.
    """

    def apply(self, args, kws):
        typer = getattr(self, "generic")
        sig = typer(args, kws)

        # Enforce that *generic()* must return None or Signature
        if sig is not None and not isinstance(sig, Signature):
            raise AssertionError(
                "generic() must return a Signature or None. "
                "{} returned {}".format(typer, type(sig)),
            )

        # No match so far: retry once with Optional argument types unwrapped.
        if not sig and any(isinstance(a, types.Optional) for a in args):
            stripped = [a.type if isinstance(a, types.Optional) else a
                        for a in args]
            assert not kws  # Not supported yet
            sig = typer(list(stripped), kws)

        return sig

    def get_template_info(self):
        impl = getattr(self, "generic")
        basepath = os.path.dirname(os.path.dirname(numba.__file__))
        code, firstlineno, path = self.get_source_code_info(impl)
        return {
            'kind': "overload",
            'name': getattr(impl, '__qualname__', impl.__name__),
            'sig': str(utils.pysignature(impl)),
            'filename': utils.safe_relpath(path, start=basepath),
            'lines': (firstlineno, firstlineno + len(code) - 1),
            'docstring': impl.__doc__,
        }
class CallableTemplate(FunctionTemplate):
    """
    Base class for a template defining a ``generic(self)`` method
    returning a callable to be called with the actual ``*args`` and
    ``**kwargs`` representing the call signature. The callable has
    to return a return type, a full signature, or None. The signature
    does not have to match the input types. It is compared against the
    input types afterwards.
    """
    # Optional receiver ("self") type attached to resolved signatures.
    recvr = None

    def apply(self, args, kws):
        """Resolve a call signature for the given argument types, or return
        None when the typer declines the call."""
        generic = getattr(self, "generic")
        typer = generic()
        # Check the call arguments actually bind to the typer's signature
        # before invoking it, so a mismatch surfaces as a TypingError.
        match_sig = inspect.signature(typer)
        try:
            match_sig.bind(*args, **kws)
        except TypeError as e:
            # bind failed, raise, if there's a
            # ValueError then there's likely unrecoverable
            # problems
            raise TypingError(str(e)) from e

        sig = typer(*args, **kws)

        # Unpack optional type if no matching signature
        if sig is None:
            if any(isinstance(x, types.Optional) for x in args):
                def unpack_opt(x):
                    if isinstance(x, types.Optional):
                        return x.type
                    else:
                        return x

                args = list(map(unpack_opt, args))
                sig = typer(*args, **kws)
            if sig is None:
                return

        # Get the pysig (a typer may carry an explicit `pysig` attribute;
        # otherwise introspect the typer callable itself)
        try:
            pysig = typer.pysig
        except AttributeError:
            pysig = utils.pysignature(typer)

        # Fold any keyword arguments
        bound = pysig.bind(*args, **kws)
        if bound.kwargs:
            raise TypingError("unsupported call signature")
        if not isinstance(sig, Signature):
            # If not a signature, `sig` is assumed to be the return type
            if not isinstance(sig, types.Type):
                raise TypeError("invalid return type for callable template: "
                                "got %r" % (sig,))
            sig = signature(sig, *bound.args)
        if self.recvr is not None:
            sig = sig.replace(recvr=self.recvr)
        # Hack any omitted parameters out of the typer's pysig,
        # as lowering expects an exact match between formal signature
        # and actual args.
        if len(bound.args) < len(pysig.parameters):
            parameters = list(pysig.parameters.values())[:len(bound.args)]
            pysig = pysig.replace(parameters=parameters)
        sig = sig.replace(pysig=pysig)
        cases = [sig]
        # Delegate final overload selection (casting rules etc.) to _select.
        return self._select(cases, bound.args, bound.kwargs)

    def get_template_info(self):
        """Source/description info used when rendering typing errors."""
        impl = getattr(self, "generic")
        basepath = os.path.dirname(os.path.dirname(numba.__file__))
        code, firstlineno, path = self.get_source_code_info(impl)
        sig = str(utils.pysignature(impl))
        info = {
            'kind': "overload",
            'name': getattr(self.key, '__name__',
                            getattr(impl, '__qualname__', impl.__name__),),
            'sig': sig,
            'filename': utils.safe_relpath(path, start=basepath),
            'lines': (firstlineno, firstlineno + len(code) - 1),
            'docstring': impl.__doc__
        }
        return info
class ConcreteTemplate(FunctionTemplate):
    """
    Defines attributes "cases" as a list of signature to match against the
    given input types.
    """

    def apply(self, args, kws):
        # Match the input types directly against the declared cases.
        return self._select(getattr(self, 'cases'), args, kws)

    def get_template_info(self):
        import operator
        name = getattr(self.key, '__name__', "unknown")
        # If the key is the operator-module function of the same name, this
        # template is an operator overload rather than a plain function.
        kind = "Type restricted function"
        op_func = getattr(operator, name, None)
        if op_func is not None and self.key is op_func:
            kind = "operator overload"
        return {
            'kind': kind,
            'name': name,
            'sig': "unknown",
            'filename': "unknown",
            'lines': ("unknown", "unknown"),
            'docstring': "unknown",
        }
class _EmptyImplementationEntry(InternalError):
    """Sentinel error stored where a compiled overload would normally live,
    for overloads that are always inlined and therefore never lowered
    directly."""

    def __init__(self, reason):
        super().__init__("_EmptyImplementationEntry({!r})".format(reason))
class _OverloadFunctionTemplate(AbstractTemplate):
    """
    A base class of templates for overload functions.
    """

    def _validate_sigs(self, typing_func, impl_func):
        # check that the impl func and the typing func have the same signature!
        typing_sig = utils.pysignature(typing_func)
        impl_sig = utils.pysignature(impl_func)
        # the typing signature is considered golden and must be adhered to by
        # the implementation...
        # Things that are valid:
        # 1. args match exactly
        # 2. kwargs match exactly in name and default value
        # 3. Use of *args in the same location by the same name in both typing
        #    and implementation signature
        # 4. Use of *args in the implementation signature to consume any number
        #    of arguments in the typing signature.
        # Things that are invalid:
        # 5. Use of *args in the typing signature that is not replicated
        #    in the implementing signature
        # 6. Use of **kwargs

        def get_args_kwargs(sig):
            # Split a signature into (required args, kwargs-with-defaults,
            # *args parameter if any); reject **kwargs outright.
            kws = []
            args = []
            pos_arg = None
            for x in sig.parameters.values():
                if x.default == utils.pyParameter.empty:
                    args.append(x)
                    if x.kind == utils.pyParameter.VAR_POSITIONAL:
                        pos_arg = x
                    elif x.kind == utils.pyParameter.VAR_KEYWORD:
                        msg = ("The use of VAR_KEYWORD (e.g. **kwargs) is "
                               "unsupported. (offending argument name is '%s')")
                        raise InternalError(msg % x)
                else:
                    kws.append(x)
            return args, kws, pos_arg

        ty_args, ty_kws, ty_pos = get_args_kwargs(typing_sig)
        im_args, im_kws, im_pos = get_args_kwargs(impl_sig)

        sig_fmt = ("Typing signature:         %s\n"
                   "Implementation signature: %s")
        sig_str = sig_fmt % (typing_sig, impl_sig)

        err_prefix = "Typing and implementation arguments differ in "

        a = ty_args
        b = im_args
        if ty_pos:
            if not im_pos:
                # case 5. described above
                msg = ("VAR_POSITIONAL (e.g. *args) argument kind (offending "
                       "argument name is '%s') found in the typing function "
                       "signature, but is not in the implementing function "
                       "signature.\n%s") % (ty_pos, sig_str)
                raise InternalError(msg)
        else:
            if im_pos:
                # no *args in typing but there's a *args in the implementation
                # this is case 4. described above
                b = im_args[:im_args.index(im_pos)]
                try:
                    a = ty_args[:ty_args.index(b[-1]) + 1]
                except ValueError:
                    # there's no b[-1] arg name in the ty_args, something is
                    # very wrong, we can't work out a diff (*args consumes
                    # unknown quantity of args) so just report first error
                    specialized = "argument names.\n%s\nFirst difference: '%s'"
                    msg = err_prefix + specialized % (sig_str, b[-1])
                    raise InternalError(msg)

        def gen_diff(typing, implementing):
            diff = set(typing) ^ set(implementing)
            return "Difference: %s" % diff

        if a != b:
            specialized = "argument names.\n%s\n%s" % (sig_str, gen_diff(a, b))
            raise InternalError(err_prefix + specialized)

        # ensure kwargs are the same
        ty = [x.name for x in ty_kws]
        im = [x.name for x in im_kws]
        if ty != im:
            specialized = "keyword argument names.\n%s\n%s"
            msg = err_prefix + specialized % (sig_str, gen_diff(ty_kws, im_kws))
            raise InternalError(msg)
        same = [x.default for x in ty_kws] == [x.default for x in im_kws]
        if not same:
            specialized = "keyword argument default values.\n%s\n%s"
            msg = err_prefix + specialized % (sig_str, gen_diff(ty_kws, im_kws))
            raise InternalError(msg)

    def generic(self, args, kws):
        """
        Type the overloaded function by compiling the appropriate
        implementation for the given args.
        """
        from numba.core.typed_passes import PreLowerStripPhis

        disp, new_args = self._get_impl(args, kws)
        if disp is None:
            return
        # Compile and type it for the given types
        disp_type = types.Dispatcher(disp)
        # Store the compiled overload for use in the lowering phase if there's
        # no inlining required (else functions are being compiled which will
        # never be used as they are inlined)
        if not self._inline.is_never_inline:
            # need to run the compiler front end up to type inference to
            # compute a signature
            from numba.core import typed_passes, compiler
            from numba.core.inline_closurecall import InlineWorker
            fcomp = disp._compiler
            flags = compiler.Flags()

            # Updating these causes problems?!
            #fcomp.targetdescr.options.parse_as_flags(flags,
            #                                         fcomp.targetoptions)
            #flags = fcomp._customize_flags(flags)

            # spoof a compiler pipline like the one that will be in use
            tyctx = fcomp.targetdescr.typing_context
            tgctx = fcomp.targetdescr.target_context
            compiler_inst = fcomp.pipeline_class(tyctx, tgctx, None, None, None,
                                                 flags, None, )
            inline_worker = InlineWorker(tyctx, tgctx, fcomp.locals,
                                         compiler_inst, flags, None,)

            # If the inlinee contains something to trigger literal arg dispatch
            # then the pipeline call will unconditionally fail due to a raised
            # ForceLiteralArg exception. Therefore `resolve` is run first, as
            # type resolution must occur at some point, this will hit any
            # `literally` calls and because it's going via the dispatcher will
            # handle them correctly i.e. ForceLiteralArg propagates. This
            # having the desired effect of ensuring the pipeline call is only
            # made in situations that will succeed. For context see #5887.
            resolve = disp_type.dispatcher.get_call_template
            template, pysig, folded_args, kws = resolve(new_args, kws)
            ir = inline_worker.run_untyped_passes(
                disp_type.dispatcher.py_func, enable_ssa=True
            )

            (typemap, return_type, calltypes,
             _) = typed_passes.type_inference_stage(
                self.context, tgctx, ir, folded_args, None)
            ir = PreLowerStripPhis()._strip_phi_nodes(ir)
            ir._definitions = numba.core.ir_utils.build_definitions(ir.blocks)

            sig = Signature(return_type, folded_args, None)
            # this stores a load of info for the cost model function if
            # supplied, it by default is None
            self._inline_overloads[sig.args] = {'folded_args': folded_args}
            # this stores the compiled overloads, if there's no compiled
            # overload available i.e. function is always inlined, the key still
            # needs to exist for type resolution

            # NOTE: If lowering is failing on a `_EmptyImplementationEntry`,
            #       the inliner has failed to inline this entry correctly.
            impl_init = _EmptyImplementationEntry('always inlined')
            self._compiled_overloads[sig.args] = impl_init
            if not self._inline.is_always_inline:
                # this branch is here because a user has supplied a function to
                # determine whether to inline or not. As a result both compiled
                # function and inliner info needed, delaying the computation of
                # this leads to an internal state mess at present. TODO: Fix!
                sig = disp_type.get_call_type(self.context, new_args, kws)
                self._compiled_overloads[sig.args] = disp_type.get_overload(sig)
                # store the inliner information, it's used later in the cost
                # model function call
                iinfo = _inline_info(ir, typemap, calltypes, sig)
                self._inline_overloads[sig.args] = {'folded_args': folded_args,
                                                    'iinfo': iinfo}
        else:
            sig = disp_type.get_call_type(self.context, new_args, kws)
            if sig is None:  # can't resolve for this target
                return None
            self._compiled_overloads[sig.args] = disp_type.get_overload(sig)
        return sig

    def _get_impl(self, args, kws):
        """Get implementation given the argument types.

        Returning a Dispatcher object. The Dispatcher object is cached
        internally in `self._impl_cache`.
        """
        flags = utils.ConfigStack.top_or_none()
        cache_key = self.context, tuple(args), tuple(kws.items()), flags
        try:
            impl, args = self._impl_cache[cache_key]
            return impl, args
        except KeyError:
            # pass and try outside the scope so as to not have KeyError with a
            # nested addition error in the case the _build_impl fails
            pass
        impl, args = self._build_impl(cache_key, args, kws)
        return impl, args

    def _get_jit_decorator(self):
        """Gets a jit decorator suitable for the current target"""
        jitter_str = self.metadata.get('target', None)
        if jitter_str is None:
            from numba import jit
            # There is no target requested, use default, this preserves
            # original behaviour
            jitter = lambda *args, **kwargs: jit(*args, nopython=True, **kwargs)
        else:
            from numba.core.target_extension import (target_registry,
                                                     get_local_target,
                                                     jit_registry)
            # target has been requested, see what it is...
            jitter = jit_registry.get(jitter_str, None)
            if jitter is None:
                # No JIT known for target string, see if something is
                # registered for the string and report if not.
                target_class = target_registry.get(jitter_str, None)
                if target_class is None:
                    # BUG FIX: the message was accidentally a 2-tuple (trailing
                    # comma) which has no .format(), so this raised
                    # AttributeError instead of the intended ValueError.
                    # Implicit string concatenation restores the intent.
                    msg = ("Unknown target '{}', has it been "
                           "registered?")
                    raise ValueError(msg.format(jitter_str))
                target_hw = get_local_target(self.context)
                # check that the requested target is in the hierarchy for the
                # current frame's target.
                if not issubclass(target_hw, target_class):
                    msg = "No overloads exist for the requested target: {}."
                    # BUG FIX: `msg` was built here but nothing was raised, so
                    # a target mismatch silently fell through to the registry
                    # lookup below.  `InternalTargetMismatchError` is imported
                    # at the top of this module and otherwise unused, which
                    # indicates this raise was intended.
                    # NOTE(review): confirm the constructor signature of
                    # InternalTargetMismatchError against numba.core.errors.
                    raise InternalTargetMismatchError(msg.format(target_hw))
                jitter = jit_registry[target_hw]
        if jitter is None:
            raise ValueError("Cannot find a suitable jit decorator")
        return jitter

    def _build_impl(self, cache_key, args, kws):
        """Build and cache the implementation.

        Given the positional (`args`) and keyword arguments (`kws`), obtains
        the `overload` implementation and wrap it in a Dispatcher object.
        The expected argument types are returned for use by type-inference.
        The expected argument types are only different from the given argument
        types if there is an imprecise type in the given argument types.

        Parameters
        ----------
        cache_key : hashable
            The key used for caching the implementation.
        args : Tuple[Type]
            Types of positional argument.
        kws : Dict[Type]
            Types of keyword argument.

        Returns
        -------
        disp, args :
            On success, returns `(Dispatcher, Tuple[Type])`.
            On failure, returns `(None, None)`.
        """
        jitter = self._get_jit_decorator()

        # Get the overload implementation for the given types
        ov_sig = inspect.signature(self._overload_func)
        try:
            ov_sig.bind(*args, **kws)
        except TypeError as e:
            # bind failed, raise, if there's a
            # ValueError then there's likely unrecoverable
            # problems
            raise TypingError(str(e)) from e
        else:
            ovf_result = self._overload_func(*args, **kws)

        if ovf_result is None:
            # No implementation => fail typing
            self._impl_cache[cache_key] = None, None
            return None, None
        elif isinstance(ovf_result, tuple):
            # The implementation returned a signature that the type-inferencer
            # should be using.
            sig, pyfunc = ovf_result
            args = sig.args
            kws = {}
            cache_key = None  # don't cache
        else:
            # Regular case
            pyfunc = ovf_result

        # Check type of pyfunc
        if not isinstance(pyfunc, FunctionType):
            msg = ("Implementator function returned by `@overload` "
                   "has an unexpected type.  Got {}")
            raise AssertionError(msg.format(pyfunc))

        # check that the typing and impl sigs match up
        if self._strict:
            self._validate_sigs(self._overload_func, pyfunc)
        # Make dispatcher
        jitdecor = jitter(**self._jit_options)
        disp = jitdecor(pyfunc)
        # Make sure that the implementation can be fully compiled
        disp_type = types.Dispatcher(disp)
        disp_type.get_call_type(self.context, args, kws)
        if cache_key is not None:
            self._impl_cache[cache_key] = disp, args
        return disp, args

    def get_impl_key(self, sig):
        """
        Return the key for looking up the implementation for the given
        signature on the target context.
        """
        return self._compiled_overloads[sig.args]

    @classmethod
    def get_source_info(cls):
        """Return a dictionary with information about the source code of the
        implementation.

        Returns
        -------
        info : dict
            - "kind" : str
                The implementation kind.
            - "name" : str
                The name of the function that provided the definition.
            - "sig" : str
                The formatted signature of the function.
            - "filename" : str
                The name of the source file.
            - "lines": tuple (int, int)
                First and list line number.
            - "docstring": str
                The docstring of the definition.
        """
        basepath = os.path.dirname(os.path.dirname(numba.__file__))
        impl = cls._overload_func
        code, firstlineno, path = cls.get_source_code_info(impl)
        sig = str(utils.pysignature(impl))
        info = {
            'kind': "overload",
            'name': getattr(impl, '__qualname__', impl.__name__),
            'sig': sig,
            'filename': utils.safe_relpath(path, start=basepath),
            'lines': (firstlineno, firstlineno + len(code) - 1),
            'docstring': impl.__doc__
        }
        return info

    def get_template_info(self):
        basepath = os.path.dirname(os.path.dirname(numba.__file__))
        impl = self._overload_func
        code, firstlineno, path = self.get_source_code_info(impl)
        sig = str(utils.pysignature(impl))
        info = {
            'kind': "overload",
            'name': getattr(impl, '__qualname__', impl.__name__),
            'sig': sig,
            'filename': utils.safe_relpath(path, start=basepath),
            'lines': (firstlineno, firstlineno + len(code) - 1),
            'docstring': impl.__doc__
        }
        return info
def make_overload_template(func, overload_func, jit_options, strict,
                           inline, prefer_literal=False, **kwargs):
    """
    Make a template class for function *func* overloaded by *overload_func*.
    Compiler options are passed as a dictionary to *jit_options*.
    """
    func_name = getattr(func, '__name__', str(func))
    class_name = "OverloadTemplate_%s" % (func_name,)
    base = _OverloadFunctionTemplate
    members = {
        'key': func,
        '_overload_func': staticmethod(overload_func),
        '_impl_cache': {},
        '_compiled_overloads': {},
        '_jit_options': jit_options,
        '_strict': strict,
        '_inline': staticmethod(InlineOptions(inline)),
        '_inline_overloads': {},
        'prefer_literal': prefer_literal,
        'metadata': kwargs,
    }
    # Use the base's metaclass so any custom class machinery is preserved.
    return type(base)(class_name, (base,), members)
class _TemplateTargetHelperMixin(object):
    """Mixin for helper methods that assist with target/registry resolution"""

    def _get_target_registry(self, reason):
        """Returns the registry for the current target.

        Parameters
        ----------
        reason: str
            Reason for the resolution. Expects a noun.

        Returns
        -------
        reg : a registry suitable for the current target.
        """
        from numba.core.target_extension import (_get_local_target_checked,
                                                 dispatcher_registry)

        hwstr = self.metadata.get('target', 'generic')
        target_hw = _get_local_target_checked(self.context, hwstr, reason)

        # Get registry for the current hardware
        disp = dispatcher_registry[target_hw]
        tgtctx = disp.targetdescr.target_context

        # Workaround: targets shouldn't care which registry receives lowering
        # implementations, but the CUDA target "borrows" implementations from
        # specific CPU registries. If an impl defined via @intrinsic (e.g. the
        # numba.*unsafe modules) is also used by CUDA, the registry chosen
        # here must be one CUDA borrows from. So: prefer the CPU
        # builtin_registry whenever the target context knows about it (i.e.
        # the target installed it); otherwise the target's registries are
        # unbound to any other target and any of them will do.
        #
        # NOTE: This will need fixing again once targets describe their
        # implementations solely through the extension APIs; at that point
        # builtin_registry should hold only stack-allocated/low-level
        # target-invariant impls and the fallback branch alone should remain.

        # In case the target has swapped, e.g. cuda borrowing cpu, refresh to
        # populate.
        tgtctx.refresh()
        if builtin_registry in tgtctx._registries:
            return builtin_registry
        # Pick any registry of the target in which to install intrinsics.
        return next(iter(tgtctx._registries))
class _IntrinsicTemplate(_TemplateTargetHelperMixin, AbstractTemplate):
    """
    A base class of templates for intrinsic definition
    """
    def generic(self, args, kws):
        """
        Type the intrinsic by the arguments.

        Calls the user's definition function with (typing context, *args,
        **kws); a ``None`` result means "no match".  On the first successful
        typing for a given (context, args, kws) key the resulting signature is
        cached, the implementation is stored per signature args, and its
        lowering is registered with the current target's registry.
        """
        lower_builtin = self._get_target_registry('intrinsic').lower
        # Cache key includes the typing context so the same template can be
        # used across contexts without mixing signatures.
        cache_key = self.context, args, tuple(kws.items())
        try:
            return self._impl_cache[cache_key]
        except KeyError:
            pass
        result = self._definition_func(self.context, *args, **kws)
        if result is None:
            return
        # The definition function returns a (signature, implementation) pair.
        [sig, imp] = result
        pysig = utils.pysignature(self._definition_func)
        # omit context argument from user function
        parameters = list(pysig.parameters.values())[1:]
        sig = sig.replace(pysig=pysig.replace(parameters=parameters))
        self._impl_cache[cache_key] = sig
        self._overload_cache[sig.args] = imp
        # register the lowering
        lower_builtin(imp, *sig.args)(imp)
        return sig
    def get_impl_key(self, sig):
        """
        Return the key for looking up the implementation for the given
        signature on the target context.
        """
        # The implementation function itself serves as the lowering key
        # (it was registered as such in generic()).
        return self._overload_cache[sig.args]
    def get_template_info(self):
        """Return a metadata dict (kind/name/sig/filename/lines/docstring)
        describing the intrinsic's definition function."""
        basepath = os.path.dirname(os.path.dirname(numba.__file__))
        impl = self._definition_func
        code, firstlineno, path = self.get_source_code_info(impl)
        sig = str(utils.pysignature(impl))
        info = {
            'kind': "intrinsic",
            'name': getattr(impl, '__qualname__', impl.__name__),
            'sig': sig,
            'filename': utils.safe_relpath(path, start=basepath),
            'lines': (firstlineno, firstlineno + len(code) - 1),
            'docstring': impl.__doc__
        }
        return info
def make_intrinsic_template(handle, defn, name, kwargs):
    """
    Build a template class for an intrinsic *handle* defined by the function
    *defn*.  *name* is used to derive the new template class name and
    *kwargs* becomes the template metadata.
    """
    class_dict = dict(
        key=handle,
        _definition_func=staticmethod(defn),
        _impl_cache={},
        _overload_cache={},
        metadata=kwargs,
    )
    base = _IntrinsicTemplate
    return type(base)("_IntrinsicTemplate_%s" % (name,), (base,), class_dict)
class AttributeTemplate(object):
    """Base template resolving the type of ``value.attr`` accesses.

    Subclasses provide per-attribute ``resolve_<attr>(value)`` methods and/or
    override ``generic_resolve(value, attr)`` as a catch-all.
    """

    # Catch-all resolver; NotImplemented means "no generic fallback".
    generic_resolve = NotImplemented

    def __init__(self, context):
        self.context = context

    def resolve(self, value, attr):
        return self._resolve(value, attr)

    def _resolve(self, value, attr):
        resolver = getattr(self, "resolve_%s" % attr, None)
        if resolver is not None:
            return resolver(value)
        fallback = self.generic_resolve
        if fallback is not NotImplemented:
            return fallback(value, attr)
        if isinstance(value, types.Module):
            return self.context.resolve_module_constants(value, attr)
        return None
class _OverloadAttributeTemplate(_TemplateTargetHelperMixin, AttributeTemplate):
    """
    A base class of templates for @overload_attribute functions.
    """
    is_method = False
    def __init__(self, context):
        super(_OverloadAttributeTemplate, self).__init__(context)
        self.context = context
        # Registers the getattr lowering for this (type, attr) pair.  Note
        # this runs on every instantiation despite the name; presumably the
        # registry tolerates re-registration — confirm.
        self._init_once()
    def _init_once(self):
        """Install the getattr lowering for ``cls.key``/``cls._attr`` on the
        current target's registry."""
        cls = type(self)
        attr = cls._attr
        lower_getattr = self._get_target_registry('attribute').lower_getattr
        @lower_getattr(cls.key, attr)
        def getattr_impl(context, builder, typ, value):
            # Resolve the overload implementation as a callable and invoke it
            # with the attribute's owner value as sole argument.
            typingctx = context.typing_context
            fnty = cls._get_function_type(typingctx, typ)
            sig = cls._get_signature(typingctx, fnty, (typ,), {})
            call = context.get_function(fnty, sig)
            return call(builder, (value,))
    def _resolve(self, typ, attr):
        """Type ``typ.attr``; returns None when *attr* is not the overloaded
        attribute, otherwise the implementation's return type."""
        if self._attr != attr:
            return None
        fnty = self._get_function_type(self.context, typ)
        sig = self._get_signature(self.context, fnty, (typ,), {})
        # There should only be one template
        for template in fnty.templates:
            self._inline_overloads.update(template._inline_overloads)
        return sig.return_type
    @classmethod
    def _get_signature(cls, typingctx, fnty, args, kws):
        """Resolve the call signature of *fnty* and attach the Python
        signature of the overload implementation for error reporting."""
        sig = fnty.get_call_type(typingctx, args, kws)
        sig = sig.replace(pysig=utils.pysignature(cls._overload_func))
        return sig
    @classmethod
    def _get_function_type(cls, typingctx, typ):
        # The overload function is typed as a first-class value; *typ* is
        # unused here but kept for subclass symmetry.
        return typingctx.resolve_value_type(cls._overload_func)
class _OverloadMethodTemplate(_OverloadAttributeTemplate):
    """
    A base class of templates for @overload_method functions.
    """
    is_method = True
    def _init_once(self):
        """
        Overriding parent definition

        Registers the method-call lowering keyed on ``(self.key, attr)``
        instead of a getattr lowering.
        """
        attr = self._attr
        try:
            registry = self._get_target_registry('method')
        except InternalTargetMismatchError:
            # Target mismatch. Do not register attribute lookup here.
            pass
        else:
            lower_builtin = registry.lower
            @lower_builtin((self.key, attr), self.key, types.VarArg(types.Any))
            def method_impl(context, builder, sig, args):
                # First argument is the receiver; re-resolve the overload's
                # signature for the full argument list before calling.
                typ = sig.args[0]
                typing_context = context.typing_context
                fnty = self._get_function_type(typing_context, typ)
                sig = self._get_signature(typing_context, fnty, sig.args, {})
                call = context.get_function(fnty, sig)
                # Link dependent library
                context.add_linking_libs(getattr(call, 'libs', ()))
                return call(builder, args)
    def _resolve(self, typ, attr):
        """Type ``typ.attr`` as a bound method; returns None when *attr* is
        not the overloaded method."""
        if self._attr != attr:
            return None
        if isinstance(typ, types.TypeRef):
            assert typ == self.key
        else:
            assert isinstance(typ, self.key)
        class MethodTemplate(AbstractTemplate):
            key = (self.key, attr)
            _inline = self._inline
            _overload_func = staticmethod(self._overload_func)
            _inline_overloads = self._inline_overloads
            prefer_literal = self.prefer_literal
            def generic(_, args, kws):
                # Prepend the receiver type so the overload sees it as the
                # first positional argument.
                args = (typ,) + tuple(args)
                fnty = self._get_function_type(self.context, typ)
                sig = self._get_signature(self.context, fnty, args, kws)
                sig = sig.replace(pysig=utils.pysignature(self._overload_func))
                for template in fnty.templates:
                    self._inline_overloads.update(template._inline_overloads)
                if sig is not None:
                    return sig.as_method()
        return types.BoundFunction(MethodTemplate, typ)
def make_overload_attribute_template(typ, attr, overload_func, inline,
                                     prefer_literal=False,
                                     base=_OverloadAttributeTemplate,
                                     **kwargs):
    """
    Build a template class for attribute *attr* of *typ* overloaded by
    *overload_func*.
    """
    assert isinstance(typ, types.Type) or issubclass(typ, types.Type)
    # The implementation cache is deliberately subclass-specific.
    class_dict = {
        'key': typ,
        '_attr': attr,
        '_impl_cache': {},
        '_inline': staticmethod(InlineOptions(inline)),
        '_inline_overloads': {},
        '_overload_func': staticmethod(overload_func),
        'prefer_literal': prefer_literal,
        'metadata': kwargs,
    }
    template_name = "OverloadAttributeTemplate_%s_%s" % (typ, attr)
    return type(base)(template_name, (base,), class_dict)
def make_overload_method_template(typ, attr, overload_func, inline,
                                  prefer_literal=False, **kwargs):
    """
    Build a template class for method *attr* of *typ* overloaded by
    *overload_func*.  Identical to the attribute variant except that the
    resulting template is a _OverloadMethodTemplate.
    """
    return make_overload_attribute_template(
        typ,
        attr,
        overload_func,
        inline=inline,
        base=_OverloadMethodTemplate,
        prefer_literal=prefer_literal,
        **kwargs,
    )
def bound_function(template_key):
    """
    Wrap an AttributeTemplate resolve_* method so that it resolves an
    instance method's signature rather than an instance attribute.

    The wrapped method must return the resolved method's signature for the
    given self type, args, and keywords.

    It is used thusly:

        class ComplexAttributes(AttributeTemplate):
            @bound_function("complex.conjugate")
            def resolve_conjugate(self, ty, args, kwds):
                return ty

    *template_key* (e.g. "complex.conjugate" above) is used by the target to
    look up the method's implementation, as a regular function.
    """
    def wrapper(method_resolver):
        @functools.wraps(method_resolver)
        def attribute_resolver(self, ty):
            class MethodTemplate(AbstractTemplate):
                key = template_key

                def generic(_, args, kws):
                    sig = method_resolver(self, ty, args, kws)
                    if sig is None:
                        return None
                    if sig.recvr is None:
                        sig = sig.replace(recvr=ty)
                    return sig

            return types.BoundFunction(MethodTemplate, ty)

        return attribute_resolver

    return wrapper
# -----------------------------
class Registry(object):
    """
    A registry of typing declarations. The registry stores such declarations
    for functions, attributes and globals.
    """
    def __init__(self):
        self.functions = []
        self.attributes = []
        self.globals = []
    def register(self, item):
        """Register a function typing declaration (a FunctionTemplate
        subclass); returns *item* so it is usable as a decorator."""
        assert issubclass(item, FunctionTemplate)
        self.functions.append(item)
        return item
    def register_attr(self, item):
        """Register an attribute typing declaration (an AttributeTemplate
        subclass); returns *item* so it is usable as a decorator."""
        assert issubclass(item, AttributeTemplate)
        self.attributes.append(item)
        return item
    def register_global(self, val=None, typ=None, **kwargs):
        """
        Register the typing of a global value.
        Functional usage with a Numba type::
            register_global(value, typ)
        Decorator usage with a template class::
            @register_global(value, typing_key=None)
            class Template:
                ...

        Raises:
            TypeError: in decorator usage, when *val* is not callable and
                therefore no Function type can be inferred for it.
            ValueError: when *val* is used as its own typing key but is not
                reachable as ``<its module>.<its name>``.
        """
        if typ is not None:
            # register_global(val, typ)
            assert val is not None
            assert not kwargs
            self.globals.append((val, typ))
        else:
            def decorate(cls, typing_key):
                class Template(cls):
                    key = typing_key
                if callable(val):
                    typ = types.Function(Template)
                else:
                    # BUG FIX: the %r placeholder previously had no argument,
                    # so the offending value was never interpolated into the
                    # error message.
                    raise TypeError("cannot infer type for global value %r"
                                    % (val,))
                self.globals.append((val, typ))
                return cls
            # register_global(val, typing_key=None)(<template class>)
            assert val is not None
            typing_key = kwargs.pop('typing_key', val)
            assert not kwargs
            if typing_key is val:
                # Check the value is globally reachable, as it is going
                # to be used as the key.
                mod = sys.modules[val.__module__]
                if getattr(mod, val.__name__) is not val:
                    raise ValueError("%r is not globally reachable as '%s.%s'"
                                     % (mod, val.__module__, val.__name__))
            def decorator(cls):
                return decorate(cls, typing_key)
            return decorator
class BaseRegistryLoader(object):
    """
    Incrementally stream registrations out of a registry.

    Several contexts may exist and each wants to install every registration,
    including ones added after the first installation.  Each context therefore
    keeps its own loader per registry: successive calls to new_registrations()
    yield only the registrations not yet seen by this loader, without
    duplicating the underlying registries themselves.
    """
    def __init__(self, registry):
        streams = {}
        for item_name in self.registry_items:
            streams[item_name] = utils.stream_list(getattr(registry, item_name))
        self._registrations = streams
    def new_registrations(self, name):
        """Yield the registrations added under *name* since the last call."""
        yield from next(self._registrations[name])
class RegistryLoader(BaseRegistryLoader):
    """
    An incremental loader for a typing registry.
    """
    # Names of the Registry attributes whose registrations are streamed.
    registry_items = ('functions', 'attributes', 'globals')
# Module-level singleton collecting the built-in typing declarations.
builtin_registry = Registry()
# Public decorator aliases for registering on the builtin registry.
infer = builtin_registry.register
infer_getattr = builtin_registry.register_attr
infer_global = builtin_registry.register_global
| 37.117158 | 80 | 0.590356 |
acedff560d8e4f29a2e936f58f6b2bf51f4d0f23 | 1,889 | py | Python | playground/IIC_VAT/main.py | jizongFox/deep-clustering-toolbox | 0721cbbb278af027409ed4c115ccc743b6daed1b | [
"MIT"
] | 34 | 2019-08-05T03:48:36.000Z | 2022-03-29T03:04:51.000Z | playground/IIC_VAT/main.py | jizongFox/deep-clustering-toolbox | 0721cbbb278af027409ed4c115ccc743b6daed1b | [
"MIT"
] | 10 | 2019-05-03T21:02:50.000Z | 2021-12-23T08:01:30.000Z | playground/IIC_VAT/main.py | ETS-Research-Repositories/deep-clustering-toolbox | 0721cbbb278af027409ed4c115ccc743b6daed1b | [
"MIT"
] | 5 | 2019-09-29T07:56:03.000Z | 2021-04-22T12:08:50.000Z | ###############################
# This file is to create experiments using Cifar10 dataset of IIC setting (double head) with acc = ~60.0%
# to verify whether the VAT can help better with IIC.
# This experiment can be long and a pretrained checkpoint can be used to reduce the time.
##############################
from pathlib import Path
from deepclustering.dataset.classification import (
Cifar10ClusteringDatasetInterface,
default_cifar10_img_transform,
)
from deepclustering.manager import ConfigManger
from deepclustering.model import Model
from playground.IIC_VAT.VATIICTrainer import IMSATIICTrainer
DEFAULT_CONFIG = str(Path(__file__).parent / "config.yaml")
config = ConfigManger(DEFAULT_CONFIG_PATH=DEFAULT_CONFIG, verbose=True).config
# create model:
model = Model(
arch_dict=config["Arch"],
optim_dict=config["Optim"],
scheduler_dict=config["Scheduler"],
)
train_loader_A = Cifar10ClusteringDatasetInterface(
**config["DataLoader"]
).ParallelDataLoader(
default_cifar10_img_transform["tf1"],
default_cifar10_img_transform["tf2"],
default_cifar10_img_transform["tf2"],
default_cifar10_img_transform["tf2"],
default_cifar10_img_transform["tf2"],
)
train_loader_B = Cifar10ClusteringDatasetInterface(
**config["DataLoader"]
).ParallelDataLoader(
default_cifar10_img_transform["tf1"],
default_cifar10_img_transform["tf2"],
default_cifar10_img_transform["tf2"],
default_cifar10_img_transform["tf2"],
default_cifar10_img_transform["tf2"],
)
val_loader = Cifar10ClusteringDatasetInterface(
**config["DataLoader"]
).ParallelDataLoader(default_cifar10_img_transform["tf3"])
trainer = IMSATIICTrainer(
model=model,
train_loader_A=train_loader_A,
train_loader_B=train_loader_B,
val_loader=val_loader,
config=config,
**config["Trainer"]
)
trainer.start_training()
trainer.clean_up()
| 32.568966 | 107 | 0.749074 |
acedff756f73849cc06bbaf66352797784fcd4e0 | 420 | py | Python | Solver/__init__.py | szhang-cis/Kuru_Mac | 90caaa37f7917e25afd25de24c06216e202e2e96 | [
"MIT"
] | null | null | null | Solver/__init__.py | szhang-cis/Kuru_Mac | 90caaa37f7917e25afd25de24c06216e202e2e96 | [
"MIT"
] | null | null | null | Solver/__init__.py | szhang-cis/Kuru_Mac | 90caaa37f7917e25afd25de24c06216e202e2e96 | [
"MIT"
] | 1 | 2021-04-22T10:43:44.000Z | 2021-04-22T10:43:44.000Z | from .LinearSolver import LinearSolver
from .FEMSolver import FEMSolver
#from .ShellSolver import ShellSolver
#from .LaplacianSolver import LaplacianSolver
#from .TractionBasedStaggeredSolver import TractionBasedStaggeredSolver
#from .PotentialBasedStaggeredSolver import PotentialBasedStaggeredSolver
#from .CoupleStressSolver import CoupleStressSolver
#from .DetachedParallelFEMSolver import DetachedParallelFEMSolver
| 46.666667 | 73 | 0.890476 |
acedff83a05f2ad9e044ca49e96ca33e53af60da | 6,822 | py | Python | ipfabric/client.py | justinjeffery-ipf/python-ipfabric | 07c04dd7ec646bb509fc0c580561f4318fdb8374 | [
"MIT"
] | 4 | 2021-11-25T10:09:45.000Z | 2022-01-04T14:05:57.000Z | ipfabric/client.py | justinjeffery-ipf/python-ipfabric | 07c04dd7ec646bb509fc0c580561f4318fdb8374 | [
"MIT"
] | 15 | 2021-11-22T14:11:22.000Z | 2022-03-24T14:34:58.000Z | ipfabric/client.py | justinjeffery-ipf/python-ipfabric | 07c04dd7ec646bb509fc0c580561f4318fdb8374 | [
"MIT"
] | 1 | 2021-11-18T17:57:22.000Z | 2021-11-18T17:57:22.000Z | import re
from json import loads
from typing import Optional, Union
from urllib.parse import urlparse
from ipfabric import models
from ipfabric.api import IPFabricAPI
from ipfabric.intent import Intent
DEFAULT_ID = "$last"
def check_format(func):
"""
Checks to make sure api/v1/ is not in the URL and converts filters from json str to dict
"""
def wrapper(self, url, *args, **kwargs):
if "filters" in kwargs and isinstance(kwargs["filters"], str):
kwargs["filters"] = loads(kwargs["filters"])
path = urlparse(url or kwargs["url"]).path
url = path.split("v1/")[1] if "v1/" in path else path
return func(self, url, *args, **kwargs)
return wrapper
class IPFClient(IPFabricAPI):
def __init__(
self, base_url: Optional[str] = None, token: Optional[str] = None, snapshot_id: str = DEFAULT_ID, **kwargs
):
"""
Initializes the IP Fabric Client
:param base_url: str: IP Fabric instance provided in 'base_url' parameter, or the 'IPF_URL' environment variable
:param token: str: API token or 'IPF_TOKEN' environment variable
:param snapshot_id: str: IP Fabric snapshot ID to use by default for database actions - defaults to '$last'
:param kwargs: dict: Keyword args to pass to httpx
"""
super().__init__(base_url, token, snapshot_id, **kwargs)
self.inventory = models.Inventory(client=self)
self.intent = Intent(client=self)
@check_format
def fetch(
self,
url,
columns: Optional[list] = None,
filters: Optional[Union[dict, str]] = None,
limit: Optional[int] = 1000,
start: Optional[int] = 0,
snapshot_id: Optional[str] = None,
reports: Optional[str] = None,
sort: Optional[dict] = None,
snapshot: bool = True
):
"""
Gets data from IP Fabric for specified endpoint
:param url: str: Example tables/vlan/device-summary
:param columns: list: Optional list of columns to return, None will return all
:param filters: dict: Optional dictionary of filters
:param limit: int: Default to 1,000 rows
:param start: int: Starts at 0
:param snapshot_id: str: Optional snapshot_id to override default
:param reports: str: String of frontend URL where the reports are displayed
:param sort: dict: Dictionary to apply sorting: {"order": "desc", "column": "lastChange"}
:param snapshot: bool: Set to False for some tables like management endpoints.
:return: list: List of Dictionary objects.
"""
payload = dict(
columns=columns or self._get_columns(url),
pagination=dict(start=start, limit=limit),
)
if snapshot:
payload["snapshot"] = snapshot_id or self.snapshot_id
if filters:
payload["filters"] = filters
if reports:
payload["reports"] = reports
if sort:
payload["sort"] = sort
res = self.post(url, json=payload)
res.raise_for_status()
return res.json()["data"]
@check_format
def fetch_all(
self,
url: str,
columns: Optional[list] = None,
filters: Optional[Union[dict, str]] = None,
snapshot_id: Optional[str] = None,
reports: Optional[str] = None,
sort: Optional[dict] = None,
snapshot: bool = True
):
"""
Gets all data from IP Fabric for specified endpoint
:param url: str: Example tables/vlan/device-summary
:param columns: list: Optional list of columns to return, None will return all
:param filters: dict: Optional dictionary of filters
:param snapshot_id: str: Optional snapshot_id to override default
:param reports: str: String of frontend URL where the reports are displayed
:param sort: dict: Dictionary to apply sorting: {"order": "desc", "column": "lastChange"}
:param snapshot: bool: Set to False for some tables like management endpoints.
:return: list: List of Dictionary objects.
"""
payload = dict(columns=columns or self._get_columns(url))
if snapshot:
payload["snapshot"] = snapshot_id or self.snapshot_id
if filters:
payload["filters"] = filters
if reports:
payload["reports"] = reports
if sort:
payload["sort"] = sort
return self._ipf_pager(url, payload)
@check_format
def query(self, url: str, payload: Union[str, dict], all: bool = True):
"""
Submits a query, does no formating on the parameters. Use for copy/pasting from the webpage.
:param url: str: Example: https://demo1.ipfabric.io/api/v1/tables/vlan/device-summary
:param payload: Union[str, dict]: Dictionary to submit in POST or can be JSON string (i.e. read from file).
:param all: bool: Default use pager to get all results and ignore pagination information in the payload
:return: list: List of Dictionary objects.
"""
if isinstance(payload, str):
payload = loads(payload)
if all:
return self._ipf_pager(url, payload)
else:
res = self.post(url, json=payload)
res.raise_for_status()
return res.json()["data"]
def _get_columns(self, url: str):
"""
Submits malformed payload and extracts column names from it
:param url: str: API url to post
:return: list: List of column names
"""
r = self.post(url, json=dict(snapshot=self.snapshot_id, columns=["*"]))
if r.status_code == 422:
msg = r.json()["errors"][0]["message"]
return [x.strip() for x in re.match(r"\".*\".*\[(.*)]$", msg).group(1).split(",")]
else:
r.raise_for_status()
def _ipf_pager(
self,
url: str,
payload: dict,
data: Optional[Union[list, None]] = None,
limit: int = 1000,
start: int = 0,
):
"""
Loops through and collects all the data from the tables
:param url: str: Full URL to post to
:param payload: dict: Data to submit to IP Fabric
:param data: list: List of data to append subsequent calls
:param start: int: Where to start for the data
:return: list: List of dictionaries
"""
data = data or list()
payload["pagination"] = dict(limit=limit, start=start)
r = self.post(url, json=payload)
r.raise_for_status()
r = r.json()
data.extend(r["data"])
if limit + start < r["_meta"]["count"]:
self._ipf_pager(url, payload, data, limit=limit, start=start + limit)
return data
| 38.111732 | 120 | 0.6073 |
acee01111765f8dce327cde4e7eb29c16e6f0eee | 2,859 | py | Python | tests/test_common.py | mgrrx/xmlrpcproto | d83075d4323ccea552e7c46dbc9af166a367b382 | [
"Apache-2.0"
] | null | null | null | tests/test_common.py | mgrrx/xmlrpcproto | d83075d4323ccea552e7c46dbc9af166a367b382 | [
"Apache-2.0"
] | null | null | null | tests/test_common.py | mgrrx/xmlrpcproto | d83075d4323ccea552e7c46dbc9af166a367b382 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
import pytest
import xmltodict
from lxml import etree
from xmlrpcproto.common import Binary, py2xml, xml2py
CASES = [
(
Binary("you can't read this!".encode()),
"<base64>eW91IGNhbid0IHJlYWQgdGhpcyE=</base64>",
),
(-12.53, "<double>-12.53</double>"),
("Hello world!", "<string>Hello world!</string>"),
(
datetime(year=1998, month=7, day=17, hour=14, minute=8, second=55),
"<dateTime.iso8601>19980717T14:08:55</dateTime.iso8601>",
),
(42, "<i4>42</i4>"),
(True, "<boolean>1</boolean>"),
(False, "<boolean>0</boolean>"),
(None, "<nil/>"),
(
[1404, "Something here", 1],
(
"<array>"
"<data>"
"<value>"
"<i4>1404</i4>"
"</value>"
"<value>"
"<string>Something here</string>"
"</value>"
"<value>"
"<i4>1</i4>"
"</value>"
"</data>"
"</array>"
),
),
(
{"foo": 1},
(
"<struct>"
"<member>"
"<name>foo</name>"
"<value><i4>1</i4></value>"
"</member>"
"</struct>"
),
),
(
[[1, "a"]],
(
"<array>"
"<data>"
"<value>"
"<array>"
"<data>"
"<value>"
"<i4>1</i4>"
"</value>"
"<value>"
"<string>a</string>"
"</value>"
"</data>"
"</array>"
"</value>"
"</data>"
"</array>"
),
),
]
def normalise_dict(d):
    """
    Recursively convert dict-like object (eg OrderedDict) into plain dict.
    Sorts list values.

    FIX: the original tested ``hasattr(v, "iteritems")`` — a Python 2
    remnant that is never true on Python 3 — so nested dicts were never
    normalised. ``isinstance(v, dict)`` covers OrderedDict and any other
    dict subclass.
    """
    out = {}
    for k, v in dict(d).items():
        if isinstance(v, dict):
            out[k] = normalise_dict(v)
        elif isinstance(v, list):
            out[k] = []
            for item in sorted(v):
                if isinstance(item, dict):
                    out[k].append(normalise_dict(item))
                else:
                    out[k].append(item)
        else:
            out[k] = v
    return out
@pytest.mark.parametrize("expected,data", CASES)
def test_xml2py(expected, data):
    """xml2py converts an XML value element into the matching Python object."""
    element = etree.fromstring(data)
    assert xml2py(element) == expected
@pytest.mark.parametrize("data,expected", CASES)
def test_py2xml(data, expected):
    """py2xml produces XML structurally equivalent to the expected fragment."""
    a = py2xml(data)
    b = expected
    # Decode at serialization time.  FIX: the original called a.decode() in
    # the assertion message, which raised AttributeError whenever py2xml had
    # returned a str, and printed b as raw bytes.
    if not isinstance(a, str):
        a = etree.tostring(a, encoding="utf-8").decode()
    if not isinstance(b, str):
        b = etree.tostring(b, encoding="utf-8").decode()
    _a = normalise_dict(xmltodict.parse(a))
    _b = normalise_dict(xmltodict.parse(b))
    assert _a == _b, "\n %s \n not equal \n %s" % (a, b)
| 24.02521 | 75 | 0.453655 |
acee01babb61f30d4a13eaf844d8d5e73ba0a4c2 | 445 | py | Python | Day2/first.py | Woody1474747/AOC2021_Solutions | bf2305e7cf2d07fa20d733efbd51c5503f1fb57c | [
"BSD-2-Clause"
] | null | null | null | Day2/first.py | Woody1474747/AOC2021_Solutions | bf2305e7cf2d07fa20d733efbd51c5503f1fb57c | [
"BSD-2-Clause"
] | null | null | null | Day2/first.py | Woody1474747/AOC2021_Solutions | bf2305e7cf2d07fa20d733efbd51c5503f1fb57c | [
"BSD-2-Clause"
] | null | null | null | with open("test.txt") as f:
data = []
for line in f:
sline = line.split(" ")
sline[1] = sline[1].split("\n")[0]
data.append(sline)
depth = 0
horizontal = 0
aim = 0
for line in data:
if line[0] == "forward":
horizontal = horizontal + int(line[1])
elif line[0] == "up":
depth = depth - int(line[1])
elif line[0] == "down":
depth = depth + int(line[1])
print(horizontal*depth) | 21.190476 | 46 | 0.530337 |
acee01bbc45c1b66941fda4189ec1a567aab5d6f | 12,298 | py | Python | pennylane/templates/subroutines/hilbert_schmidt.py | therooler/pennylane | 88a8a5960a2ffd218a12f85ace632021eef2abf5 | [
"Apache-2.0"
] | null | null | null | pennylane/templates/subroutines/hilbert_schmidt.py | therooler/pennylane | 88a8a5960a2ffd218a12f85ace632021eef2abf5 | [
"Apache-2.0"
] | null | null | null | pennylane/templates/subroutines/hilbert_schmidt.py | therooler/pennylane | 88a8a5960a2ffd218a12f85ace632021eef2abf5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This submodule contains the templates for the Hilbert-Schmidt tests.
"""
# pylint: disable-msg=too-many-arguments
import pennylane as qml
from pennylane.operation import AnyWires, Operation
class HilbertSchmidt(Operation):
    r"""Create a Hilbert-Schmidt template that can be used to compute the Hilbert-Schmidt Test (HST).
    The HST is a useful quantity used when we want to compile an unitary `U` with an approximate unitary `V`. The HST
    is used as a distance between `U` and `V`, the result of executing the HST is 0 if and only if `V` is equal to
    `U` (up to a global phase). Therefore we can define a cost by:
    .. math::
        C_{HST} = 1 - \frac{1}{d^2} \left|Tr(V^{\dagger}U)\right|^2,
    where the quantity :math:`\frac{1}{d^2} \left|Tr(V^{\dagger}U)\right|^2` is obtained by executing the
    Hilbert-Schmidt Test. It is equivalent to taking the outcome probability of the state :math:`|0 ... 0\rangle`
    for the following circuit:
    .. figure:: ../../_static/templates/subroutines/hst.png
        :align: center
        :width: 80%
        :target: javascript:void(0);
    It defines our decomposition for the Hilbert-Schmidt Test template.
    Args:
        params (array): Parameters for the quantum function `V`.
        v_function (callable): Quantum function that represents the approximate compiled unitary `V`.
        v_wires (int or Iterable[Number, str]]): The wire(s) the approximate compiled unitary act on.
        u_tape (.QuantumTape): `U`, the unitary to be compiled as a ``qml.tape.QuantumTape``.
    Raises:
        QuantumFunctionError: The argument ``u_tape`` must be a ``QuantumTape``.
        QuantumFunctionError: ``v_function`` is not a valid quantum function.
        QuantumFunctionError: ``U`` and ``V`` do not have the same number of wires.
        QuantumFunctionError: The wires ``v_wires`` are a subset of ``V`` wires.
        QuantumFunctionError: ``u_tape`` and ``v_tape`` must act on distinct wires.
    **Reference**
    [1] Sumeet Khatri, Ryan LaRose, Alexander Poremba, Lukasz Cincio, Andrew T. Sornborger and Patrick J. Coles
    Quantum-assisted Quantum Compiling.
    `arxiv/1807.00800 <https://arxiv.org/pdf/1807.00800.pdf>`_
    .. seealso:: :class:`~.LocalHilbertSchmidt`
    .. UsageDetails::
        Consider that we want to evaluate the Hilbert-Schmidt Test cost between the unitary ``U`` and an approximate
        unitary ``V``. We need to define some functions where it is possible to use the :class:`~.HilbertSchmidt`
        template. Here the considered unitary is ``Hadamard`` and we try to compute the cost for the approximate
        unitary ``RZ``. For an angle that is equal to ``0`` (``Identity``), we have the maximal cost which is ``1``.
        .. code-block:: python
            with qml.tape.QuantumTape(do_queue=False) as u_tape:
                qml.Hadamard(wires=0)
            def v_function(params):
                qml.RZ(params[0], wires=1)
            dev = qml.device("default.qubit", wires=2)
            @qml.qnode(dev)
            def hilbert_test(v_params, v_function, v_wires, u_tape):
                qml.HilbertSchmidt(v_params, v_function=v_function, v_wires=v_wires, u_tape=u_tape)
                return qml.probs(u_tape.wires + v_wires)
            def cost_hst(parameters, v_function, v_wires, u_tape):
                return (1 - hilbert_test(v_params=parameters, v_function=v_function, v_wires=v_wires, u_tape=u_tape)[0])
        Now that the cost function has been defined it can be called for specific parameters:
        >>> cost_hst([0], v_function = v_function, v_wires = [1], u_tape = u_tape)
        1
    """
    # Operation works on any number of wires (U wires + V wires).
    num_wires = AnyWires
    grad_method = None
    def __init__(self, *params, v_function, v_wires, u_tape, do_queue=True, id=None):
        # Number of trainable parameters equals what the caller passed for V.
        self._num_params = len(params)
        if not isinstance(u_tape, qml.tape.QuantumTape):
            raise qml.QuantumFunctionError("The argument u_tape must be a QuantumTape.")
        u_wires = u_tape.wires
        self.hyperparameters["u_tape"] = u_tape
        if not callable(v_function):
            raise qml.QuantumFunctionError(
                "The argument v_function must be a callable quantum function."
            )
        self.hyperparameters["v_function"] = v_function
        # Materialize V as a tape by tracing the user's quantum function.
        v_tape = qml.transforms.make_tape(v_function)(*params)
        self.hyperparameters["v_tape"] = v_tape
        self.hyperparameters["v_wires"] = v_tape.wires
        if len(u_wires) != len(v_wires):
            raise qml.QuantumFunctionError("U and V must have the same number of wires.")
        if not qml.wires.Wires(v_wires).contains_wires(v_tape.wires):
            raise qml.QuantumFunctionError("All wires in v_tape must be in v_wires.")
        # Intersection of wires
        if len(qml.wires.Wires.shared_wires([u_tape.wires, v_tape.wires])) != 0:
            raise qml.QuantumFunctionError("u_tape and v_tape must act on distinct wires.")
        wires = qml.wires.Wires(u_wires + v_wires)
        super().__init__(*params, wires=wires, do_queue=do_queue, id=id)
    @property
    def num_params(self):
        # Overrides Operation.num_params: determined at construction time
        # from the number of V parameters supplied.
        return self._num_params
    @staticmethod
    def compute_decomposition(
        params, wires, u_tape, v_tape, v_function=None, v_wires=None
    ):  # pylint: disable=arguments-differ,unused-argument
        r"""Representation of the operator as a product of other operators.

        Builds the HST circuit: Bell-pair preparation (Hadamard + CNOT
        layers), then U on the first register and the adjoint of V's
        operations on the second, followed by the inverse Bell-pair layers.
        """
        n_wires = len(u_tape.wires + v_tape.wires)
        decomp_ops = []
        # First half of the wires belongs to U, second half to V.
        first_range = range(0, int(n_wires / 2))
        second_range = range(int(n_wires / 2), n_wires)
        # Hadamard first layer
        for i in first_range:
            decomp_ops.append(qml.Hadamard(wires[i]))
        # CNOT first layer
        for i, j in zip(first_range, second_range):
            decomp_ops.append(qml.CNOT(wires=[wires[i], wires[j]]))
        # Unitary U
        for op_u in u_tape.operations:
            # The operation has been defined outside of this function, to queue it we call qml.apply.
            qml.apply(op_u)
            decomp_ops.append(op_u)
        # Unitary V conjugate
        # NOTE(review): each op is adjointed but the operation order is NOT
        # reversed — verify this matches the intended conjugation of V.
        for op_v in v_tape.operations:
            decomp_ops.append(op_v.adjoint())
        # CNOT second layer
        for i, j in zip(reversed(first_range), reversed(second_range)):
            decomp_ops.append(qml.CNOT(wires=[wires[i], wires[j]]))
        # Hadamard second layer
        for i in first_range:
            decomp_ops.append(qml.Hadamard(wires[i]))
        return decomp_ops
    def adjoint(self):  # pylint: disable=arguments-differ
        """Return a copy of this operation with the inverse flag toggled."""
        adjoint_op = HilbertSchmidt(
            *self.parameters,
            u_tape=self.hyperparameters["u_tape"],
            v_function=self.hyperparameters["v_function"],
            v_wires=self.hyperparameters["v_wires"],
        )
        adjoint_op.inverse = not self.inverse
        return adjoint_op
class LocalHilbertSchmidt(HilbertSchmidt):
    r"""Create a Local Hilbert-Schmidt template that can be used to compute the Local Hilbert-Schmidt Test (LHST).

    The result of the LHST is a useful quantity for compiling a unitary ``U`` with an approximate unitary ``V``. The
    LHST is used as a distance between `U` and `V`; it is similar to the Hilbert-Schmidt test, but the measurement is
    made only on one qubit at the end of the circuit. The LHST cost is always smaller than the HST cost and is useful
    for large unitaries.

    .. figure:: ../../_static/templates/subroutines/lhst.png
        :align: center
        :width: 80%
        :target: javascript:void(0);

    Args:
        params (array): Parameters for the quantum function `V`.
        v_function (Callable): Quantum function that represents the approximate compiled unitary `V`.
        v_wires (int or Iterable[Number, str]]): the wire(s) the approximate compiled unitary acts on.
        u_tape (.QuantumTape): `U`, the unitary to be compiled as a ``qml.tape.QuantumTape``.

    Raises:
        QuantumFunctionError: The argument ``u_tape`` must be a QuantumTape.
        QuantumFunctionError: ``v_function`` is not a valid quantum function.
        QuantumFunctionError: `U` and `V` do not have the same number of wires.
        QuantumFunctionError: The wires ``v_wires`` are not a subset of `V`'s wires.
        QuantumFunctionError: ``u_tape`` and ``v_tape`` must act on distinct wires.

    **Reference**

    [1] Sumeet Khatri, Ryan LaRose, Alexander Poremba, Lukasz Cincio, Andrew T. Sornborger and Patrick J. Coles
    Quantum-assisted Quantum Compiling.
    `arxiv/1807.00800 <https://arxiv.org/pdf/1807.00800.pdf>`_

    .. seealso:: :class:`~.HilbertSchmidt`

    .. UsageDetails::

        Consider that we want to evaluate the Local Hilbert-Schmidt Test cost between the unitary ``U`` and an
        approximate unitary ``V``. We need to define some functions where it is possible to use the
        :class:`~.LocalHilbertSchmidt` template. Here the considered unitary is ``CZ`` and we try to compute the
        cost for the approximate unitary.

        .. code-block:: python

            import numpy as np

            with qml.tape.QuantumTape(do_queue=False) as u_tape:
                qml.CZ(wires=[0,1])

            def v_function(params):
                qml.RZ(params[0], wires=2)
                qml.RZ(params[1], wires=3)
                qml.CNOT(wires=[2, 3])
                qml.RZ(params[2], wires=3)
                qml.CNOT(wires=[2, 3])

            dev = qml.device("default.qubit", wires=4)

            @qml.qnode(dev)
            def local_hilbert_test(v_params, v_function, v_wires, u_tape):
                qml.LocalHilbertSchmidt(v_params, v_function=v_function, v_wires=v_wires, u_tape=u_tape)
                return qml.probs(u_tape.wires + v_wires)

            def cost_lhst(parameters, v_function, v_wires, u_tape):
                return (1 - local_hilbert_test(v_params=parameters, v_function=v_function, v_wires=v_wires, u_tape=u_tape)[0])

        Now that the cost function has been defined it can be called for specific parameters:

        >>> cost_lhst([3*np.pi/2, 3*np.pi/2, np.pi/2], v_function = v_function, v_wires = [1], u_tape = u_tape)
        0.5
    """

    @staticmethod
    def compute_decomposition(
        params, wires, u_tape, v_tape, v_function=None, v_wires=None
    ):  # pylint: disable=arguments-differ,unused-argument
        r"""Representation of the operator as a product of other operators (static method).

        Identical to :meth:`HilbertSchmidt.compute_decomposition` except that the
        final basis-change layers are applied to a single qubit pair: one CNOT
        and one Hadamard instead of full layers (this is what makes the test
        "local").
        """
        decomp_ops = []
        n_wires = len(u_tape.wires + v_tape.wires)
        # first half of the wires carries U's register, second half carries V's
        first_range = range(0, int(n_wires / 2))
        second_range = range(int(n_wires / 2), n_wires)
        # Hadamard first layer
        for i in first_range:
            decomp_ops.append(qml.Hadamard(wires[i]))
        # CNOT first layer
        for i, j in zip(first_range, second_range):
            decomp_ops.append(qml.CNOT(wires=[wires[i], wires[j]]))
        # Unitary U
        for op_u in u_tape.operations:
            # queue the externally-defined operation in the active recording context
            qml.apply(op_u)
            decomp_ops.append(op_u)
        # Unitary V conjugate
        for op_v in v_tape.operations:
            decomp_ops.append(op_v.adjoint())
        # Only one CNOT
        decomp_ops.append(qml.CNOT(wires=[wires[0], wires[int(n_wires / 2)]]))
        # Only one Hadamard
        decomp_ops.append(qml.Hadamard(wires[0]))
        return decomp_ops

    def adjoint(self):  # pylint: disable=arguments-differ
        """Return a new ``LocalHilbertSchmidt`` with the inverse flag toggled."""
        adjoint_op = LocalHilbertSchmidt(
            *self.parameters,
            u_tape=self.hyperparameters["u_tape"],
            v_function=self.hyperparameters["v_function"],
            v_wires=self.hyperparameters["v_wires"],
        )
        adjoint_op.inverse = not self.inverse
        return adjoint_op
| 40.993333 | 126 | 0.651976 |
acee02fc5712064051a013863afc6588998061e2 | 489 | py | Python | data/scripts/templates/object/building/tatooine/shared_filler_building_tatt_style01_07.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/building/tatooine/shared_filler_building_tatt_style01_07.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/building/tatooine/shared_filler_building_tatt_style01_07.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Instantiate and configure the Tatooine filler-building template object."""
    building = Building()
    building.template = "object/building/tatooine/shared_filler_building_tatt_style01_07.iff"
    building.attribute_template_id = -1
    building.stfName("building_name", "filler_building_tatt_style01_07")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return building
acee042544dcd36cf87dba296b64f8ca242b12c4 | 141,181 | py | Python | riptable/rt_fastarray.py | 972d5defe3218bd62b741e6a2f11f5b3/riptable | bb928c11752e831ec701f91964979b31db53826a | [
"BSD-2-Clause-Patent"
] | null | null | null | riptable/rt_fastarray.py | 972d5defe3218bd62b741e6a2f11f5b3/riptable | bb928c11752e831ec701f91964979b31db53826a | [
"BSD-2-Clause-Patent"
] | null | null | null | riptable/rt_fastarray.py | 972d5defe3218bd62b741e6a2f11f5b3/riptable | bb928c11752e831ec701f91964979b31db53826a | [
"BSD-2-Clause-Patent"
] | null | null | null | __all__ = ['FastArray', 'Threading', 'Recycle','Ledger']
import logging
import os
from typing import Optional, Any, Callable, Tuple, Mapping, Union, List, Dict, Sequence
import warnings
import numpy as np
from numpy.core.numeric import ScalarType
import riptide_cpp as rc
from .rt_enum import gBinaryUFuncs, gBinaryLogicalUFuncs, gBinaryBitwiseUFuncs, gUnaryUFuncs, gReduceUFuncs
from .rt_enum import TypeRegister, ROLLING_FUNCTIONS, TIMEWINDOW_FUNCTIONS, REDUCE_FUNCTIONS, gNumpyScalarType, NumpyCharTypes, MATH_OPERATION, INVALID_DICT
from .Utils.rt_display_properties import ItemFormat, DisplayConvert, default_item_formats
from .rt_mlutils import normalize_minmax, normalize_zscore
from .rt_numpy import ismember, ones, unique, sort, full, empty, empty_like, searchsorted, _searchsorted, bool_to_fancy, issorted, repeat, tile, where, groupbyhash, asanyarray, crc32c
from .rt_sds import save_sds
from .rt_utils import sample, describe
from .rt_grouping import Grouping
try:
# optional extra routines if bottleneck installed
import bottleneck as bn
except Exception:
pass
# Create a logger for this module.
logger = logging.getLogger(__name__)

# Maps numpy reduction callables to the riptable REDUCE_FUNCTIONS enum, so
# that calls like np.sum(fastarray) can be routed to the corresponding
# riptide_cpp reduction kernel.
NUMPY_CONVERSION_TABLE: Mapping[Callable, REDUCE_FUNCTIONS] = {
    np.sum: REDUCE_FUNCTIONS.REDUCE_SUM,
    np.nansum: REDUCE_FUNCTIONS.REDUCE_NANSUM,
    np.amin: REDUCE_FUNCTIONS.REDUCE_MIN,
    np.nanmin: REDUCE_FUNCTIONS.REDUCE_NANMIN,
    np.amax: REDUCE_FUNCTIONS.REDUCE_MAX,
    np.nanmax: REDUCE_FUNCTIONS.REDUCE_NANMAX,
    np.var: REDUCE_FUNCTIONS.REDUCE_VAR,
    np.nanvar: REDUCE_FUNCTIONS.REDUCE_NANVAR,
    np.mean: REDUCE_FUNCTIONS.REDUCE_MEAN,
    np.nanmean: REDUCE_FUNCTIONS.REDUCE_NANMEAN,
    np.std: REDUCE_FUNCTIONS.REDUCE_STD,
    np.nanstd: REDUCE_FUNCTIONS.REDUCE_NANSTD,
    np.argmin: REDUCE_FUNCTIONS.REDUCE_ARGMIN,
    np.nanargmin: REDUCE_FUNCTIONS.REDUCE_NANARGMIN,
    np.argmax: REDUCE_FUNCTIONS.REDUCE_ARGMAX,
    np.nanargmax: REDUCE_FUNCTIONS.REDUCE_NANARGMAX,
    # np.any: REDUCE_FUNCTIONS.REDUCE_ANY,
    # np.all: REDUCE_FUNCTIONS.REDUCE_ALL,
}
#--------------------------------------------------------------
def FA_FROM_UINT8(uint8arr):
    '''
    De-pickling helper: rebuild a FastArray from its compressed uint8 buffer.
    '''
    # mode 1 = decompress; the C layer returns a list of arrays
    arrays = rc.CompressDecompressArrays([uint8arr], 1)
    return arrays[0]
#--------------------------------------------------------------
def FA_FROM_BYTESTRING(bytestring):
    '''
    De-pickling helper for the tostring() path (currently disabled): wrap the
    raw bytes as a uint8 array and hand off to FA_FROM_UINT8.
    '''
    as_uint8 = np.frombuffer(bytestring, dtype=np.uint8)
    return FA_FROM_UINT8(as_uint8)
#--------------------------------------------------------------
def logical_find_common_type(arraytypes, scalartypes, scalarval):
    '''
    Determine the result dtype for an operation between one array and one scalar.

    For an integer scalar combined with an integer array, the scalar's *value*
    (not its dtype) decides the minimum integer width, so that e.g. adding the
    scalar 5 to an int8 array stays int8 instead of upcasting to int64.
    Any non-integer participant defers to numpy's standard promotion rules.

    Parameters
    ----------
    arraytypes : sequence
        One-element sequence containing the array's dtype.
    scalartypes : sequence
        One-element sequence containing the scalar's dtype (or dtype-like).
    scalarval : int
        The scalar's value, used to pick the smallest integer type that holds it.

    Returns
    -------
    np.dtype
        The dtype the operation should be performed in; never narrower than
        the array's dtype.
    '''
    scalar = scalartypes[0]
    array = arraytypes[0]

    # normalize dtype-like scalars (e.g. np.int64 the type) into a np.dtype
    # TJD this routine needs to be rewritten
    # can check isinstance(scalar, (np.integer, int))
    if not hasattr(scalar, 'char'):
        scalar = np.dtype(scalar)

    scalar_unsigned = scalar.char in NumpyCharTypes.UnsignedInteger
    scalar_integer = scalar_unsigned or scalar.char in NumpyCharTypes.Integer

    if not scalar_integer:
        # go by numpy upscale rules
        # NOTE: should consider allowing integer ^ True -- or changing a bool scalar to an int
        return np.find_common_type(arraytypes, scalartypes)

    array_unsigned = False
    array_integer = False
    try:
        if array.char in NumpyCharTypes.UnsignedInteger:
            array_unsigned = True
            array_integer = True
        if array.char in NumpyCharTypes.Integer:
            array_integer = True
    except Exception:
        # array may not be a dtype-like with a .char attribute
        pass

    if not array_integer:
        # go by numpy upscale rules
        return np.find_common_type(arraytypes, scalartypes)

    # Determine the possible integer upscaling based on the scalar value
    scalarval = int(scalarval)
    if array_unsigned:
        if scalarval <= 255: final = np.uint8
        elif scalarval <= 65535: final = np.uint16
        elif scalarval <= (2**32 - 1): final = np.uint32
        elif scalarval <= (2**64 - 1): final = np.uint64
        else: final = np.float64
    else:
        if -128 <= scalarval <= 127: final = np.int8
        elif -32768 <= scalarval <= 32767: final = np.int16
        elif -(2**31) <= scalarval <= (2**31 - 1): final = np.int32
        elif -(2**63) <= scalarval <= (2**63 - 1): final = np.int64
        else: final = np.float64

    final = np.dtype(final)
    # do not allow downcasting below the array's existing dtype
    if array.num < final.num:
        return final
    return array
#--------------------------------------------------------------
def _ASTYPE(self, dtype):
    '''
    Internal call from array_ufunc to convert arrays. Returns numpy arrays.

    For the first 14 numeric dtypes (bool, ints, floats; dtype.num <= 13),
    routes through the MathLedger conversion kernels; the sentinel-preserving
    path is taken when FastArray.SafeConversions is set. Anything else falls
    back to the regular astype.
    '''
    to_num = dtype.num
    if self.dtype.num <= 13 and to_num <= 13:
        if FastArray.SafeConversions:
            # perform a safe conversion understanding sentinels
            return TypeRegister.MathLedger._AS_FA_TYPE(self, to_num)._np
        else:
            # perform unsafe conversion NOT understanding sentinels
            return TypeRegister.MathLedger._AS_FA_TYPE_UNSAFE(self, to_num)._np
    return self.astype(dtype)
#--------------------------------------------------------------
#--------------------------------------------------------------
class FastArray(np.ndarray):
'''
Class FastArray
replaces a numpy array for 1 dimensional arrays
arrays with more than 1 dimension are often punted back to numpy for calculations
example usage:
arr = FastArray([1,2,3,4,5])
arr = FastArray(np.arange(100))
arr = FastArray(list('abc'), unicode=True)
to flip an existing numpy array such as nparray use the view method
fa = nparray.view(FastArray)
to change it back
fa.view(np.ndarray) or fa._np
FastArray will take over many numpy ufuncs, can recycle arrays, and use multiple threads
How to subclass FastArray:
--------------------------
Required class definition:
class TestSubclass(FastArray):
def __new__(cls, arr, **args):
# before arr this call, arr needs to be a np.ndarray instance
return arr.view(cls)
def __init__(self, arr, **args):
pass
If the subclass is computable, you might define your own math operations.
In these operations, you might define what the subclass can be computed with. DateTimeNano is a good example.
Common operations to hook are comparisons:
__eq__(), __ne__(), __gt__(), __lt__(), __le__(), __ge__()
Basic math functions:
__add__(), __sub__(), __mul__(), etc.
Bracket indexing operations are very common. If the subclass needs to set or return a value
other than that in the underlying array, you need to take over:
__getitem__(), __setitem__()
Indexing is also used in display.
For regular console/notebook display, you need to take over:
__repr__():
>>> arr
__str__():
>>> print(arr)
_repr_html_() *for Jupyter Lab/Notebook
If the array is being displayed in a Dataset, and you require certain formatting you need to define two more methods:
display_query_properties() - returns an ItemFormat object (see Utils.rt_display_properties), and a conversion function
display_convert_func() - the conversion function returned by display_query_properties(), must return a string. each item being
displayed will go through this function individually, accompanied by an ItemFormat object.
The item going through this is the result of __getitem__() at a single index.
Many riptable operations need to return arrays of the same class they received. To ensure that your
subclass will retain its special properties, you need to take over newclassfrominstance().
Failure to take this over will often result in an object with uninitialized variables.
copy() is another method that is called generically in riptable routines, and needs to be taken
over to retain subclass properties.
For a view of the underlying FastArray, you can use the _fa property.
TODO: Need more text
'''
# Defines a generic np.ndarray subclass, that can cache numpy arrays
# Static Class VARIABLES

# change this to show more or less values on __repr__
MAX_DISPLAY_LEN = 10

# set to 2 or 3 for extra debug information
Verbose = 1

# set to true for reusing numpy arrays instead of deleting them completely
Recycle = True

# set to true to preserve sentinels during internal array_ufunc calculations
SafeConversions = True

# set to false to be just normal numpy
FasterUFunc = True

NEW_ARRAY_FUNCTION_ENABLED = False
"""Enable implementation of array function protocol (default False)."""

# 0=Quiet, 1=Warn, 2=Exception
WarningLevel = 1

# set to true to not allow arrays we do not support
NoTolerance = False

# set to false to not compress when pickling
CompressPickle = True

# a dictionary to avoid repeating warnings in multiple places
# TODO: wrap this in a class so that warnings can be turned on/off
WarningDict = {
    "multiple_dimensions": "FastArray contains two or more dimensions greater than one - shape:{}. Problems may occur."
}

# For reduction operations, the identity element of the operation (for operations
# where such an element is defined).
# N.B. As of numpy 1.19 it does not appear there's a straightforward way of getting from
# something like ``np.sum`` back to ``np.add``, from which we could get the .identity property.
# If that ever changes, this dictionary would no longer be necessary so it can+should be removed.
_reduce_op_identity_value: Mapping[REDUCE_FUNCTIONS, Any] = {
    REDUCE_FUNCTIONS.REDUCE_ALL: True,   # np.all(np.array([]))
    REDUCE_FUNCTIONS.REDUCE_ANY: False,  # np.any(np.array([]))
    REDUCE_FUNCTIONS.REDUCE_NANSUM: np.add.identity,
    REDUCE_FUNCTIONS.REDUCE_SUM: np.add.identity
}
# --------------------------------------------------------------------------
class _ArrayFunctionHelper:
    # TODO add usage examples
    """
    Array function helper is responsible for maintaining the array function protocol array
    implementations in the form of the following API:

    - get_array_function: given the Numpy function, returns overridden array function
    - get_array_function_type_compatibility_check: given the Numpy function, returns overridden array function type compatibility check
    - register_array_function: a function decorator whose argument is the Numpy function to override and the function that will override it
    - register_array_function_type_compatibility: similar to register_array_function, but guards against incompatible array function protocol type arguments for the given Numpy function
    - deregister: deregistration of the Numpy function and type compatibility override
    - deregister_array_function_type_compatibility: deregistration of Numpy function type compatibility override
    """
    # TODO design consideration - using a single dict with tuple type compatibility and redirected callables
    # where a default type compatibility check can be the default value

    # a dictionary that maps numpy functions to our custom variants
    HANDLED_FUNCTIONS: Dict[callable, callable] = {}
    """Dictionary of Numpy API function with overridden functions."""

    HANDLED_TYPE_COMPATIBILITY_CHECK: Dict[callable, callable] = {}
    """Dictionary of type compatibility functions per each Numpy API overridden function."""

    @classmethod
    def get_array_function(cls, np_function: Callable) -> Optional[Callable]:
        """
        Given the Numpy function, returns overridden array function if implemented, otherwise None.

        Parameters
        ----------
        np_function: callable
            The overridden Numpy array function.

        Returns
        -------
        callable, optional
            The overridden function as a callable or None if it's not implemented.
        """
        return cls.HANDLED_FUNCTIONS.get(np_function, None)

    @classmethod
    def get_array_function_type_compatibility_check(cls, np_function: Callable) -> Optional[Callable]:
        """
        Given the Numpy function, returns the corresponding array function type compatibility callable, otherwise None.

        Parameters
        ----------
        np_function: callable
            The overridden Numpy array function.

        Returns
        -------
        callable, optional
            The overridden type compatibility function as a callable or None if it's not implemented.
        """
        return cls.HANDLED_TYPE_COMPATIBILITY_CHECK.get(np_function, None)

    @classmethod
    def register_array_function(cls, np_function: Callable) -> Callable:
        """
        A function decorator whose argument is the Numpy function to override and the function that will override it.
        This registers the `np_function` with the function that it decorates.

        Parameters
        ----------
        np_function: callable
            The overridden Numpy array function.

        Returns
        -------
        callable
            The decorator that registers `np_function` with the decorated function.
        """
        # @wraps(np_function)
        def decorator(func):
            cls.HANDLED_FUNCTIONS[np_function] = func
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug(f'{cls.__name__}.register_array_function: registered {repr(func.__name__)} in place of {np_function.__name__}')
            # return the decorated function unchanged; only the registry is updated
            return func

        return decorator

    @classmethod
    def register_array_function_type_compatibility(cls, np_function: Callable) -> Callable:
        """
        This registers the type compatibility check for the `np_function` with the function that it decorates.

        Parameters
        ----------
        np_function: callable
            The overridden Numpy array function.

        Returns
        -------
        callable
            The decorator that registers the type compatibility check for the `np_function` with the decorated function.
        """
        # @wraps(np_function)
        def decorator(check_type_compatibility):
            cls.HANDLED_TYPE_COMPATIBILITY_CHECK[np_function] = check_type_compatibility
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug(f'{cls.__name__}.register_array_function_type_compatibility: registered type compatibility check {repr(check_type_compatibility)} for array function {np_function.__name__}')
            return check_type_compatibility

        return decorator

    @classmethod
    def deregister_array_function(cls, np_function: Callable) -> None:
        """
        Deregistration of the Numpy function and type compatibility override.

        Parameters
        ----------
        np_function: callable
            The overridden Numpy array function.
        """
        # silently a no-op if np_function was never registered
        if cls.get_array_function(np_function) is not None:
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug(f'{cls.__name__}.deregister_array_function: deregistered {repr(np_function.__name__)}')
            del cls.HANDLED_FUNCTIONS[np_function]

    @classmethod
    def deregister_array_function_type_compatibility(cls, np_function: Callable) -> None:
        """
        Deregistration of the Numpy function type compatibility override.

        Parameters
        ----------
        np_function: callable
            The overridden Numpy array function.
        """
        # silently a no-op if no type compatibility check was registered
        if cls.HANDLED_TYPE_COMPATIBILITY_CHECK.get(np_function, None) is not None:
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug(f'{cls.__name__}.deregister_array_function_type_compatibility: deregistered {repr(np_function.__name__)}')
            del cls.HANDLED_TYPE_COMPATIBILITY_CHECK[np_function]

    @classmethod
    def deregister(cls, np_function: Callable) -> None:
        # remove both the override and its type compatibility check
        cls.deregister_array_function(np_function)
        cls.deregister_array_function_type_compatibility(np_function)
#--------------------------------------------------------------------------
@classmethod
def _possibly_warn(cls, warning_string: str) -> Optional[bool]:
    """
    Route a warning according to the class-wide ``WarningLevel`` setting.

    Level 0 silently ignores the message and returns False; level 1 emits a
    Python warning and returns True; any higher level raises TypeError.
    """
    level = cls.WarningLevel
    if level == 0:
        return False
    if level == 1:
        warnings.warn(warning_string)
        return True
    raise TypeError(warning_string)
#--------------------------------------------------------------------------
def __new__(cls, arr, **kwargs):
    """
    Construct a FastArray from ``arr``.

    Accepts an existing ndarray (rewrapped without copy when possible) or any
    sequence np.asanyarray can handle. The keyword ``unicode=True`` keeps 'U'
    dtype arrays as unicode; otherwise they are converted to byte strings.
    Warns (per WarningLevel) on unsupported dtypes, multiple non-trivial
    dimensions, and strided input.
    """
    allow_unicode = kwargs.get('unicode', False)
    # 'unicode' is our own kwarg; strip it before forwarding kwargs to numpy
    try:
        del kwargs['unicode']
    except:
        pass

    # If already a numpy array no need to call asany
    if isinstance(arr, np.ndarray) and len(kwargs) == 0:
        instance = arr
        if isinstance(instance, cls) and instance.dtype.char != 'U':
            if instance.dtype.char not in NumpyCharTypes.Supported:
                cls._possibly_warn(f"FastArray contains an unsupported type '{instance.dtype}'. Problems may occur. Consider categoricals.")
            # if already a FastArray, do not rewrap this
            return instance
    else:
        # flip the list or other object to a numpy array
        instance = np.asanyarray(arr, **kwargs)

    if not allow_unicode and instance.dtype.char == 'U':
        # default policy: store strings as bytes ('S'), not unicode
        try:
            instance = np.asarray(instance, dtype='S')
        except:
            pass

    if len(instance.shape) == 0:
        # zero-dim scalar result: promote to a length-1 array when supported
        if instance.dtype.char in NumpyCharTypes.Supported:
            instance = np.asanyarray([instance], **kwargs)
        else:
            # np.asarray on a set will return an object of 1
            if isinstance(arr, set):
                instance = np.asarray(list(arr), **kwargs)
            else:
                raise TypeError(f"FastArray cannot initialize {arr}")

    if instance.ndim > 1:
        # only one dimension can be greater than one
        if cls._check_ndim(instance) > 1:
            cls._possibly_warn(FastArray.WarningDict["multiple_dimensions"].format(instance.shape))
    elif not (instance.flags.f_contiguous or instance.flags.c_contiguous):
        # copy should eliminate strides problem
        instance = instance.copy()
        cls._possibly_warn(f"FastArray initialized with strides.")

    # for arrays that can cause problems but we allow now
    if cls.NoTolerance:
        if not (instance.flags.f_contiguous or instance.flags.c_contiguous):
            # copy should eliminate strides problem
            instance = instance.copy()
            cls._possibly_warn(f"FastArray initialized with strides.")

        if instance.dtype.char not in NumpyCharTypes.Supported:
            cls._possibly_warn(f"FastArray contains an unsupported type '{instance.dtype}'. Problems may occur. Consider categoricals.")

    return instance.view(cls)
#--------------------------------------------------------------------------
def __reduce__(self):
    '''
    Used for pickling.

    For just a FastArray we pass back the view of the np.ndarray, which then knows how to pickle itself.
    Subclasses that implement _build_sds_meta_data serialize via their SDS metadata instead.
    When CompressPickle is set, the data is compressed to a single uint8 array
    and reconstructed by FA_FROM_UINT8 on load.

    NOTE: I think there is a faster way.. possibly returning a byte string.
    '''
    cls = type(self)

    # check if subclassed routine knows how to serialize itself
    if hasattr(self, '_build_sds_meta_data'):
        try:
            name = self._name
        except:
            name = 'unknown'
        tups = self._build_sds_meta_data(name)
        return (cls._load_from_sds_meta_data, (name, self.view(FastArray), tups[1], tups[0]))

    # set to true to turn compression on
    if cls.CompressPickle and len(self) > 0:
        # create a single compressed array of uint8
        carr = rc.CompressDecompressArrays([self], 0)[0]
        return (FA_FROM_UINT8, (carr.view(np.ndarray),))
    else:
        return (cls.__new__, (cls, self.view(np.ndarray),))
#--------------------------------------------------------------------------
@classmethod
def _check_ndim(cls, instance):
    '''
    Count how many dimensions of ``instance`` have a length greater than one.

    Multidimensional FastArrays are only partially supported; callers use this
    count to warn the user when more than one dimension is non-trivial.

    Parameters
    ----------
    instance : np.ndarray
        The array whose shape is inspected.

    Returns
    -------
    int
        The number of axes with length > 1.
    '''
    # idiomatic replacement for the original manual while-loop over shape
    return sum(1 for axis_len in instance.shape if axis_len > 1)
#--------------------------------------------------------------------------
def get_name(self):
    '''
    Return the name assigned to this array, if any.

    FastArray can hold a name. When a Dataset puts a FastArray into a column,
    it may receive a name.

    Returns
    -------
    str, optional
        The array name; or None if the array has not been named.

    See Also
    --------
    set_name
    '''
    # getattr with a default replaces the original try/bare-except block
    return getattr(self, '_name', None)
#--------------------------------------------------------------------------
def set_name(self, name):
    '''
    Assign a name to this array and return the array itself.

    When a Dataset puts a FastArray into a named column, it may call
    ``set_name()``. If the same FastArray is placed in two datasets under two
    different column names, another FastArray wrapper object is created to
    hold the second name while the underlying array remains shared.

    Returns
    -------
    FastArray
        Returns 'self', so this will be the same type as the instance it's called on.

    Examples
    --------
    >>> a = rt.arange(100)
    >>> a.set_name('test')

    See Also
    --------
    FastArray.get_name
    '''
    self._name = name
    return self
#--------------------------------------------------------------------------
@staticmethod
def _FastFunctionsOn():
    # enable riptable's ufunc interception globally (see FasterUFunc class var)
    if FastArray.Verbose > 0: print(f"FASTFUNC ON: fastfunc was {FastArray.FasterUFunc}")
    FastArray.FasterUFunc = True

@staticmethod
def _FastFunctionsOff():
    # fall back to plain numpy behavior for ufuncs
    if FastArray.Verbose > 0: print(f"FASTFUNC OFF: fastfunc was {FastArray.FasterUFunc}")
    FastArray.FasterUFunc = False
@property
def _np(self):
    '''
    View of this array as a plain ``numpy.ndarray`` (no data copy).
    '''
    return self.view(np.ndarray)
# --- console convenience toggles for verbosity and ufunc interception ---

@staticmethod
def _V0():
    # quiet mode
    print("setting verbose level to 0")
    FastArray.Verbose = 0
    return FastArray.Verbose

@staticmethod
def _V1():
    # default verbosity
    print("setting verbose level to 1")
    FastArray.Verbose = 1
    return FastArray.Verbose

@staticmethod
def _V2():
    # extra debug output
    print("setting verbose level to 2")
    FastArray.Verbose = 2
    return FastArray.Verbose

@staticmethod
def _ON():
    '''
    enable intercepting array ufunc
    '''
    return FastArray._FastFunctionsOn()

@staticmethod
def _OFF():
    '''
    disable intercepting of array ufunc
    '''
    return FastArray._FastFunctionsOff()
@staticmethod
def _TON():
    # enable multithreading in the riptide_cpp layer (mode 0 = threads on)
    print("Threading on")
    return rc.ThreadingMode(0)

@staticmethod
def _TOFF():
    # disable multithreading in the riptide_cpp layer (mode 1 = threads off)
    print("Threading off")
    return rc.ThreadingMode(1)
@staticmethod
def _RON(quiet=False):
    '''
    Turn on recycling.

    Parameters
    ----------
    quiet: bool, optional
        When True, suppress the console message.

    Returns
    -------
    True if recycling was previously on, else False
    '''
    if not quiet:
        print("Recycling numpy arrays on")
    # mode 0 = recycling enabled in the riptide_cpp layer
    result = rc.SetRecycleMode(0)
    FastArray.Recycle = True
    return result

@staticmethod
def _ROFF(quiet=False):
    '''
    Turn off recycling.

    Parameters
    ----------
    quiet: bool, optional
        When True, suppress the console message.

    Returns
    -------
    True if recycling was previously on, else False
    '''
    if not quiet:
        print("Recycling numpy arrays off")
    # mode 1 = recycling disabled in the riptide_cpp layer
    result = rc.SetRecycleMode(1)
    FastArray.Recycle = False
    return result
@staticmethod
def _RDUMP():
    '''
    Displays to server's stdout.

    Returns
    -------
    Total size of items not in use
    '''
    return rc.RecycleDump()

@staticmethod
def _GCNOW(timeout: int = 0):
    '''
    Pass the garbage collector timeout value to cleanup.
    Passing 0 will force an immediate garbage collection.

    Returns
    -------
    Dictionary of memory heuristics including 'TotalDeleted'
    '''
    import gc
    gc.collect()
    result = rc.RecycleGarbageCollectNow(timeout)
    totalDeleted = result['TotalDeleted']
    if totalDeleted > 0:
        # NOTE(review): the recursive call keeps collecting until nothing is
        # deleted, but its return value is discarded — the stats returned are
        # from the first pass. Presumably intentional; worth confirming.
        FastArray._GCNOW(timeout)
    return result

@staticmethod
def _GCSET(timeout: int = 100):
    '''
    Pass the garbage collector timeout value to expire.
    The timeout value is roughly in 2/5 secs.
    A value of 100 is usually about 40 seconds.

    Returns
    -------
    Previous timespan
    '''
    return rc.RecycleSetGarbageCollectTimeout(timeout)

@staticmethod
def _LON():
    '''Turn the math ledger on to record all array math routines'''
    return TypeRegister.MathLedger._LedgerOn()

@staticmethod
def _LOFF():
    '''Turn the math ledger off'''
    return TypeRegister.MathLedger._LedgerOff()

@staticmethod
def _LDUMP(dataset=True):
    '''Print out the math ledger'''
    return TypeRegister.MathLedger._LedgerDump(dataset=dataset)

@staticmethod
def _LDUMPF(filename):
    '''Save the math ledger to a file'''
    return TypeRegister.MathLedger._LedgerDumpFile(filename)

@staticmethod
def _LCLEAR():
    '''Clear all the entries in the math ledger'''
    return TypeRegister.MathLedger._LedgerClear()
#--------------------------------------------------------------------------
def __setitem__(self, fld, value):
    """
    Used on the left hand side of

    arr[fld] = value

    This routine tries to convert invalid dtypes so that invalids are preserved when setting.
    The mbset portion of this is not written (which would not raise an IndexError on out of bounds).

    Parameters
    ----------
    fld: scalar, boolean, fancy index mask, slice, sequence, or list
    value: scalar, sequence or dataset value as follows
        sequence can be list, tuple, np.ndarray, FastArray

    Raises
    ------
    IndexError
    """
    newvalue = None

    # try to make an array, even if array of 1
    if np.isscalar(value):
        if not isinstance(value, (str, bytes, np.bytes_, np.str_)):
            # convert to array of 1 item
            newvalue = FastArray([value])
    elif isinstance(value, (list, tuple)):
        # convert to numpy array
        newvalue = FastArray(value, unicode=True)
    elif isinstance(value, np.ndarray):
        # just reference it
        newvalue = value

    if newvalue is not None:
        # now we have a numpy array.. convert the dtype to match us
        # this should take care of invalids
        # convert first 14 common types (bool, ints, floats)
        if newvalue.dtype != self.dtype and newvalue.dtype.num <= 13:
            newvalue = newvalue.astype(self.dtype)

        # check for boolean array since we do not handle fancy index yet
        if isinstance(fld, np.ndarray) and fld.dtype.num == 0:
            if self._is_not_supported(newvalue):
                # make it contiguous
                newvalue = newvalue.copy()

            # call our setitem, it will return False if it fails
            if rc.SetItem(self, fld, newvalue):
                return
        try:
            np.ndarray.__setitem__(self, fld, newvalue)
        except Exception:
            # odd ball cases handled here like ufunc tests
            np.ndarray.__setitem__(self, fld, value)
        return

    # punt to normal numpy
    np.ndarray.__setitem__(self, fld, value)
#--------------------------------------------------------------------------
def __getitem__(self, fld):
    '''
    riptable has special routines to handle array input in the indexer
    (boolean masks and integer fancy indexes route to MathLedger kernels).
    Everything else will go to numpy getitem.
    '''
    if isinstance(fld, np.ndarray):
        if fld.dtype == np.bool_:
            # make sure no striding
            # NOTE: will fail on self.dtype.byteorder as little endian
            if self.flags.f_contiguous:
                return TypeRegister.MathLedger._INDEX_BOOL(self, fld)

        # if we have fancy indexing and we support the array type, make sure we do not have stride problem
        if fld.dtype.char in NumpyCharTypes.AllInteger and self.dtype.char in NumpyCharTypes.SupportedAlternate:
            if self.flags.f_contiguous and fld.flags.f_contiguous:
                if len(self.shape) == 1:
                    return TypeRegister.MathLedger._MBGET(self, fld)

        # fallback: numpy getitem via the ledger, rewrapped as FastArray
        result = TypeRegister.MathLedger._GETITEM(super(FastArray, self), fld)
        return result.view(FastArray)
    else:
        # could be a list which is often converted to an array
        # This assumes that FastArray has a sole parent, np.ndarray
        # If this changes, the super() call needs to be used
        return np.ndarray.__getitem__(self, fld)
#--------------------------------------------------------------------------
    def display_query_properties(self):
        '''
        Returns an ItemFormat object and a function for converting the FastArrays items to strings.
        Basic types: Bool, Int, Float, Bytes, String all have default formats / conversion functions.
        (see Utils.rt_display_properties)
        If a new type is a subclass of FastArray and needs to be displayed in format
        different from its underlying type, it will need to take over this routine.
        '''
        arr_type, convert_func = DisplayConvert.get_display_convert(self)
        display_format = default_item_formats.get(arr_type, ItemFormat())
        # multi-dimensional arrays get a dedicated converter; the per-item
        # converter is stashed on the format so the multi-dim one can use it
        if len(self.shape) > 1:
            display_format.convert = convert_func
            convert_func = DisplayConvert.convertMultiDims
        # add sentinel value for integer
        if display_format.invalid is None:
            # copy before mutating so the shared default ItemFormat is not changed
            display_format = display_format.copy()
            if self.dtype.char in NumpyCharTypes.AllInteger:
                display_format.invalid = INVALID_DICT[self.dtype.num]
        return display_format, convert_func
#--------------------------------------------------------------------------
    def astype(self, dtype, order='K', casting='unsafe', subok=True, copy=True):
        '''
        Cast the array to `dtype`, preferring riptable's parallel conversion.

        When the array is contiguous, the call uses default semantics
        (order='K', subok=True, copy=True) and both source and target dtypes
        are in the first 14 numeric types (bool/int/float, dtype.num <= 13),
        the conversion is done by MathLedger._AS_FA_TYPE, which understands
        riptable sentinel (invalid) values. Otherwise the work is punted to
        numpy via the ledger and the result is re-viewed as a FastArray.
        '''
        #result= super(FastArray, self).astype(dtype, order,casting,subok,copy)
        # 17 is object
        # 18 = ASCII string
        # 19 = UNICODE string
        to_num=np.dtype(dtype).num
        # check for contiguous in one or two dimensions
        if self.flags.f_contiguous or self.flags.c_contiguous:
            if order=='K' and subok and copy and self.dtype.num <= 13 and to_num <= 13:
                # perform a safe conversion understanding sentinels
                return TypeRegister.MathLedger._AS_FA_TYPE(self, to_num)
        # punt to numpy
        result=TypeRegister.MathLedger._ASTYPE(super(FastArray, self), dtype, order,casting,subok,copy)
        return result.view(FastArray)
#--------------------------------------------------------------------------
def _view_internal(self, type=None):
'''
FastArray subclasses need to take this over if they want to make a shallow copy of
a fastarray instead of viewing themselves as a fastarray (which drops their other properties).
Taking over view directly may have a lot of unintended consequences.
'''
if type is not FastArray or type is not None:
newarr=self.view(type)
# copy all the properties
newarr.__dict__ = self.__dict__.copy()
return newarr
return self.view(FastArray)
#--------------------------------------------------------------------------
    def copy(self, order='K'):
        '''
        Copy the array, using riptable's parallel copy when possible.

        The fast path (contiguous memory, order='K', numeric dtype.num <= 13)
        goes through MathLedger._AS_FA_TYPE; anything else is punted to numpy
        via the ledger and re-viewed as a FastArray.
        '''
        #result= super(FastArray, self).copy(order)
        if self.flags.f_contiguous or self.flags.c_contiguous:
            if order=='K' and self.dtype.num <= 13:
                # perform a faster multithreaded copy
                return TypeRegister.MathLedger._AS_FA_TYPE(self, self.dtype.num)
        result= TypeRegister.MathLedger._COPY(super(FastArray, self), order)
        return result.view(FastArray)
#--------------------------------------------------------------------------
    def copy_invalid(self):
        '''
        Makes a copy of the array filled with invalids.

        Returns
        -------
        FastArray
            New array of the same shape and dtype, every element set to the
            dtype's sentinel (invalid) value.

        Examples
        --------
        >>> rt.arange(5).copy_invalid()
        FastArray([-2147483648, -2147483648, -2147483648, -2147483648, -2147483648])
        >>> rt.arange(5).copy_invalid().astype(np.float32)
        FastArray([nan, nan, nan, nan, nan], dtype=float32)
        See Also:
        ---------
        FastArray.inv
        FastArray.fill_invalid
        '''
        # delegate to fill_invalid with inplace=False so a new array is returned
        return self.fill_invalid(inplace=False)
#--------------------------------------------------------------------------
    @property
    def inv(self):
        '''
        Returns the invalid value for the array.
        np.int8: -128
        np.uint8: 255
        np.int16: -32768
        ...and so on..

        Returns
        -------
        scalar
            The sentinel value associated with this array's dtype (looked up
            by dtype number in INVALID_DICT).

        Examples
        --------
        >>> rt.arange(5).inv
        -2147483648
        See Also:
        ---------
        FastArray.copy_invalid
        FastArray.fill_invalid
        INVALID_DICT
        '''
        return INVALID_DICT[self.dtype.num]
#--------------------------------------------------------------------------
    def fill_invalid(self, shape=None, dtype=None, inplace=True):
        '''
        Fills array or returns copy of array with invalid value of array's dtype or a specified one.
        Warning: by default this operation is inplace.

        Parameters
        ----------
        shape : int or tuple, optional
            Shape of the returned array; defaults to this array's shape.
            Must match the existing shape when inplace=True.
        dtype : optional
            Dtype of the returned array; defaults to this array's dtype.
            Must match the existing dtype when inplace=True.
        inplace : bool, default True
            When True, fill this array and return None; when False, return a
            new filled array.

        Examples
        --------
        >>> a=rt.arange(5).fill_invalid()
        >>> a
        FastArray([-2147483648, -2147483648, -2147483648, -2147483648, -2147483648])
        See Also:
        ---------
        FastArray.inv
        FastArray.fill_invalid
        '''
        return self._fill_invalid_internal(shape=shape, dtype=dtype, inplace=inplace)
def _fill_invalid_internal(self, shape=None, dtype=None, inplace=True, fill_val=None):
if dtype is None:
dtype = self.dtype
if shape is None:
shape = self.shape
elif not isinstance(shape, tuple):
shape = (shape,)
if fill_val is None:
inv = INVALID_DICT[dtype.num]
else:
inv = fill_val
if inplace is True:
if shape != self.shape:
raise ValueError(f"Inplace fill invalid cannot be different number of rows than existing array. Got {shape} vs. length {len(self)}")
if dtype != self.dtype:
raise ValueError(f"Inplace fill invalid cannot be different dtype than existing categorical. Got {dtype} vs. {len(self.dtype)}")
self.fill(inv)
else:
arr = full(shape, inv, dtype=dtype)
return arr
# -------------------------------------------------------------------------
    def isin(self, test_elements, assume_unique=False, invert=False):
        '''
        Calculates `self in test_elements`, broadcasting over `self` only.
        Returns a boolean array of the same shape as `self` that is True
        where an element of `self` is in `test_elements` and False otherwise.
        Parameters
        ----------
        test_elements : array_like
            The values against which to test each value of `element`.
            This argument is flattened if it is an array or array_like.
            See notes for behavior with non-array-like parameters.
        assume_unique : bool, optional
            If True, the input arrays are both assumed to be unique, which
            can speed up the calculation.  Default is False.
        invert : bool, optional
            If True, the values in the returned array are inverted, as if
            calculating `element not in test_elements`. Default is False.
            ``np.isin(a, b, invert=True)`` is equivalent to (but faster
            than) ``np.invert(np.isin(a, b))``.
        Returns
        -------
        isin : ndarray, bool
            Has the same shape as `element`. The values `element[isin]`
            are in `test_elements`.
        Note: behavior differs from pandas
        - Riptable favors bytestrings, and will make conversions from unicode/bytes to match for operations as necessary.
        - We will also accept single scalars for values.
        - Pandas series will return another series - we have no series, and will return a FastArray
        Examples
        --------
        >>> from riptable import *
        >>> a = FA(['a','b','c','d','e'], unicode=False)
        >>> a.isin(['a','b'])
        FastArray([ True, True, False, False, False])
        >>> a.isin('a')
        FastArray([ True, False, False, False, False])
        >>> a.isin({'b'})
        FastArray([ False, True, False, False, False])
        '''
        # normalize the input to an ndarray-like before testing membership
        if isinstance(test_elements, set):
            test_elements = list(test_elements)
        if not isinstance(test_elements, np.ndarray):
            # align byte string vs unicode
            if self.dtype.char in 'SU':
                if np.isscalar(test_elements):
                    test_elements = np.asarray([test_elements], dtype=self.dtype.char)
                else:
                    test_elements = np.asarray(test_elements, dtype=self.dtype.char)
            else:
                if isinstance(test_elements, tuple):
                    raise ValueError('isin does not currently support tuples. In the future a tuple will be used to represent a multi-key.')
                test_elements = rc.AsFastArray(test_elements)
        try:
            # optimization: if we have just one element, we can just parallel compare that one element
            if len(test_elements) ==1:
                # string comparison to int will fail
                result = self == test_elements[0]
                # check for failed result
                if np.isscalar(result):
                    # the == comparison returned a scalar (failed); fall back to ismember
                    result = ismember(self, test_elements)[0]
            else:
                result = ismember(self, test_elements)[0]
            if invert:
                # flip the mask in place rather than allocating a new array
                np.logical_not(result, out=result)
            return result
        except Exception:
            # punt non-supported types to numpy
            return np.isin(self._np, test_elements, assume_unique=assume_unique, invert=invert)
# -------------------------------------------------------------------------
def between(self, low, high, include_low:bool=True, include_high:bool=False):
"""
Determine which elements of the array are in a a given interval.
Return a boolean mask indicating which elements are between `low` and `high` (including/excluding endpoints
can be controlled by the `include_low` and `include_high` arguments).
Default behaviour is equivalent to (self >= low) & (self < high).
Parameters
----------
low: scalar, array_like
Lower bound for test interval. If array, should have the same size as `self` and comparisons are done elementwise.
high: scalar, array_like
Upper bound for test interval. If array, should have the same size as `self` and comparisons are done elementwise.
include_low: bool
Should the left endpoint included in the test interval
include_high: bool
Should the right endpoint included in the test interval
Returns
-------
array_like[bool]
An boolean mask indicating if the associated elements are in the test interval
"""
low = asanyarray(low)
high = asanyarray(high)
if include_low:
ret = self >= low
else:
ret = self > low
if include_high:
ret &= self <= high
else:
ret &= self < high
return ret
#--------------------------------------------------------------------------
    def sample(self, N=10, filter=None):
        '''
        Return a random sample of up to N elements (delegates to rt.sample).

        Examples
        --------
        >>> a=rt.arange(10)
        >>> a.sample(3)
        FastArray([0, 4, 9])
        '''
        return sample(self, N=N, filter=filter)
#--------------------------------------------------------------------------
    def duplicated(self, keep='first', high_unique=False):
        '''
        See pandas.Series.duplicated
        Duplicated values are indicated as True values in the resulting
        FastArray. Either all duplicates, all except the first or all except the
        last occurrence of duplicates can be indicated.
        Parameters
        ----------
        keep : {'first', 'last', False}, default 'first'
            - 'first' : Mark duplicates as True except for the first occurrence.
            - 'last' : Mark duplicates as True except for the last occurrence.
            - False : Mark values with just one occurrence as False.

        Raises
        ------
        ValueError
            If `keep` is not 'first', 'last', or False.
        '''
        arr = self
        # 'last' is implemented by reversing, running the 'first' logic, and
        # reversing the result back at the end
        if keep == 'last':
            arr = arr[::-1].copy()
        elif keep is not False and keep != 'first':
            raise ValueError(f'keep must be either "first", "last" or False')
        # create an return array all set to True
        result = ones(len(arr), dtype=np.bool_)
        # group identical values; ifirstkey gives the first occurrence of each group
        g = Grouping(arr._fa if hasattr(arr,'_fa') else arr)
        if keep is False:
            # search for groups with a count of 1
            result[g.ifirstkey[g.ncountgroup[1:]==1]] = False
        else:
            result[g.ifirstkey] = False
        if keep == 'last':
            result= result[::-1].copy()
        return result
#--------------------------------------------------------------------------
    def save(self, filepath: Union[str, os.PathLike], share: Optional[str] = None, compress: bool = True, overwrite: bool = True, name: Optional[str] = None):
        '''
        Save a single array in an .sds file.
        Parameters
        ----------
        filepath: str or os.PathLike
            Destination path of the .sds file.
        share : str, optional, default None
            Shared-memory name to save into instead of (or in addition to) disk.
        compress : bool, default True
            Whether to compress the saved data.
        overwrite : bool, default True
            Whether an existing file at `filepath` may be replaced.
        name : str, optional, default None
            Name to store the array under inside the file.
        See Also
        --------
        rt_sds.save_sds
        '''
        # thin wrapper: all of the work is done by the module-level save_sds
        save_sds(filepath, self, share=share, compress=compress, overwrite=overwrite, name=name)
#--------------------------------------------------------------------------
    def reshape(self, *args, **kwargs):
        '''
        Reshape via numpy, then force a contiguous copy if the result is strided.

        riptable's C routines cannot handle strided arrays, so a reshape that
        produces a non-contiguous view is materialized with .copy().
        '''
        result = super(FastArray, self).reshape(*args, **kwargs)
        # this warning happens too much now
        #if FastArray._check_ndim(result) != 1:
        #    warnings.warn(FastArray.WarningDict["multiple_dimensions"].format(result.shape))
        if not (result.flags.c_contiguous or result.flags.f_contiguous):
            # fix strides problem
            return result.copy()
        return result
#--------------------------------------------------------------------------
    # Thin wrappers around the module-level rt.repeat / rt.tile functions.
    def repeat(self, repeats, axis=None):
        ''' see rt.repeat '''
        return repeat(self, repeats, axis=axis)
    #--------------------------------------------------------------------------
    def tile(self, reps):
        ''' see rt.tile '''
        return tile(self, reps)
#--------------------------------------------------------------------------
def _kwarg_check(self, *args, **kwargs):
# we handle dtype
if ( "ddof" in kwargs and kwargs["ddof"]!=1 ) or "axis" in kwargs or "keepdims" in kwargs:
return True
#--------------------------------------------------------------------------
    def _reduce_check(self, reduceFunc: REDUCE_FUNCTIONS, npFunc, *args, **kwargs):
        '''
        Common driver for all reductions (sum/mean/std/min/...).

        Arg2: npFunc pass in None if no numpy equivalent function

        When the kwargs require numpy semantics (axis/keepdims/non-default ddof),
        the call is forwarded to `npFunc`; otherwise the riptable reduction runs
        and the scalar result is converted to a dtype-appropriate numpy scalar.
        '''
        if npFunc is not None and self._kwarg_check(*args, **kwargs):
            # TODO: add to math ledger
            # set ddof=1 if NOT set which is FastArray default to match matlab/pandas
            if 'ddof' not in kwargs and reduceFunc in [
                REDUCE_FUNCTIONS.REDUCE_VAR,
                REDUCE_FUNCTIONS.REDUCE_NANVAR,
                REDUCE_FUNCTIONS.REDUCE_STD,
                REDUCE_FUNCTIONS.REDUCE_NANSTD]:
                kwargs['ddof']=1
            result = npFunc(self._np, *args, **kwargs)
            return result
        result = TypeRegister.MathLedger._REDUCE(self, reduceFunc)
        # It's possible there was no result returned from the reduction function;
        # e.g. if the input was empty. If the function being called is well-defined
        # for empty lists -- i.e. it is a reduction operation with a defined
        # identity element -- set the result to the identity element so the rest of
        # the logic below will work correctly.
        # If there is no identity element for this operation, raise an exception to
        # let the user know; we'd raise an exception below *anyway*, and this allows
        # us to provide the user with a more-descriptive/actionable error message.
        if result is None:
            op_identity_val = type(self)._reduce_op_identity_value.get(reduceFunc, None)
            if op_identity_val is not None:
                result = op_identity_val
            else:
                raise ValueError(f"Reduction '{str(reduceFunc)}' does not have an identity element so cannot be computed over an empty array.")
        # Was an output dtype was explicitly specified?
        dtype = kwargs.get('dtype', None)
        if dtype is not None:
            # user forced dtype return value
            return dtype(result)
        #preserve type for min/max/nanmin/nanmax
        if reduceFunc in [
            REDUCE_FUNCTIONS.REDUCE_MIN,
            REDUCE_FUNCTIONS.REDUCE_NANMIN,
            REDUCE_FUNCTIONS.REDUCE_MAX,
            REDUCE_FUNCTIONS.REDUCE_NANMAX]:
            return self.dtype.type(result)
        #internally numpy expects a dtype returned for nanstd and other calculations
        if isinstance(result,(int, np.integer)):
            # for uint64, the high bit must be preserved
            if self.dtype.char in NumpyCharTypes.UnsignedInteger64:
                return np.uint64(result)
            return np.int64(result)
        return np.float64(result)
#---------------------------------------------------------------------------
    def _compare_check(self, func, other):
        '''
        Run a comparison ufunc, first coercing `other` to be string-compatible.

        For string-typed arrays ('S'/'U'), a python str is encoded to bytes when
        this array is a bytestring array, and a list is converted to a FastArray
        so the elementwise comparison works. The result is re-viewed as a
        FastArray when numpy hands back a plain ndarray.
        '''
        # a user might type in a string and we want a bytes string
        if self.dtype.char in 'SU':
            if isinstance(other, str):
                if (self.dtype.char=='S'):
                    # we are byte strings but scalar unicode passed in
                    other = str.encode(other)
            if isinstance(other, list):
                # convert the list so a comparison can be made to the byte string array
                other = FastArray(other)
            result= func(other)
            #NOTE: numpy does call FA ufunc for strings
            if not isinstance(result, FastArray) and isinstance(result,np.ndarray):
                result = result.view(FastArray)
            return result
        result= func(other)
        return result
    # rich comparisons all funnel through _compare_check for string coercion
    def __ne__(self,other): return self._compare_check(super().__ne__,other)
    def __eq__(self, other): return self._compare_check(super().__eq__,other)
    def __ge__(self, other): return self._compare_check(super().__ge__,other)
    def __gt__(self, other): return self._compare_check(super().__gt__,other)
    def __le__(self, other): return self._compare_check(super().__le__,other)
    def __lt__(self, other): return self._compare_check(super().__lt__,other)
#---------------------------------------------------------------------------
    def str_append(self, other):
        '''
        Elementwise string concatenation of two same-dtype string arrays.

        Raises TypeError when dtypes differ.
        '''
        if self.dtype.num == other.dtype.num:
            func=TypeRegister.MathLedger._BASICMATH_TWO_INPUTS
            return func((self, other), MATH_OPERATION.ADD, self.dtype.num)
        raise TypeError("cannot concat")
    #---------------------------------------------------------------------------
    def squeeze(self, *args, **kwargs):
        # squeeze on the plain-numpy view (result is not re-viewed as FastArray)
        return self._np.squeeze(*args, **kwargs)
    #---------------------------------------------------------------------------
    def iscomputable(self):
        # True when riptable can do math on this array's dtype
        return TypeRegister.is_computable(self)
#############################################
# nep-18 array function protocol implementation
#############################################
    @classmethod
    def _py_number_to_np_dtype(cls, val: Union[int, np.integer, None], dtype: np.dtype) -> Union[np.uint, np.int64, np.float64, None]:
        """Convert a python type to numpy dtype.
        Only handles integers.

        Integers become np.uint64 (when `dtype` is unsigned 64-bit, to keep the
        high bit) or np.int64; any other non-None value becomes np.float64;
        None passes through unchanged.
        """
        if val is not None:
            # internally numpy expects a dtype returned for nanstd and other calculations
            if isinstance(val, (int, np.integer)):
                # for uint64, the high bit must be preserved
                if dtype.char in NumpyCharTypes.UnsignedInteger64:
                    return np.uint64(val)
                return np.int64(val)
            return np.float64(val)
        return val
    # NEP-18 handlers: each intercepts the corresponding numpy function for
    # FastArray arguments and routes it to riptable's C reduction (rc.Reduce).
    # `axis`/`out` arguments are accepted for signature compatibility only.
    @staticmethod
    @_ArrayFunctionHelper.register_array_function(np.argmax)
    def _argmax(a, axis=None, out=None):
        result = rc.Reduce(a, REDUCE_FUNCTIONS.REDUCE_ARGMAX, 0)
        return FastArray._py_number_to_np_dtype(result, a.dtype)
    @staticmethod
    @_ArrayFunctionHelper.register_array_function(np.nanargmax)
    def _nanargmax(a, axis=None):
        result = rc.Reduce(a, REDUCE_FUNCTIONS.REDUCE_NANARGMAX, 0)
        return FastArray._py_number_to_np_dtype(result, a.dtype)
    @staticmethod
    @_ArrayFunctionHelper.register_array_function(np.argmin)
    def _argmin(a, axis=None, out=None):
        result = rc.Reduce(a, REDUCE_FUNCTIONS.REDUCE_ARGMIN, 0)
        return FastArray._py_number_to_np_dtype(result, a.dtype)
    @staticmethod
    @_ArrayFunctionHelper.register_array_function(np.nanargmin)
    def _nanargmin(a, axis=None):
        result = rc.Reduce(a, REDUCE_FUNCTIONS.REDUCE_NANARGMIN, 0)
        return FastArray._py_number_to_np_dtype(result, a.dtype)
    @staticmethod
    @_ArrayFunctionHelper.register_array_function(np.empty_like)
    def _empty_like(array: 'FastArray',
                    dtype: Optional[Union[str, np.dtype]] = None,
                    order: str = 'K',
                    subok: bool = True,
                    shape: Optional[Union[int, Sequence[int]]] = None
                    ) -> 'FastArray':
        # run numpy's empty_like on the plain ndarray view, through the ledger
        array = array._np
        result = rc.LedgerFunction(np.empty_like, array, dtype=dtype, order=order, subok=subok, shape=shape)
        return result
    # NEP-18 handlers for the min/max/mean/std families. min/max preserve the
    # input dtype via a.dtype.type; mean/std convert through
    # _py_number_to_np_dtype. Extra numpy kwargs are accepted but unused here.
    @staticmethod
    @_ArrayFunctionHelper.register_array_function(np.max)
    def _max(a, axis=None, out=None, keepdims=None, initial=None, where=None):
        result = rc.Reduce(a, REDUCE_FUNCTIONS.REDUCE_MAX, 0)
        if result is not None:
            return a.dtype.type(result)
        return result
    @staticmethod
    @_ArrayFunctionHelper.register_array_function(np.nanmax)
    def _nanmax(a, axis=None, out=None, keepdims=None):
        result = rc.Reduce(a, REDUCE_FUNCTIONS.REDUCE_NANMAX, 0)
        if result is not None:
            return a.dtype.type(result)
        return result
    @staticmethod
    @_ArrayFunctionHelper.register_array_function(np.mean)
    def _mean(a, axis=None, dtype=None, out=None, keepdims=None):
        result = rc.Reduce(a, REDUCE_FUNCTIONS.REDUCE_MEAN, 0)
        return FastArray._py_number_to_np_dtype(result, a.dtype)
    @staticmethod
    @_ArrayFunctionHelper.register_array_function(np.nanmean)
    def _nanmean(a, axis=None, dtype=None, out=None, keepdims=None):
        result = rc.Reduce(a, REDUCE_FUNCTIONS.REDUCE_NANMEAN, 0)
        return FastArray._py_number_to_np_dtype(result, a.dtype)
    @staticmethod
    @_ArrayFunctionHelper.register_array_function(np.min)
    def _min(a, axis=None, out=None, keepdims=None, initial=None, where=None):
        result = rc.Reduce(a, REDUCE_FUNCTIONS.REDUCE_MIN, 0)
        if result is not None:
            return a.dtype.type(result)
        return result
    @staticmethod
    @_ArrayFunctionHelper.register_array_function(np.nanmin)
    def _nanmin(a, axis=None, out=None, keepdims=None):
        result = rc.Reduce(a, REDUCE_FUNCTIONS.REDUCE_NANMIN, 0)
        if result is not None:
            return a.dtype.type(result)
        return result
    @staticmethod
    @_ArrayFunctionHelper.register_array_function(np.std)
    def _std(a, axis=None, dtype=None, out=None, ddof=None, keepdims=None):
        result = rc.Reduce(a, REDUCE_FUNCTIONS.REDUCE_STD, 0)
        return FastArray._py_number_to_np_dtype(result, a.dtype)
    @staticmethod
    @_ArrayFunctionHelper.register_array_function(np.nanstd)
    def _nanstd(a, axis=None, dtype=None, out=None, ddof=None, keepdims=None):
        result = rc.Reduce(a, REDUCE_FUNCTIONS.REDUCE_NANSTD, 0)
        return FastArray._py_number_to_np_dtype(result, a.dtype)
    @staticmethod
    @_ArrayFunctionHelper.register_array_function(np.round)
    @_ArrayFunctionHelper.register_array_function(np.around)
    @_ArrayFunctionHelper.register_array_function(np.round_)  # N.B, round_ is an alias for around
    def _round_(a, decimals=None, out=None):
        # NEP-18 handler for np.round / np.around / np.round_.
        # TODO handle `decimal` and `out` arguments
        # If callers decide to use this FastArray staticmethod outside the scope of array function protocol
        # provide argument checks since it may become unclear when things fail at the C extension layer.
        if not isinstance(a, FastArray):
            raise ValueError(f'{FastArray.__name__}._round_ expected FastArray subtype, got {type(a)}')
        original_dtype = a.dtype
        a = a.astype(np.float64)
        fast_function = gUnaryUFuncs.get(np.round, None)
        if fast_function is None:
            raise ValueError(f'{FastArray.__name__}._round_ unhandled array function {np.round}\nKnown numpy array function to riptable functions: {repr(gUnaryUFuncs)}')
        # For MATH_OPERATION.ROUND, _BASICMATH_ONE_INPUT returns an array `array(None, dtype=object)`
        # if the input dtype is not a float64. As a workaround cast to float64 dtype, perform the operation,
        # then cast back to the original dtype.
        result = TypeRegister.MathLedger._BASICMATH_ONE_INPUT(a, fast_function, 0)
        if not isinstance(result, FastArray) and isinstance(result, np.ndarray):
            # re-view plain ndarray output as a FastArray
            result = result.view(FastArray)
        if result.dtype != original_dtype:
            result = result.astype(original_dtype)
        return result
    # NEP-18 handlers for sum/var families; results are converted to a
    # dtype-appropriate numpy scalar via _py_number_to_np_dtype.
    @staticmethod
    @_ArrayFunctionHelper.register_array_function(np.sum)
    def _sum(a, axis=None, dtype=None, out=None, keepdims=None, initial=None, where=None):
        result = rc.Reduce(a, REDUCE_FUNCTIONS.REDUCE_SUM, 0)
        return FastArray._py_number_to_np_dtype(result, a.dtype)
    @staticmethod
    @_ArrayFunctionHelper.register_array_function(np.nansum)
    def _nansum(a, axis=None, dtype=None, out=None, keepdims=None):
        result = rc.Reduce(a, REDUCE_FUNCTIONS.REDUCE_NANSUM, 0)
        return FastArray._py_number_to_np_dtype(result, a.dtype)
    @staticmethod
    @_ArrayFunctionHelper.register_array_function(np.var)
    def _var(a, axis=None, dtype=None, out=None, ddof=None, keepdims=None):
        result = rc.Reduce(a, REDUCE_FUNCTIONS.REDUCE_VAR, 0)
        return FastArray._py_number_to_np_dtype(result, a.dtype)
    @staticmethod
    @_ArrayFunctionHelper.register_array_function(np.nanvar)
    def _nanvar(a, axis=None, dtype=None, out=None, ddof=None, keepdims=None):
        result = rc.Reduce(a, REDUCE_FUNCTIONS.REDUCE_NANVAR, 0)
        return FastArray._py_number_to_np_dtype(result, a.dtype)
#############################################
# Helper section
#############################################
    # Convenience one-liners forwarding to numpy / riptable module functions.
    def abs(self, *args, **kwargs): return np.abs(self, *args, **kwargs)
    def median(self, *args, **kwargs): return np.median(self, *args, **kwargs)
    def unique(self, *args, **kwargs): return unique(self, *args, **kwargs)
    def clip_lower(self, a_min, **kwargs): return self.clip(a_min, None, **kwargs)
    def clip_upper(self, a_max, **kwargs): return self.clip(None, a_max, **kwargs)
    def sign(self, **kwargs): return np.sign(self, **kwargs)
    def trunc(self, **kwargs): return np.trunc(self, **kwargs)
    def where(self, condition, y=np.nan, **kwargs): return where(condition, self, y, **kwargs)
    def count(self, sorted=True):
        '''
        Returns the unique counts
        Same as calling.unique(return_counts=True)
        Other Parameters
        ----------------
        sorted=True, set to False for first appearance
        Returns
        -------
        Dataset
            Two columns: the unique values (labeled) and their counts.
        Examples
        --------
        >>> a=arange(10) %3
        >>> a.count()
        *Unique Count
        ------- -----
        0 4
        1 3
        2 3
        '''
        unique_counts= unique(self, sorted=sorted, return_counts=True)
        # use the array's name for the label column, falling back to 'Unique'
        name=self.get_name()
        if name is None: name = 'Unique'
        ds= TypeRegister.Dataset({name: unique_counts[0], 'Count': unique_counts[1]})
        ds.label_set_names([name])
        return ds
#############################################
# Rolling section (cannot handle strides)
#############################################
def rolling_sum(self, window:int = 3): return rc.Rolling(self, ROLLING_FUNCTIONS.ROLLING_SUM, window)
def rolling_nansum(self, window:int = 3): return rc.Rolling(self, ROLLING_FUNCTIONS.ROLLING_NANSUM, window)
def rolling_mean(self, window:int = 3): return rc.Rolling(self, ROLLING_FUNCTIONS.ROLLING_MEAN, window)
def rolling_nanmean(self, window:int = 3): return rc.Rolling(self, ROLLING_FUNCTIONS.ROLLING_NANMEAN, window)
def rolling_var(self, window:int = 3): return rc.Rolling(self, ROLLING_FUNCTIONS.ROLLING_VAR, window)
def rolling_nanvar(self, window:int = 3): return rc.Rolling(self, ROLLING_FUNCTIONS.ROLLING_NANVAR, window)
def rolling_std(self, window:int = 3): return rc.Rolling(self, ROLLING_FUNCTIONS.ROLLING_STD, window)
def rolling_nanstd(self, window:int = 3): return rc.Rolling(self, ROLLING_FUNCTIONS.ROLLING_NANSTD, window)
#############################################
# TimeWindow section (cannot handle strides), time_array must be INT64
#############################################
    def timewindow_sum(self, time_array, time_dist):
        '''
        The input array must be int64 and sorted with ever increasing values.
        Sums up the values for a given time window.
        Parameters
        ----------
        time_array: sorted integer array of timestamps
        time_dist: integer value of the time window size
        Examples
        --------
        >>> a=rt.arange(10, dtype=rt.int64)
        >>> a.timewindow_sum(a,5)
        FastArray([ 0, 1, 3, 6, 10, 15, 21, 27, 33, 39], dtype=int64)
        '''
        # dispatched to the riptable C extension
        return rc.TimeWindow(self, time_array, TIMEWINDOW_FUNCTIONS.TIMEWINDOW_SUM, time_dist)
    def timewindow_prod(self, time_array, time_dist):
        '''
        The input array must be int64 and sorted with ever increasing values.
        Multiplies up the values for a given time window.
        Parameters
        ----------
        time_array: sorted integer array of timestamps
        time_dist: integer value of the time window size
        Examples
        --------
        >>> a=rt.arange(10, dtype=rt.int64)
        >>> a.timewindow_prod(a,5)
        FastArray([ 0, 0, 0, 0, 0, 0, 720, 5040, 20160, 60480], dtype=int64)
        '''
        # dispatched to the riptable C extension
        return rc.TimeWindow(self, time_array, TIMEWINDOW_FUNCTIONS.TIMEWINDOW_PROD, time_dist)
#############################################
# Bottleneck section (only handles int32/int64/float32/float64)
# bottleneck is optional
#############################################
def move_sum(self, *args, **kwargs): return bn.move_sum(self, *args, **kwargs)
def move_mean(self, *args, **kwargs): return bn.move_mean(self, *args, **kwargs)
def move_std(self, *args, **kwargs): return bn.move_std(self, *args, **kwargs)
def move_var(self, *args, **kwargs): return bn.move_var(self, *args, **kwargs)
def move_min(self, *args, **kwargs): return bn.move_min(self, *args, **kwargs)
def move_max(self, *args, **kwargs): return bn.move_max(self, *args, **kwargs)
def move_argmin(self, *args, **kwargs): return bn.move_argmin(self, *args, **kwargs)
def move_argmax(self, *args, **kwargs): return bn.move_argmax(self, *args, **kwargs)
def move_median(self, *args, **kwargs): return bn.move_median(self, *args, **kwargs)
def move_rank(self, *args, **kwargs): return bn.move_rank(self, *args, **kwargs)
#---------------------------------------------------------------------------
def replace(self, old, new): return bn.replace(self, old, new)
def partition2(self, *args, **kwargs): return bn.partition(self, *args, **kwargs)
def argpartition2(self, *args, **kwargs): return bn.argpartition(self, *args, **kwargs)
def rankdata(self, *args, **kwargs): return bn.rankdata(self, *args, **kwargs)
def nanrankdata(self, *args, **kwargs): return bn.nanrankdata(self, *args, **kwargs)
def push(self, *args, **kwargs): return bn.push(self, *args, **kwargs)
#---------------------------------------------------------------------------
    def issorted(self):
        ''' returns True if the array is sorted otherwise False
        If the data is likely to be sorted, call the issorted property to check.
        '''
        # delegates to the module-level issorted function
        return issorted(self)
#---------------------------------------------------------------------------
    def _unary_op(self, funcnum, fancy=False):
        '''
        Run a single-input MATH_OPERATION via the ledger (used by isnan etc.).

        Parameters
        ----------
        funcnum : MATH_OPERATION
            Which unary operation to apply.
        fancy : bool, default False
            When True, convert the boolean result into a fancy index array.

        Raises
        ------
        TypeError
            If the C routine cannot handle this array's dtype (returns None).
        '''
        if self._is_not_supported(self):
            # make it contiguous
            arr = self.copy()
        else:
            arr = self
        func=TypeRegister.MathLedger._BASICMATH_ONE_INPUT
        result = func(arr, funcnum, 0)
        if result is None:
            raise TypeError(f'Could not perform operation {funcnum} on FastArray of dtype {arr.dtype}')
        if fancy:
            result = bool_to_fancy(result)
        return result
#############################################
# Boolean section
#############################################
def isnan(self, fancy=False): return self._unary_op(MATH_OPERATION.ISNAN, fancy=fancy)
def isnotnan(self, fancy=False): return self._unary_op(MATH_OPERATION.ISNOTNAN, fancy=fancy)
def isnanorzero(self, fancy=False): return self._unary_op(MATH_OPERATION.ISNANORZERO, fancy=fancy)
def isfinite(self, fancy=False): return self._unary_op(MATH_OPERATION.ISFINITE, fancy=fancy)
def isnotfinite(self, fancy=False): return self._unary_op(MATH_OPERATION.ISNOTFINITE, fancy=fancy)
def isinf(self, fancy=False): return self._unary_op(MATH_OPERATION.ISINF, fancy=fancy)
def isnotinf(self, fancy=False): return self._unary_op(MATH_OPERATION.ISNOTINF, fancy=fancy)
def isnormal(self, fancy=False): return self._unary_op(MATH_OPERATION.ISNORMAL, fancy=fancy)
def isnotnormal(self, fancy=False): return self._unary_op(MATH_OPERATION.ISNOTNORMAL, fancy=fancy)
#############################################
# Reduce section
#############################################
def nansum(self, *args, **kwargs): return self._reduce_check( REDUCE_FUNCTIONS.REDUCE_NANSUM, np.nansum, *args, **kwargs)
def mean(self, *args, **kwargs): return self._reduce_check( REDUCE_FUNCTIONS.REDUCE_MEAN, np.mean, *args, **kwargs)
def nanmean(self, *args, **kwargs): return self._reduce_check( REDUCE_FUNCTIONS.REDUCE_NANMEAN,np.nanmean, *args, **kwargs)
#---------------------------------------------------------------------------
# these function take a ddof kwarg
def var(self, *args, **kwargs): return self._reduce_check( REDUCE_FUNCTIONS.REDUCE_VAR, np.var, *args, **kwargs)
def nanvar(self, *args, **kwargs): return self._reduce_check( REDUCE_FUNCTIONS.REDUCE_NANVAR, np.nanvar, *args, **kwargs)
def std(self, *args, **kwargs): return self._reduce_check( REDUCE_FUNCTIONS.REDUCE_STD, np.std, *args, **kwargs)
def nanstd(self, *args, **kwargs): return self._reduce_check( REDUCE_FUNCTIONS.REDUCE_NANSTD, np.nanstd, *args, **kwargs)
#---------------------------------------------------------------------------
def nanmin(self, *args, **kwargs): return self._reduce_check( REDUCE_FUNCTIONS.REDUCE_NANMIN, np.nanmin, *args, **kwargs)
def nanmax(self, *args, **kwargs): return self._reduce_check( REDUCE_FUNCTIONS.REDUCE_NANMAX, np.nanmax, *args, **kwargs)
#---------------------------------------------------------------------------
def argmin(self, *args, **kwargs): return self._reduce_check( REDUCE_FUNCTIONS.REDUCE_ARGMIN, np.argmin, *args, **kwargs)
def argmax(self, *args, **kwargs): return self._reduce_check( REDUCE_FUNCTIONS.REDUCE_ARGMAX, np.argmax, *args, **kwargs)
def nanargmin(self, *args, **kwargs): return self._reduce_check( REDUCE_FUNCTIONS.REDUCE_NANARGMIN, np.nanargmin, *args, **kwargs)
def nanargmax(self, *args, **kwargs): return self._reduce_check( REDUCE_FUNCTIONS.REDUCE_NANARGMAX, np.nanargmax, *args, **kwargs)
#############################################
# Stats/ML section
#############################################
def normalize_zscore(self): return normalize_zscore(self)
def normalize_minmax(self): return normalize_minmax(self)
#############################################
# BasicMath section (to be hooked at C level now)
#############################################
#def __add__(self, value): result=rc.BasicMathTwoInputs((self, value), 1, 0); result= result if result is not None else np.add(self,value); return result
#def __add__(self, value): return rc.BasicMathTwoInputs((self, value), 1, 0)
    @property
    def crc(self) -> int:
        '''
        Calculate the 32-bit CRC of the data in this array using the Castagnoli polynomial (CRC32C).
        This function does not consider the array's shape or strides when calculating the CRC,
        it simply calculates the CRC value over the entire buffer described by the array.

        Returns
        -------
        int
            The CRC32C checksum of the array's buffer.

        Examples
        --------
        can be used to compare two arrays for structural equality
        >>> a = arange(100)
        >>> b = arange(100.0)
        >>> a.crc == b.crc
        False
        '''
        return crc32c(self)
#todo: range/nanrange
#todo: stats/nanstats
#-------------------------------------------------------
def nunique(self):
'''
Returns number of unique values in array. Does not include nan or sentinel values in the count.
Examples
--------
Float with nan:
>>> a = FastArray([1.,2.,3.,np.nan])
>>> a.nunique()
3
Signed integer with sentinel:
>>> a = FastArray([-128, 2, 3], dtype=np.int8)
>>> a.nunique()
2
Unsigned integer with sentinel:
>>> a = FastArray([255, 2, 3], dtype=np.uint8)
>>> a.nunique()
2
'''
un = unique(self)
count = len(un)
if count > 0:
# unique is sorted, so check for sentinel based on dtype
inv = INVALID_DICT[self.dtype.num]
if self.dtype.char in NumpyCharTypes.AllFloat:
# check if last item is nan
if un[count-1] != un[count-1]:
count -= 1
# unsigned int uses high number as sentinel
elif self.dtype.char in NumpyCharTypes.UnsignedInteger:
if un[count-1] == inv:
count -= 1
# all other sentinels are lowest number
else:
if un[0] == inv:
count -=1
return count
#-------------------------------------------------------
    def searchsorted(self, v, side='left', sorter=None):
        # Thin wrapper over the module-level _searchsorted helper; mirrors
        # numpy's searchsorted signature (side in {'left','right'}, optional
        # sorter giving indices that sort the array).
        return _searchsorted(self, v, side=side, sorter=sorter)
#---------------------------------------------------------------------------
def map_old(self, npdict:dict):
'''
d = {1:10, 2:20}
dat['c'] = dat.a.map(d)
print(dat)
a b cb c
0 1 0 0.0 10
1 1 1 1.0 10
2 1 2 3.0 10
3 2 3 5.0 20
4 2 4 7.0 20
5 2 5 9.0 20
'''
outArray = self.copy()
for k,v in npdict.items():
outArray[self==k]=v
return outArray
def map(self, npdict:dict):
'''
Notes
-----
Uses ismember and can handle large dictionaries
Examples
--------
>>> a=arange(3)
>>> a.map({1: 'a', 2:'b', 3:'c'})
FastArray(['', 'a', 'b'], dtype='<U1')
>>> a=arange(3)+1
>>> a.map({1: 'a', 2:'b', 3:'c'})
FastArray(['a', 'b', 'c'], dtype='<U1')
'''
orig = FastArray([*npdict], unicode=True)
replace = FastArray([*npdict.values()], unicode=True)
outArray = self.fill_invalid(self.shape, dtype=replace.dtype, inplace=False)
found, idx = ismember(self, orig)
outArray[found] = replace[idx[found]]
return outArray
#---------------------------------------------------------------------------
def shift(self, periods=1, invalid=None):
"""
Modeled on pandas.shift.
Values in the array will be shifted to the right for positive, to the left for negative.
Spaces at either end will be filled with an invalid based on the datatype.
If abs(periods) >= the length of the FastArray, it will return a FastArray full of invalid
will be returned.
Parameters
----------
periods: int, 1
number of elements to shift right (if positive) or left (if negative), defaults to 1
invalid: None, default
optional invalid value to fill
Returns
-------
FastArray shifted right or left by number of periods
Examples
--------
>>> arange(5).shift(2)
FastArray([-2147483648, -2147483648, 0, 1, 2])
"""
if periods == 0:
return self
if invalid is None:
if isinstance(self, TypeRegister.Categorical):
invalid =0
else:
try:
invalid = INVALID_DICT[self.dtype.num]
except Exception:
raise TypeError(f"shift does not support the dtype {self.dtype.name!r}")
# we know that this is a simple vector: shape == (len, )
# TODO: get recycled
temp = empty_like(self)
if abs(periods) >= len(self):
temp.fill(invalid)
elif periods > 0:
temp[:periods] = invalid
temp[periods:] = self[:-periods]
else:
temp[:periods] = self[-periods:]
temp[periods:] = invalid
# to rewrap categoricals or datelike
if hasattr(self, 'newclassfrominstance'):
temp = self.newclassfrominstance(temp, self)
return temp
#-------------------------------------------------------
    def _internal_self_compare(self, math_op, periods=1, fancy=False):
        '''
        Internal routine shared by ``differs`` and ``transitions``.

        Compares the array against itself offset by `periods` using the
        C-level binary op `math_op` (CMP_EQ for differs, CMP_NE for
        transitions), writing directly into a preallocated boolean result.
        The `periods` edge positions, which have no neighbor to compare
        against, are set to False.

        Parameters
        ----------
        math_op : MATH_OPERATION
            Comparison op passed to the ledger's two-input basic-math call.
        periods : int, default 1
            Offset between the compared elements; must be non-zero.
        fancy : bool, default False
            When True, return a fancy index of the True positions instead of
            the boolean mask.
        '''
        result = empty_like(self, dtype=np.bool_)
        if periods == 0:
            raise ValueError("periods of 0 is invalid for transitions")
        if periods > 0:
            # compare each element with the one `periods` before it,
            # writing into result[periods:] in place
            TypeRegister.MathLedger._BASICMATH_TWO_INPUTS((self[periods:], self[:-periods], result[periods:]), math_op, 0)
            # fill upfront with invalids
            result[:periods] = False
        else:
            # negative periods: compare each element with the one after it
            TypeRegister.MathLedger._BASICMATH_TWO_INPUTS((self[:periods], self[-periods:], result[:periods]), math_op, 0)
            # fill back with invalids (periods is negative)
            result[periods:] = False
        if fancy:
            return bool_to_fancy(result)
        return result
#-------------------------------------------------------
def differs(self, periods=1, fancy=False):
"""
Returns a boolean array.
The boolean array is set to True when the previous item in the array equals the current.
Use -1 instead of 1 if you want True set when the next item in the array equals the previous.
See also: ``transitions``
::param periods: The number of elements to look ahead (or behind), defaults to 1
:type periods: int
:param fancy: Indicates whether to return a fancy_index instead of a boolean array, defaults to False.
:type fancy: bool
:return: boolean ``FastArray``, or fancyIndex (see: `fancy` kwarg)
"""
if self.dtype.num > 13:
result = self != self.shift(periods)
if fancy:
return bool_to_fancy(result)
return result
return self._internal_self_compare(MATH_OPERATION.CMP_EQ, periods=periods, fancy=fancy)
#---------------------------------------------------------------------------
def transitions(self, periods=1, fancy=False):
"""
Returns a boolean array.
The boolean array is set to True when the previous item in the array does not equal the current.
Use -1 instead of 1 if you want True set when the next item in the array does not equal the previous.
See also: ``differs``
:param periods: The number of elements to look ahead (or behind), defaults to 1
:type periods: int
:param fancy: Indicates whether to return a fancy_index instead of a boolean array, defaults to False.
:type fancy: bool
:return: boolean ``FastArray``, or fancyIndex (see: `fancy` kwarg)
>>> a = FastArray([0,1,2,3,3,3,4])
>>> a.transitions(periods=1)
FastArray([False, True, True, True, False, False, True])
>>> a.transitions(periods=2)
FastArray([False, False, True, True, True, False, True])
>>> a.transitions(periods=-1)
FastArray([ True, True, True, False, False, True, False])
"""
if self.dtype.num > 13:
result = self != self.shift(periods)
if fancy:
return bool_to_fancy(result)
return result
return self._internal_self_compare(MATH_OPERATION.CMP_NE, periods=periods, fancy=fancy)
#-------------------------------------------------------
def diff(self, periods=1):
"""
Only works for integers and floats.
Parameters
----------
periods: int, defaults to 1. How many elements to shift the data before subtracting.
Returns
-------
FastArray same length as current array. Invalids will fill the beginning based on the periods.
Examples
--------
>>> a=rt.arange(3, dtype=rt.int32); a.diff()
FastArray([-2147483648, 1, 1])
"""
try:
invalid = INVALID_DICT[self.dtype.num]
except:
raise TypeError(f"shift does not support the dtype {self.dtype.name!r}")
temp = empty(self.shape, dtype=self.dtype)
if abs(periods) >= len(self):
temp.fill(invalid)
elif periods > 0:
temp[:periods] = invalid
#output into the empty array we created, np.subtract will call FastArray's subtract
np.subtract(self[periods:], self[:-periods], out=temp[periods:])
else:
temp[periods:] = invalid
np.subtract(self[:periods], self[-periods:], out= temp[:periods])
return temp
#-------------------------------------------------------
    def isna(self):
        '''
        Boolean mask of invalid entries; mapped directly to ``isnan``.

        Categoricals and DateTime take over isnan.
        FastArray handles sentinels.

        Examples
        --------
        >>> a=arange(100.0)
        >>> a[5]=np.nan
        >>> a[87]=np.nan
        >>> sum(a.isna())
        2

        >>> sum(a.astype(np.int32).isna())
        2
        '''
        return self.isnan()
    def notna(self):
        '''
        Boolean mask of valid entries; mapped directly to ``isnotnan``.

        Categoricals and DateTime take over isnotnan.
        FastArray handles sentinels.

        Examples
        --------
        >>> a=arange(100.0)
        >>> a[5]=np.nan
        >>> a[87]=np.nan
        >>> sum(a.notna())
        98

        >>> sum(a.astype(np.int32).notna())
        98
        '''
        return self.isnotnan()
def replacena(self, value, inplace=False):
"""
Returns a copy with all invalid values set to the given value.
Optionally modify the original, this might fail if locked.
Parameters
----------
value: a replacement value
inplace: defaults False. If True modify original and return None
Returns
-------
FastArray (size and dtype == original) or None
"""
inst = self if inplace else self.copy()
isna = inst.isna()
if isna.any():
inst[isna] = value
if inplace:
return None
return inst
def fillna(self, value=None, method=None, inplace=False, limit=None):
"""
Returns a copy with all invalid values set to the given value.
Optionally modify the original, this might fail if locked.
Parameters
----------
value: a replacement value
method : {'backfill', 'bfill', 'pad', 'ffill', None},
backfill/bfill: call fill_backward
pad/ffill: calls fill_forward
None: calls replacena
inplace: defaults False. If True modify original and return None
limit: only valid when method is not None
Returns
-------
FastArray (size and dtype == original) or None
Examples
--------
>>> ds = rt.Dataset({'A': arange(3), 'B': arange(3.0)})^M
>>> ds.A[2]=ds.A.inv; ds.B[1]=np.nan;
>>> ds.fillna(FastArray.fillna, 0)
# A B
- - ----
0 0 0.00
1 1 0.00
2 0 2.00
"""
if method is not None:
if method in ['backfill','bfill']:
return self.fill_backward(value, inplace=inplace, limit=limit)
if method in ['pad','ffill']:
return self.fill_forward(value, inplace=inplace, limit=limit)
raise KeyError(f"fillna: The method {method!r} must be 'backfill', 'bfill', 'pad', 'ffill'")
if value is None:
raise ValueError(f"fillna: Must specify either a 'value' that is not None or a 'method' that is not None.")
if limit is not None:
raise KeyError(f"fillna: There is no limit when method is None")
return self.replacena(value, inplace=inplace)
#---------------------------------------------------------------------------
def _is_not_supported(self, arr):
''' returns True if a numpy array is not FastArray internally supported '''
if not (arr.flags.c_contiguous or arr.flags.f_contiguous):
# TODO enable this warning in a future minor release
# FastArray._possibly_warn(f'_is_not_supported: unsupported array flags {arr.flags}')
return True
if arr.dtype.char not in NumpyCharTypes.Supported:
# TODO enable this warning in a future minor release
# FastArray._possibly_warn(f'_is_not_supported: unsupported array dtype {arr.dtype}\nSupported dtypes {NumpyCharTypes.Supported}')
return True
if len(arr.strides) == 0:
# TODO enable this warning in a future minor release
# FastArray._possibly_warn(f'_is_not_supported: unsupported array strides {arr.strides}')
return True
return False
# ---------------------------------------------------------------------------
def __array_function__(self, func, types, args, kwargs):
if self.NEW_ARRAY_FUNCTION_ENABLED:
return self._new_array_function(func, types, args, kwargs)
else:
return self._legacy_array_function(func, types, args, kwargs)
#---------------------------------------------------------------------------
    def _legacy_array_function(self, func, types, args, kwargs):
        '''
        Called before array_ufunc.
        Does not get called for every function np.isnan/trunc/true_divide for instance.

        Fast-paths numpy reductions that have an entry in
        NUMPY_CONVERSION_TABLE through the riptide C reducer; everything else
        falls back to numpy's implementation.
        '''
        reduceFunc=NUMPY_CONVERSION_TABLE.get(func, None)
        # TODO:
        # kwargs of 'axis': None 'out': None should be accepted
        # fast path is only taken when no kwargs were passed at all
        if reduceFunc is not None and len(kwargs)==0:
            # speed path (todo add call to ledger)
            # default to ddof=0 when no kwargs passed
            result =rc.Reduce(args[0], reduceFunc, 0)
            if result is not None:
                # TypeRegister.MathLedger._REDUCE(args[0], newfunc)
                # NOTE(review): kwargs is empty here (len(kwargs)==0 above),
                # so this get() always returns None and the branch below is
                # currently unreachable -- confirm intent
                dtype = kwargs.get('dtype', None)
                if dtype is not None:
                    # user forced dtype return value
                    return dtype(result)
                #preserve type for min/max/nanmin/nanmax
                if reduceFunc in [
                    REDUCE_FUNCTIONS.REDUCE_MIN,
                    REDUCE_FUNCTIONS.REDUCE_NANMIN,
                    REDUCE_FUNCTIONS.REDUCE_MAX,
                    REDUCE_FUNCTIONS.REDUCE_NANMAX]:
                    return self.dtype.type(result)
                #internally numpy expects a dtype returned for nanstd and other calculations
                if isinstance(result,(int, np.integer)):
                    # for uint64, the high bit must be preserved
                    if self.dtype.char in NumpyCharTypes.UnsignedInteger64:
                        return np.uint64(result)
                    return np.int64(result)
                return np.float64(result)
        # call the version numpy wanted use to
        return super(FastArray, self).__array_function__(func, types, args, kwargs)
# ---------------------------------------------------------------------------
    def _new_array_function(self, func: Callable, types: tuple, args: tuple, kwargs: dict):
        """
        FastArray implementation of the array function protocol.

        Parameters
        ----------
        func : callable
            A callable exposed by NumPy's public API, which was called in the form ``func(*args, **kwargs)``.
        types : tuple
            A tuple of unique argument types from the original NumPy function call that implement ``__array_function__``.
        args : tuple
            The tuple of arguments that will be passed to `func`.
        kwargs : dict
            The dictionary of keyword arguments that will be passed to `func`.

        Raises
        ------
        TypeError
            If `func` is not overridden by a corresponding riptable array function then a TypeError is raised.

        Notes
        -----
        This array function implementation requires each class, such as FastArray and any other derived class,
        to implement their own version of the Numpy array function API. In the event these array functions defer to the
        inheriting class they will need to either re-wrap the results in the correct type or raise exception if a
        particular operation is not well-defined nor meaningful for the derived class.
        If an array function, which is also a universal function, is not overridden as an array function, but defined
        as a ufunc then it will not be called unless it is registered with the array function helper since array function
        protocol takes priority over the universal function protocol.

        See Also
        --------
        For information around the Numpy array function protocol see NEP 18:
        https://numpy.org/neps/nep-0018-array-function-protocol.html
        """
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug(f'{FastArray.__name__}._new_array_function(fun={func}, types={types}, args={args}, kwargs={kwargs})')
        # handle `func` argument: look up a riptable-specific override
        array_func: Callable = FastArray._ArrayFunctionHelper.get_array_function(func)
        if array_func is None:
            # fallback to numpy for unhandled array functions and attempt to cast back to FastArray
            result = super().__array_function__(func, types, args, kwargs)
            if result is NotImplemented:
                return NotImplemented
            elif isinstance(result, np.ndarray):
                return result.view(FastArray)
            elif isinstance(result, list):
                # re-wrap any ndarray elements; leave other element types alone
                return [(x.view(FastArray) if isinstance(x, np.ndarray) else x) for x in result]
            elif isinstance(result, tuple):
                return tuple([(x.view(FastArray) if isinstance(x, np.ndarray) else x) for x in result])
            else:
                # Unknown result type.
                raise TypeError(f"Unknown result type '{type(result)}' returned by ndarray.{func}.")
        # handle `types` argument: verify every participating type is compatible
        array_func_type_check: Callable = FastArray._ArrayFunctionHelper.get_array_function_type_compatibility_check(func)
        if array_func_type_check is None:
            # no custom type compatibility check; default type compatibility check
            # this allows subclasses that don't override __array_function__ to handle FastArray objects
            for typ in types:
                if not issubclass(typ, FastArray):
                    if logger.isEnabledFor(logging.DEBUG):
                        logger.debug(f'{FastArray.__name__}.__array_function__: unsupported type {repr(typ)}')
                    return NotImplemented
        else: # custom type compatibility check
            valid: bool = array_func_type_check(types)
            if not valid:
                if logger.isEnabledFor(logging.DEBUG):
                    logger.debug(f'{FastArray.__name__}.__array_function__: unsupported type in {repr(types)}')
                return NotImplemented
        # all checks passed -- invoke the riptable override
        return array_func(*args, **kwargs)
#---------------------------------------------------------------------------
def __array_ufunc__(self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any):
"""
The FastArray universal function (or ufunc) override offers multithreaded C/C++ implementation at the RiptideCPP layer.
When FastArray receives a `ufunc` callable it will attempt to handle it in priority order:
1. considering ``FastArray`` ``FastFunction`` is enabled, ufunc is handled by an explicit ufunc override, otherwise
2. ufunc is handled at the Riptable / Numpy API overrides level, otherwise
3. ufunc is handled at the Numpy API level.
Given a combination of `ufunc`, `inputs`, and `kwargs`, if neither of the aforementioned cases support this
then a warning is emitted.
The following references to supported ufuncs are grouped by method type.
- For `method` type ``reduce``, see ``gReduceUFuncs``.
- For `method` type ``__call__``, see ``gBinaryUFuncs``, ``gBinaryLogicalUFuncs``, ``gBinaryBitwiseUFuncs``, and ``gUnaryUFuncs``.
- For `method` type ``at`` return ``None``.
If `out` argument is specified, then an extra array copy is performed on the result of the ufunc computation.
If a `dtype` keyword is specified, all efforts are made to respect the `dtype` on the result of the computation.
Parameters
----------
ufunc : callable
The ufunc object that was called.
method : str
A string indicating which Ufunc method was called (one of "__call__", "reduce", "reduceat", "accumulate", "outer", "inner").
inputs
A tuple of the input arguments to the ufunc.
kwargs
A dictionary containing the optional input arguments of the ufunc. If given, any out arguments, both positional and keyword, are passed as a tuple in kwargs.
Returns
-------
The method should return either the result of the operation, or NotImplemented if the operation requested is not implemented.
Notes
-----
The current implementation does not support the following keyword arguments: `casting`, `sig`, `signature`, and
`core_signature`.
It has partial support for keyword arguments: `where`, `axis`, and `axes`, if they match
the default values.
If FastArray's ``WarningLevel`` is enabled, then warnings will be emitted if any of unsupported or partially
supported keyword arguments are passed.
TODO document custom up casting rules.
See Also
--------
For more information on ufunc see the following numpy documents:
- https://numpy.org/doc/stable/reference/arrays.classes.html#numpy.class.__array_ufunc__
- https://numpy.org/doc/stable/reference/ufuncs.html
Note, the docstring Parameters and Return section is repeated from the numpy
`__array_ufunc__` docstring since this is overriding that method.
"""
# TODO consider using type annotation typing.Final for these read-only variables when moving to Python 3.8
# Python 3.8 added support for typing.Final. Final will catch unintended assignments for constants when running
# static type checkers such as mypy.
_UNSUPPORTED_KEYWORDS: Tuple[str, str, str, str] = ('casting', 'sig', 'signature', 'core_signature')
_PARTIALLY_SUPPORTED_KEYWORDS_TO_DEFAULTS: Mapping[str, Union[None, bool]] = {'where': True, 'axis': None, 'axes': None}
toplevel_abort: bool = False
if FastArray.Verbose > 2:
print("*** top level array_ufunc", ufunc, method, *inputs, kwargs)
# flip any inputs that are fastarrays back to an ndarray...
args: List[Any] = []
for input in inputs:
if isinstance(input, np.ndarray):
is_not_supported = self._is_not_supported(input)
if is_not_supported:
# TODO enable this warning in a future minor release
# FastArray._possibly_warn(f'__array_ufunc__: unsupported input "{input}"')
toplevel_abort |= is_not_supported
args.append(input)
# Check for numpy rules that we cannot handle.
for kw in _UNSUPPORTED_KEYWORDS:
if kw in kwargs:
# TODO enable this warning in a future minor release
# FastArray._possibly_warn(f'__array_ufunc__: unsupported keyword argument "{kw}"')
toplevel_abort |= True
# Check for numpy rules that we partially support; that is, where we only support
# the keyword if the value is some default value and otherwise punt to numpy.
# The value associated with each keyword in the dictionary is the only value we'll
# support for that keyword.
# For example, in numpy 1.17 the sum() function passes where=True by default.
for kw, default_val in _PARTIALLY_SUPPORTED_KEYWORDS_TO_DEFAULTS.items():
if kw in kwargs:
# Use a type check before equality here to avoid errors caused
# by checking equality between bools and arrays.
kwarg_val = kwargs[kw]
if type(default_val) != type(kwarg_val) or kwarg_val != default_val:
toplevel_abort |= True
dtype: Optional[np.dtype] = kwargs.get('dtype', None)
has_outputs: bool = False
out_args: List[Any] = []
#flip any outputs to ndarray...
outputs = kwargs.pop('out', None)
if outputs:
has_outputs = True
for output in outputs:
if isinstance(output, np.ndarray):
is_not_supported = self._is_not_supported(output)
if is_not_supported:
# TODO enable this warning in a future minor release
# FastArray._possibly_warn(f'__array_ufunc__: unsupported output "{output}"')
toplevel_abort |= is_not_supported
out_args.append(output)
#replace out
kwargs['out'] = tuple(out_args)
else:
# TJD - here outputs was not specified
# now if UFunc.nout ==1, this function requires an output
outputs = (None,) * ufunc.nout
# See https://docs.python.org/3/c-api/typeobj.html
# See Number Object Structures and Mapping Object Structure for indexing
#ufunc.nin The number of inputs.
#ufunc.nout The number of outputs.
#ufunc.nargs The number of arguments.
#ufunc.ntypes The number of types.
#ufunc.types Returns a list with types grouped input->output.
#ufunc.identity The identity value.
final_dtype: Optional[np.dtype] = None
fast_function: Optional[MATH_OPERATION] = None
reduce_func: Optional[REDUCE_FUNCTIONS] = None
# Handle reduce ufunc methods.
# note: when method is 'at' this is an inplace unbuffered operation
# this can speed up routines that use heavy masked operations
if method == 'reduce' and FastArray.FasterUFunc and not toplevel_abort:
# a.any() and a.all() are logical reduce operations
# Examples
# Look for axis:None -- otherwise ABORT
# Then look for Keepdims wihch means to wrap result in list/array?
# Then check dtype also
#
#In [22]: t=FA([[3,4,5],[6,7,8]])
#In [23]: np.add.reduce(t)
#!!reduce reduce nin: 2 1 <ufunc 'add'> [array([[3, 4, 5],
# [6, 7, 8]])] out: (None,) kwargs: {}
#results [ 9 11 13]
#Out[23]: array([ 9, 11, 13])
#In [24]: np.add.reduce(t, axis=None)
#!!reduce reduce nin: 2 1 <ufunc 'add'> [array([[3, 4, 5],
# [6, 7, 8]])] out: (None,) kwargs: {'axis': None}
#results 33
#Out[24]: 33
#In [25]: np.add.reduce(t, axis=None, keepdims=True)
#!!reduce reduce nin: 2 1 <ufunc 'add'> [array([[3, 4, 5],
# [6, 7, 8]])] out: (None,) kwargs: {'axis': None, 'keepdims': True}
#results [[33]]
#Out[25]: array([[33]])
#In [26]: np.add.reduce(t, axis=None, keepdims=True, dtype=np.float32)
#!!reduce reduce nin: 2 1 <ufunc 'add'> [array([[3, 4, 5],
# [6, 7, 8]])] out: (None,) kwargs: {'axis': None, 'keepdims': True, 'dtype': <class 'numpy.float32'>}
#results [[33.]]
#Out[26]: array([[33.]], dtype=float32)
#print("!!reduce ", method, 'nin:', ufunc.nin, ufunc.nout, ufunc, args, 'out:', outputs, 'kwargs:', kwargs,'ndim', args[0].ndim)
#resultN = super(FastArray, self).__array_ufunc__(ufunc, method, *args, **kwargs)
#print("!!result numpy", resultN, type(resultN))
# NOTE:
# look for reduce logical_or
# look for reduce_logical_and (used with np.fmin for instance)
reduce_func=gReduceUFuncs.get(ufunc,None)
# check if we can proceed to calculate a faster way
if method == '__call__' and FastArray.FasterUFunc and not toplevel_abort:
# check for binary ufunc
if len(args)==2 and ufunc.nout==1:
###########################################################################
## BINARY
###########################################################################
array_types: List[np.dtype] = []
scalar_types: List[ScalarType] = []
scalars: int = 0
abort: int = 0
for arr in args:
arrType = type(arr)
if arrType in ScalarType:
scalars += 1
scalar_types.append(arrType)
else:
try:
array_types.append(arr.dtype)
# check for non contingous arrays
if arr.itemsize != arr.strides[0]: abort =1
except:
abort=1
# can happen when None or a python list is passed
if (FastArray.Verbose > 1):
print(f"**dont know how to handle array {arr} args: {args}")
if abort == 0:
if scalars < 2:
is_logical = 0
# check for add, sub, mul, divide, power
fast_function= gBinaryUFuncs.get(ufunc, None)
if fast_function is None:
#check for comparison and logical or/and functions
fast_function = gBinaryLogicalUFuncs.get(ufunc, None)
if fast_function is not None:
if (FastArray.Verbose > 2):
print(f"**logical function called {ufunc} args: {args}")
is_logical = 1
final_dtype = np.bool_
if fast_function is None:
#check for bitwise functions? (test this)
fast_function = gBinaryBitwiseUFuncs.get(ufunc, None)
if fast_function is not None:
if has_outputs and is_logical == 0:
# have to conform to output
final_dtype = out_args[0].dtype
else:
if is_logical == 1 and scalars == 1:
# NOTE: scalar upcast rules -- just apply to logicals so that arr < 5 does not upcast?
# or globally apply this rule so that arr = arr + 5
#if scalars == 1:
#special case have to see if scalar is in range
if type(args[0]) in ScalarType:
scalar_val = args[0]
else:
scalar_val = args[1]
final_dtype = logical_find_common_type(array_types, scalar_types, scalar_val)
else:
print
# TODO: check for bug where np.int32 type 7 gets flipped to np.int32 type 5
if scalars ==0 and len(array_types)==2 and (array_types[0] == array_types[1]):
final_dtype = array_types[0]
else:
# check for int scalar against int
# bug where np.int8 and then add +1999 or larger number. need to upcast
if scalars == 1 and array_types[0].num <=10:
if type(args[0]) in ScalarType:
scalar_val = args[0]
else:
scalar_val = args[1]
final_dtype = logical_find_common_type(array_types, scalar_types, scalar_val)
else:
final_dtype = np.find_common_type(array_types, scalar_types)
# if we are adding two strings or unicode, special case
# if we think the final dtype is an object, check if this is really two strings
if fast_function == MATH_OPERATION.ADD and (array_types[0].num == 18 or array_types[0].num == 19) :
# assume addition of two strings
final_dtype = array_types[0]
if scalars != 0:
# we have a scalar... make sure we convert it
if type(args[0]) in ScalarType:
# fix scalar type make sure string or unicode
if array_types[0].num == 18:
args[0] = str.encode(str(args[0]))
if array_types[0].num == 19:
args[0] = str(args[0])
else:
if array_types[0].num == 18:
args[1] = str.encode(str(args[1]))
if array_types[0].num == 19:
args[1] = str(args[1])
else:
# we have two arrays, if one array is not proper string type, convert it
if array_types[1] != final_dtype:
if array_types[0].num == 18:
args[1] = args[1].astype('S')
if array_types[0].num == 19:
args[1] = args[1].astype('U')
if (FastArray.Verbose > 2):
print("ADD string operation", array_types, scalar_types)
elif scalars ==0:
if array_types[0] != array_types[1]:
# UPCAST RULES
if array_types[0] == final_dtype and array_types[1] != final_dtype:
#print("!!!upcast rules second", array_types[0], array_types[1], final_dtype)
#convert to the proper type befor calculation
args[1] = _ASTYPE(args[1], final_dtype)
elif array_types[0] != final_dtype and array_types[1] == final_dtype:
#print("!!!upcast rules first", array_types[0], array_types[1], final_dtype)
#convert to the proper type befor calculation
args[0] = _ASTYPE(args[0], final_dtype)
else:
# sometimes both of them must be upcast...
# consider int8 * uint8 ==> will upcast to int16
#print("!!!cannot understand upcast rules", arraytypes[0], arraytypes[1], final_dtype)
args[0] = _ASTYPE(args[0], final_dtype)
args[1] = _ASTYPE(args[1], final_dtype)
#TJD check logic here... what does numpy when int* * uint8 ? speed test
##UseNumpy = True
else:
# UPCAST RULES when one is a scalar
if array_types[0] != final_dtype:
# which argument is the scalar? convert the other one
if type(args[0]) in ScalarType:
#print("converting arg2 from", args[1], final_dtype)
args[1] = _ASTYPE(args[1], final_dtype)
else:
#print("converting arg1 from ", args[0], final_dtype)
args[0] = _ASTYPE(args[0], final_dtype)
# not a binary ufunc, check for unary ufunc
# check for just 1 input (unary)
elif ((ufunc.nin==1) and (ufunc.nout==1)):
###########################################################################
## UNARY
###########################################################################
fast_function= gUnaryUFuncs.get(ufunc, None)
else:
if (FastArray.Verbose > 1):
print("***unknown ufunc arg style: ", ufunc.nin, ufunc.nout, ufunc, args, kwargs)
# -------------------------------------------------------------------------------------------------------------
if not FastArray.FasterUFunc:
fast_function = None
reduce_func= None
# check for a reduce func like sum or min
if reduce_func is not None:
keepdims: bool = kwargs.get('keepdims',False)
if dtype is None: dtype = args[0].dtype
#MathLedger
result= TypeRegister.MathLedger._REDUCE(args[0], reduce_func)
char = np.dtype(dtype).char
if FastArray.Verbose > 1:
print("***result from reduce", result, type(result), dtype, char)
if result is not None:
#print("reduce called", ufunc, keepdims, dtype)
if reduce_func in [REDUCE_FUNCTIONS.REDUCE_SUM, REDUCE_FUNCTIONS.REDUCE_NANSUM] and isinstance(result, float):
result = np.float64(result)
elif (dtype != np.float32 and dtype != np.float64):
# preserve integers
if char in NumpyCharTypes.UnsignedInteger64:
# preserve high bit
result = np.uint64(result)
else:
result = np.int64(result)
else:
result=np.float64(result)
# MIN/MAX need to return same type
if (reduce_func >= REDUCE_FUNCTIONS.REDUCE_MIN):
# min max not allowed on empty array per unit test
if (len(args[0])==0): raise ValueError("min/max arg is an empty sequence.")
# min/max/nanmin/nanmax -- same result
if dtype == np.bool_:
result =np.bool(result)
else:
result=dtype.type(result)
if (keepdims):
result= FastArray([result]).astype(dtype)
elif (keepdims):
# force back into an array from scalar
result= FastArray([result])
# we did the reduce, now return the result
return result
# check for normal call function
elif fast_function is not None:
# Call the FastArray APIs instead of numpy
#callmode = 'f'
results=None
if ufunc.nin==2:
final_num=-1
if final_dtype is not None:
if final_dtype == np.bool_:
final_num=0
else:
final_num=final_dtype.num
# because scalars can be passed as np.int64(864000)
if type(args[0]) in gNumpyScalarType:
#print('converting arg1', args[0])
args[0]=np.asarray(args[0]);
if type(args[1]) in gNumpyScalarType:
#print('converting arg2', args[1])
args[1]=np.asarray(args[1]);
if FastArray.Verbose > 2:
print("*** binary think we can call", fast_function, ufunc.nin, ufunc.nout, "arg1", args[0], "arg2", args[1], "out", out_args, "final", final_num)
if len(out_args)==1:
results = TypeRegister.MathLedger._BASICMATH_TWO_INPUTS((args[0], args[1], out_args[0]), fast_function, final_num)
else:
results = TypeRegister.MathLedger._BASICMATH_TWO_INPUTS((args[0], args[1]), fast_function, final_num)
else:
#for conversion functions
#dtype=kwargs.get('dtype',None)
if FastArray.Verbose > 2:
print("*** unary think we can call", fast_function, ufunc.nin, ufunc.nout, "arg1", args[0], "out", out_args)
if len(out_args)==1:
results = TypeRegister.MathLedger._BASICMATH_ONE_INPUT((args[0], out_args[0]), fast_function,0)
else:
results = TypeRegister.MathLedger._BASICMATH_ONE_INPUT((args[0]), fast_function,0)
if results is not None and len(out_args)==1:
# when the output argument is forced but we calculate it into another array we need to copy the result into the output
if not rc.CompareNumpyMemAddress(out_args[0], results):
if FastArray.Verbose > 2:
print("*** performing an extra copy to match output request", id(out_args[0]), id(results), out_args[0], results)
out_args[0][...]=results
results = out_args[0]
if results is None:
#punted
#callmode='p'
if (FastArray.Verbose > 1):
print("***punted ufunc: ", ufunc.nin, ufunc.nout, ufunc, args, kwargs)
fast_function =None
# fall to "if fast_function is None" and run through numpy...
# respect dtype
elif dtype is not None and isinstance(results, np.ndarray):
if dtype is not results.dtype:
if FastArray.Verbose > 1: print("***result from reduce", results, results.dtype, dtype)
# convert
results = results.astype(dtype)
if fast_function is None:
# Call the numpy APIs
# Check if we can use the recycled arrays to avoid an allocation for the output array
if FastArray.Verbose > 1:
print("**punted on numpy!", ufunc)
# NOTE: We are going to let numpy process it
# We must change all FastArrays to normal numpy arrays
args = []
for input in inputs:
#flip back to numpy to avoid errors when numpy calculates
if isinstance(input, FastArray):
args.append(input.view(np.ndarray))
else:
args.append(input)
if has_outputs:
outputs = kwargs.pop('out', None)
if outputs:
out_args=[]
for output in outputs:
if isinstance(output, FastArray):
out_args.append(output.view(np.ndarray))
else:
out_args.append(output)
#replace out
kwargs['out'] = tuple(out_args)
# NOTE: If the specified ufunc + inputs combination isn't supported by numpy either,
# as of numpy 1.17.x this call will end up raising a UFuncTypeError so the rest
# of the FastArray.__array_ufunc__ body (below) won't end up executing.
results = TypeRegister.MathLedger._ARRAY_UFUNC(super(FastArray, self),ufunc, method, *args, **kwargs)
# If riptable has not implemented a certain ufunc (or doesn't support it for the given arguments),
# emit a warning about it to let the user know.
# When numpy does not support the ufunc+inputs either, we won't reach this point (as of numpy 1.17.x),
# since numpy will raise a UFuncTypeError earlier (before this point) rather than after we return NotImplemented.
if results is NotImplemented:
warnings.warn(f"***ufunc {ufunc} {args} {kwargs} is not implemented")
return NotImplemented
#Ufuncs also have a fifth method that allows in place operations to be performed using fancy indexing.
#No buffering is used on the dimensions where fancy indexing is used, so the fancy index can list an item more than once
# and the operation will be performed on the result of the previous operation for that item.
#ufunc.reduce(a[, axis, dtype, out, keepdims]) Reduces a's dimension by one, by applying ufunc along one axis.
#ufunc.accumulate(array[, axis, dtype, out]) Accumulate the result of applying the operator to all elements.
#ufunc.reduceat(a, indices[, axis, dtype, out]) Performs a (local) reduce with specified slices over a single axis.
#ufunc.outer(A, B) Apply the ufunc op to all pairs (a, b) with a in A and b in B.
#ufunc.at(a, indices[, b]) Performs unbuffered in place operation on operand 'a' for elements specified by 'indices'.
if method == 'at':
return
if ufunc.nout == 1:
#check if we used our own output
#if isinstance(outArray, np.ndarray):
# return outArray.view(FastArray)
#if (final_dtype != None and final_dtype != results.dtype):
# print("****** mispredicted final", final_dtype, results.dtype, ufunc, scalartypes, args, outputs, kwargs);
#results = (results,)
if not isinstance(results,FastArray) and isinstance(results,np.ndarray):
return results.view(FastArray)
# think hit here for sum wihch does not return an array, just a number
return results
# more than one item, so we are making a tuple
# can result in __array_finalize__ being called
results = tuple((np.asarray(result).view(FastArray)
if output is None else output)
for result, output in zip(results, outputs))
# check if we have a tuple of one item, if so just return the one item
if len(results) == 1:
results = results[0]
return results
@property
def numbastring(self):
'''
converts byte string and unicode strings to a 2dimensional array
so that numba can process it correctly
Examples
--------
>>> @numba.jit(nopython=True)
... def numba_str(txt):
... x=0
... for i in range(txt.shape[0]):
... if (txt[i,0]==116 and # 't'
... txt[i,1]==101 and # 'e'
... txt[i,2]==120 and # 'x'
... txt[i,3]==116): # 't'
... x += 1
... return x
>>>
>>> x=FastArray(['some','text','this','is'])
>>> numba_str(x.view(np.uint8).reshape((len(x), x.itemsize)))
>>> numba_str(x.numbastring)
'''
intype=self.dtype.__str__()
if intype[0]=='|' or intype[0]=='<':
if intype[1]=='S':
return self.view(np.uint8).reshape((len(self), self.itemsize))
if intype[1]=='U':
return self.view(np.uint32).reshape((len(self), self.itemsize//4))
return self
#-----------------------------------------------------------
def apply_numba(self, *args, otype=None, myfunc="myfunc",name=None):
'''
Usage:
-----
Prints to screen an example numba signature for the array.
You can then copy this example to build your own numba function.
Inputs:
------
Can pass in multiple test arguments.
kwargs
------
otype: specify a different output type
myfunc: specify a string to call the function
name: specify a string to name the array
Example using numba
-------------------
>>> import numba
>>> @numba.guvectorize(['void(int64[:], int64[:])'], '(n)->(n)')
... def squarev(x,out):
... for i in range(len(x)):
... out[i]=x[i]**2
...
>>> a=arange(1_000_000).astype(np.int64)
>>> squarev(a)
FastArray([ 0, 1, 4, ..., 999994000009,
999996000004, 999998000001], dtype=int64)
'''
if name is None:
# try first to get the name
name=self.get_name()
if name is None:
name="a"
intype=self.dtype.__str__()
if otype is None:
outtype=self.dtype.__str__()
else:
outtype=np.dtype(otype).__str__()
# TODO: what if unicode or string? .frombuffer/.view(np.uint8)
preamble = "import numba\n@numba.guvectorize([\n"
middle=f"'void({intype}[:], {outtype}[:])', # <-- can stack multiple different dtypes x.view(np.uint8).reshape(-1, x.itemsize)\n"
postamble=" ], '(n)->(n)', target='cpu')\n"
code=f"def {myfunc}(data_in, data_out):\n for i in range(len(data_in)):\n data_out[i]=data_in[i] #<-- put your code here\n"
exec = preamble+middle+postamble+code
print("Copy the code snippet below and rename myfunc")
print("---------------------------------------------")
print(exec)
print("---------------------------------------------")
if intype[0]=='|' or intype[0]=='<':
if intype[1]=='S':
print(f"Then call {myfunc}({name}.numbastring,empty_like({name}).numbastring) where {name} is the input array")
elif intype[1]=='U':
print(f"Then call {myfunc}({name}.numbastring,empty_like({name}).numbastring) where {name} is the input array")
else:
print(f"Then call {myfunc}({name},empty_like({name})) where {name} is the input array")
#return exec
def apply(self, pyfunc, *args, otypes=None, doc=None, excluded =None, cache=False, signature=None):
"""
Generalized function class. see: np.vectorize
Creates and then applies a vectorized function which takes a nested sequence of objects or
numpy arrays as inputs and returns an single or tuple of numpy array as
output. The vectorized function evaluates `pyfunc` over successive tuples
of the input arrays like the python map function, except it uses the
broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
signature : string, optional
Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for
vectorized matrix-vector multiplication. If provided, ``pyfunc`` will
be called with (and expected to return) arrays with shapes given by the
size of corresponding core dimensions. By default, ``pyfunc`` is
assumed to take scalars as input and output.
.. versionadded:: 1.12.0
Returns
-------
vectorized : callable
Vectorized function.
See Also
--------
FastArray.apply_numba
FastArray.apply_pandas
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>>
>>> a=arange(10)
>>> b=arange(10)+1
>>> a.apply(myfunc,b)
FastArray([ 1, 3, 5, 7, 9, 11, 13, 15, 17, 19])
Example with one input array
>>> def square(x):
... return x**2
>>>
>>> a=arange(10)
>>> a.apply(square)
FastArray([ 0, 1, 4, 9, 16, 25, 36, 49, 64, 81])
Example with lambda
>>> a=arange(10)
>>> a.apply(lambda x: x**2)
FastArray([ 0, 1, 4, 9, 16, 25, 36, 49, 64, 81])
Example with numba
>>> from numba import jit
>>> @jit
... def squareit(x):
... return x**2
>>> a.apply(squareit)
FastArray([ 0, 1, 4, 9, 16, 25, 36, 49, 64, 81])
Examples to use existing builtin oct function but change the output from string, to unicode, to object
>>> a=arange(10)
>>> a.apply(oct, otypes=['S'])
FastArray([b'0o0', b'0o1', b'0o2', b'0o3', b'0o4', b'0o5', b'0o6', b'0o7', b'0o10', b'0o11'], dtype='|S4')
>>> a=arange(10)
>>> a.apply(oct, otypes=['U'])
FastArray(['0o0', '0o1', '0o2', '0o3', '0o4', '0o5', '0o6', '0o7', '0o10', '0o11'], dtype='<U4')
>>> a=arange(10)
>>> a.apply(oct, otypes=['O'])
FastArray(['0o0', '0o1', '0o2', '0o3', '0o4', '0o5', '0o6', '0o7', '0o10', '0o11'], dtype=object)
"""
vfunc = np.vectorize(pyfunc, otypes=otypes, doc=doc, excluded=excluded, cache=cache, signature=signature)
result=vfunc(self, *args)
return result
#-----------------------------------------------------------
def apply_pandas(self, func, convert_dtype=True, args=(), **kwds):
"""
Invoke function on values of FastArray. Can be ufunc (a NumPy function
that applies to the entire FastArray) or a Python function that only works
on single values
Parameters
----------
func : function
convert_dtype : boolean, default True
Try to find better dtype for elementwise function results. If
False, leave as dtype=object
args : tuple
Positional arguments to pass to function in addition to the value
Additional keyword arguments will be passed as keywords to the function
Returns
-------
y : FastArray or Dataset if func returns a FastArray
See Also
--------
FastArray.map: For element-wise operations
FastArray.agg: only perform aggregating type operations
FastArray.transform: only perform transforming type operations
Examples
--------
Create a FastArray with typical summer temperatures for each city.
>>> fa = rt.FastArray([20, 21, 12], index=['London', 'New York','Helsinki'])
>>> fa
London 20
New York 21
Helsinki 12
dtype: int64
Square the values by defining a function and passing it as an
argument to ``apply()``.
>>> def square(x):
... return x**2
>>> fa.apply(square)
London 400
New York 441
Helsinki 144
dtype: int64
Square the values by passing an anonymous function as an
argument to ``apply()``.
>>> fa.apply(lambda x: x**2)
London 400
New York 441
Helsinki 144
dtype: int64
Define a custom function that needs additional positional
arguments and pass these additional arguments using the
``args`` keyword.
>>> def subtract_custom_value(x, custom_value):
... return x-custom_value
>>> fa.apply(subtract_custom_value, args=(5,))
London 15
New York 16
Helsinki 7
dtype: int64
Define a custom function that takes keyword arguments
and pass these arguments to ``apply``.
>>> def add_custom_values(x, **kwargs):
... for month in kwargs:
... x+=kwargs[month]
... return x
>>> fa.apply(add_custom_values, june=30, july=20, august=25)
London 95
New York 96
Helsinki 87
dtype: int64
Use a function from the Numpy library.
>>> fa.apply(np.log)
London 2.995732
New York 3.044522
Helsinki 2.484907
dtype: float64
"""
import pandas as pd
series = pd.Series(self)
result = series.apply(func, convert_dtype=convert_dtype, args=args, **kwds)
return result.values
#-----------------------------------------------------------
@property
def str(self):
r"""Casts an array of byte strings or unicode as ``FAString``.
Enables a variety of useful string manipulation methods.
Returns
-------
FAString
Raises
------
TypeError
If the FastArray is of dtype other than byte string or unicode
See Also
--------
np.chararray
np.char
rt.FAString.apply
Examples
--------
>>> s=FA(['this','that','test ']*100_000)
>>> s.str.upper
FastArray([b'THIS', b'THAT', b'TEST ', ..., b'THIS', b'THAT', b'TEST '],
dtype='|S5')
>>> s.str.lower
FastArray([b'this', b'that', b'test ', ..., b'this', b'that', b'test '],
dtype='|S5')
>>> s.str.removetrailing()
FastArray([b'this', b'that', b'test', ..., b'this', b'that', b'test'],
dtype='|S5')
"""
if self.dtype.char in 'US':
return TypeRegister.FAString(self)
if self.dtype.char == 'O':
# try to convert to string (might have come from pandas)
try:
conv = self.astype('S')
except:
conv = self.astype('U')
return TypeRegister.FAString(conv)
raise TypeError(f"The .str function can only be used on byte string and unicode not {self.dtype!r}")
#-----------------------------------------------------------
    @classmethod
    def register_function(cls, name, func):
        '''
        Attach ``func`` to the FastArray class under attribute ``name``.

        Used to register functions to FastArray (e.g. by rt_fastarraynumba)
        so that extension modules can add methods without this module
        importing them.

        Parameters
        ----------
        name : str
            Attribute name to bind the function under.
        func : callable
            Function to attach; it is called like a normal method.
        '''
        setattr(cls, name, func)
    def apply_schema(self, schema):
        """
        Apply a schema containing descriptive information to the FastArray.

        :param schema: dict of descriptive metadata to apply
        :return: dictionary of deviations from the schema
        """
        # local import — presumably to avoid a circular import with rt_meta
        # (TODO confirm)
        from .rt_meta import apply_schema as _apply_schema
        return _apply_schema(self, schema)
    def info(self, **kwargs):
        """
        Print a description of the FastArray's contents.

        Keyword arguments are forwarded to ``rt_meta.info``; returns
        whatever that helper returns.
        """
        # local import — presumably to avoid a circular import with rt_meta
        # (TODO confirm)
        from .rt_meta import info as _info
        return _info(self, **kwargs)
    @property
    def doc(self):
        """
        The Doc object for the structure (built by ``rt_meta.doc``).
        """
        # local import — presumably to avoid a circular import with rt_meta
        # (TODO confirm)
        from .rt_meta import doc as _doc
        return _doc(self)
# ====================== END OF CLASS DEFINITION ===============================
#-----------------------------------------------------------
def _setfastarrayview(arr):
    """
    Callback invoked from the C++ layer to reinterpret an ndarray as a
    FastArray view.  Arrays that already are FastArrays pass through
    untouched.
    """
    verbose = FastArray.Verbose > 2
    if isinstance(arr, FastArray):
        if verbose:
            print("no need to setfastarrayview", arr.dtype, len(arr))
        return arr
    if verbose:
        print("setfastarrayview", arr.dtype, len(arr))
    return arr.view(FastArray)
#-----------------------------------------------------------
def _setfastarraytype():
    # Register the FastArray subclass with the native rc layer so that its
    # math routines return FastArray instead of plain ndarray.
    #-----------------------------------------------------------
    # calling this function will force fm to return FastArray subclass
    #rc.BasicMathHook(FastArray, np.ndarray)
    # Coming next build
    # a minimal one-element FastArray is enough to hand rc the type object
    fa=np.arange(1).view(FastArray)
    rc.SetFastArrayType(fa, _setfastarrayview)
    rc.BasicMathHook(fa, fa._np)
#-----------------------------------------------------------
def _FixupDocStrings():
    """
    Copy docstrings onto FastArray's methods from matching functions in
    bottleneck (optional), np.ndarray, and finally the np module itself,
    so that help() on FastArray methods shows the familiar documentation.
    """
    import inspect
    all_myfunctions = inspect.getmembers(FastArray, inspect.isfunction)
    try:
        # bottleneck is optional; any failure (including `bn` missing) skips
        # this stage silently
        all_bnfunctions = inspect.getmembers(bn, inspect.isfunction)
        all_bnfunctions += inspect.getmembers(bn, inspect.isbuiltin)
        # build dictionary of bottleneck docs
        bndict = {fname: fobj for fname, fobj in all_bnfunctions}
        # for each FastArray function that has a bn flavor, copy the doc string
        for fname, fobj in all_myfunctions:
            if fname in bndict:
                fobj.__doc__ = bndict[fname].__doc__
    except Exception:
        pass
    # build dictionary of public np.ndarray docs
    npdict = {fname: fobj for fname, fobj in inspect.getmembers(np.ndarray)
              if not fname.startswith('_')}
    # for each FastArray function that has an ndarray flavor, copy the doc string
    for fname, fobj in all_myfunctions:
        if fname in npdict:
            fobj.__doc__ = npdict[fname].__doc__
    # now do just plain np
    # BUG FIX: the original filter tested the stale loop variable `funcs[0]`
    # (left over from the previous loop) instead of the comprehension
    # variable, so dunder names were never actually excluded
    npdict = {fname: fobj for fname, fobj in inspect.getmembers(np)
              if '__' not in fname}
    # for each FastArray function that has an np flavor, copy the doc string
    for fname, fobj in all_myfunctions:
        if fname in npdict:
            fobj.__doc__ = npdict[fname].__doc__
#----------------------------------------------------------
class Threading():
    """Static controls for riptable's worker-thread pool; each method
    delegates to the native layer via FastArray / rc."""
    @staticmethod
    def on():
        '''
        Turn riptable threading on.
        Used only when riptable threading was turned off.

        Example
        -------
        a=rt.arange(1_000_00)
        Threading.off()
        %time a+=1
        Threading.on()
        %time a+=1

        Returns
        -------
        Previously whether threading was on or not. 0 or 1. 0=threading was off before.
        '''
        return FastArray._TON()
    @staticmethod
    def off():
        '''
        Turn riptable threading off.
        Useful for when the system has other processes using other threads
        or to limit threading resources.

        Example
        -------
        a=rt.arange(1_000_00)
        Threading.off()
        %time a+=1
        Threading.on()
        %time a+=1

        Returns
        -------
        Previously whether threading was on or not. 0 or 1. 0=threading was off before.
        '''
        return FastArray._TOFF()
    @staticmethod
    def threads(threadcount):
        '''
        Set how many worker threads riptable can use.
        Often defaults to 12 and cannot be set below 1 or > 31.
        To turn riptable threading off completely use Threading.off()
        Useful for when the system has other processes using other threads
        or to limit threading resources.

        Example
        -------
        Threading.threads(8)

        Returns
        -------
        number of threads previously used
        '''
        return rc.SetThreadWakeUp(threadcount)
#----------------------------------------------------------
class Recycle():
    """Static controls for riptable's array-recycling (memory reuse)
    system; each method delegates to the native layer."""
    @staticmethod
    def on():
        '''
        Turn riptable recycling on.
        Used only when riptable recycling was turned off.

        Example
        -------
        a=arange(1_000_00)
        Recycle.off()
        %timeit a=a + 1
        Recycle.on()
        %timeit a=a + 1
        '''
        return FastArray._RON()
    @staticmethod
    def off():
        # Turn riptable recycling off (see Recycle.on for a timing example).
        return FastArray._ROFF()
    @staticmethod
    def now(timeout:int = 0):
        '''
        Pass the garbage collector timeout value to cleanup.
        Also calls the python garbage collector.

        Parameters
        ----------
        timeout: default to 0.  0 will not set a timeout

        Returns
        -------
        total arrays deleted
        '''
        import gc
        gc.collect()
        result= rc.RecycleGarbageCollectNow(timeout)['TotalDeleted']
        if result > 0:
            # NOTE(review): a second pass runs whenever the first deleted
            # anything — presumably to catch arrays released by the first
            # pass; its count is not added to the returned total (confirm)
            rc.RecycleGarbageCollectNow(timeout)
        return result
    @staticmethod
    def timeout(timeout:int = 100):
        '''
        Pass the garbage collector timeout value to expire.
        The timeout value is roughly in 2/5 secs.
        A value of 100 is usually about 40 seconds.
        If an array has not been reused by the timeout, it is permanently deleted.

        Returns
        -------
        previous timespan
        '''
        return rc.RecycleSetGarbageCollectTimeout(timeout)
#----------------------------------------------------------
class Ledger():
    """Static controls for the math ledger, which records array math calls
    through TypeRegister.MathLedger."""
    @staticmethod
    def on():
        '''Turn the math ledger on to record all array math routines'''
        return TypeRegister.MathLedger._LedgerOn()
    @staticmethod
    def off():
        '''Turn the math ledger off'''
        return TypeRegister.MathLedger._LedgerOff()
    @staticmethod
    def dump(dataset=True):
        '''Print out the math ledger'''
        return TypeRegister.MathLedger._LedgerDump(dataset=dataset)
    @staticmethod
    def to_file(filename):
        '''Save the math ledger to a file'''
        return TypeRegister.MathLedger._LedgerDumpFile(filename)
    @staticmethod
    def clear():
        '''Clear all the entries in the math ledger'''
        return TypeRegister.MathLedger._LedgerClear()
#----------------------------------------------------------
# Module-load side effects: everything below runs once at import time.
# this is called when the module is loaded
_FixupDocStrings()
# NOTE: Keep this at the end of the file
#-----------------------------------------------------------
# calling this function will force fm to return FastArray subclass
_setfastarraytype()
# make FastArray discoverable through the central type registry
TypeRegister.FastArray=FastArray
# attach the module-level `describe` function as a FastArray method
FastArray.register_function('describe', describe)
| 42.118437 | 207 | 0.54717 |
acee0450ae63813ffedcffacf947818f1dcba2f2 | 1,398 | py | Python | pgsmo/objects/view/view.py | DaeunYim/pgtoolsservice | b7e548718d797883027b2caee2d4722810b33c0f | [
"MIT"
] | 33 | 2019-05-27T13:04:35.000Z | 2022-03-17T13:33:05.000Z | pgsmo/objects/view/view.py | DaeunYim/pgtoolsservice | b7e548718d797883027b2caee2d4722810b33c0f | [
"MIT"
] | 31 | 2019-06-10T01:55:47.000Z | 2022-03-09T07:27:49.000Z | pgsmo/objects/view/view.py | DaeunYim/pgtoolsservice | b7e548718d797883027b2caee2d4722810b33c0f | [
"MIT"
] | 25 | 2019-05-13T18:39:24.000Z | 2021-11-16T03:07:33.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os.path
from smo.common.node_object import NodeCollection, NodeObject
from pgsmo.objects.view.view_base import ViewBase
from pgsmo.objects.table_objects.rule import Rule
from pgsmo.objects.table_objects.trigger import Trigger
from pgsmo.objects.server import server as s # noqa
import smo.utils.templating as templating
class View(ViewBase):
    """A database view node; extends ViewBase with child collections for
    the view's rules and triggers."""
    # root folder containing the rendering templates for this node type
    TEMPLATE_ROOT = templating.get_template_root(__file__, 'view_templates')
    @classmethod
    def _template_root(cls, server: 's.Server'):
        # templates are further specialized per server type
        return os.path.join(cls.TEMPLATE_ROOT, server.server_type)
    def __init__(self, server: 's.Server', parent: NodeObject, name: str):
        """Initialize the view node and register its child collections."""
        ViewBase.__init__(self, server, parent, name)
        self._rules: NodeCollection[Rule] = self._register_child_collection(Rule)
        self._triggers: NodeCollection[Trigger] = self._register_child_collection(Trigger)
    @property
    def rules(self) -> NodeCollection[Rule]:
        """Collection of rules defined on this view."""
        return self._rules
    @property
    def triggers(self) -> NodeCollection[Trigger]:
        """Collection of triggers defined on this view."""
        return self._triggers
| 39.942857 | 94 | 0.65093 |
acee068d996a2489cb1fd17bc2b384bd72d38ea9 | 167 | py | Python | main/__init__.py | DarwishMenna/pathways-backend | e9825e0373c586ce8f07ee8b70aecc7de679fb41 | [
"BSD-3-Clause"
] | null | null | null | main/__init__.py | DarwishMenna/pathways-backend | e9825e0373c586ce8f07ee8b70aecc7de679fb41 | [
"BSD-3-Clause"
] | null | null | null | main/__init__.py | DarwishMenna/pathways-backend | e9825e0373c586ce8f07ee8b70aecc7de679fb41 | [
"BSD-3-Clause"
] | null | null | null | __version__ = '1.2.1'
# Split "X.Y.Z[-suffix]" into a comparable tuple, e.g. '1.2.1' -> (1, 2, 1);
# the generator is passed to tuple() directly (no intermediate list needed).
__version_info__ = tuple(int(num) if num.isdigit() else num for num in
                         __version__.replace('-', '.', 1).split('.'))
| 33.4 | 71 | 0.550898 |
acee07184eccee56bc00263e8140ad8eabcf1233 | 1,373 | py | Python | Paris/2015/2015-05-21-Power4-Python-Randori/power4_test.py | murex/coding-dojo | 7e84cc971f6716d9ff2c3cbf22c11cfe93d8d275 | [
"MIT"
] | 10 | 2015-08-05T15:27:06.000Z | 2018-10-10T13:57:42.000Z | Paris/2015/2015-05-21-Power4-Python-Randori/power4_test.py | murex/coding-dojo | 7e84cc971f6716d9ff2c3cbf22c11cfe93d8d275 | [
"MIT"
] | 6 | 2015-09-09T14:04:18.000Z | 2016-09-01T19:46:50.000Z | Paris/2015/2015-05-21-Power4-Python-Randori/power4_test.py | murex/coding-dojo | 7e84cc971f6716d9ff2c3cbf22c11cfe93d8d275 | [
"MIT"
] | 10 | 2015-08-12T12:26:42.000Z | 2016-03-09T12:44:06.000Z | import power4
#def test_final():
# assert power4.isOver([
# [0, 1, 2, 1, 2, 1, 1],
# [0, 2, 1, 1, 1, 2, 2],
# [0, 1, 2, 2, 2, 1, 1],
# [1, 2, 1, 1, 1, 2, 2],
# [2, 1, 1, 2, 2, 2, 1],
# [1, 2, 2, 1, 2, 1, 2],
# ]) == True
# Boards are lists of rows: 0 = empty cell, 1 / 2 = player discs.
# power4.isOver() should report True once either player has four in a row.
def test_p1_win_one_line():
    # four consecutive 1s in a single row
    assert power4.isOver([[0, 1, 1, 1, 1, 2, 1]]) == True
def test_p2_win_one_line():
    # four consecutive 2s in a single row
    assert power4.isOver([[0, 1, 2, 2, 2, 2, 1]]) == True
def test_draw_one_line():
    # no run of four anywhere: game not over
    assert power4.isOver([[0, 1, 2, 1, 2, 2, 1]]) == False
def test_p1_win_with_two_lines():
    # the winning row may be any row, not just the first
    assert power4.isOver([
        [0, 1, 2, 1, 2, 2, 1],
        [0, 1, 1, 1, 1, 2, 1]
    ]) == True
def test_p1_wins_one_column():
    # four consecutive 1s stacked in a column
    assert power4.isOver([
        [0],
        [0],
        [1],
        [1],
        [1],
        [1]
    ]) == True
def test_p2_wins_one_column():
    # four consecutive 2s stacked in a column
    assert power4.isOver([
        [0],
        [0],
        [2],
        [2],
        [2],
        [2]
    ]) == True
def test_win_in_two_column_board():
    # BUG FIX: this function was also named test_p2_wins_one_column, silently
    # shadowing the single-column test above so pytest never ran it; renamed
    # so both tests are discovered.  (Note: column 1 here holds four 1s, so
    # the win is actually player 1's — the old name was misleading too.)
    assert power4.isOver([
        [0,0],
        [0,0],
        [0,1],
        [2,1],
        [2,1],
        [2,1]
    ]) == True
def test_diagonal():
assert power4.isOver([
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 2],
[0, 0, 0, 0, 1, 2, 1],
[0, 0, 0, 1, 2, 1, 2],
]) == True | 24.087719 | 58 | 0.405681 |
acee07929998ebc4697451ca085cf4dffce8837a | 491 | py | Python | test/ResultsAndPrizes/6x45/test_6x45_sum_superprize.py | FearFactor1/SPA | a05aaa924c5bebb52cd508ebdf7fd3b81c49fac7 | [
"Apache-2.0"
] | 1 | 2019-12-05T06:50:54.000Z | 2019-12-05T06:50:54.000Z | test/ResultsAndPrizes/6x45/test_6x45_sum_superprize.py | FearFactor1/SPA | a05aaa924c5bebb52cd508ebdf7fd3b81c49fac7 | [
"Apache-2.0"
] | null | null | null | test/ResultsAndPrizes/6x45/test_6x45_sum_superprize.py | FearFactor1/SPA | a05aaa924c5bebb52cd508ebdf7fd3b81c49fac7 | [
"Apache-2.0"
] | null | null | null | # 6из45 + Сумма суперприза
def test_6x45_sum_superprize(app):
app.ResultAndPrizes.open_page_results_and_prizes()
app.ResultAndPrizes.click_game_6x45()
app.ResultAndPrizes.click_sum_superprize()
app.ResultAndPrizes.button_get_report_winners()
app.ResultAndPrizes.parser_report_text_winners()
assert "СУПЕРПРИЗЫ" in app.ResultAndPrizes.parser_report_text_winners()
app.ResultAndPrizes.message_id_33_6x45_superprizes()
app.ResultAndPrizes.comeback_main_page() | 37.769231 | 75 | 0.814664 |
acee0ab764eec92fb82d4cfbcd3819144c4b8140 | 2,097 | py | Python | scheduler.py | vtalks/pipeline | b075836f16157c7096eebc9cfd9c43301e137a61 | [
"Apache-2.0"
] | 1 | 2018-07-07T11:56:44.000Z | 2018-07-07T11:56:44.000Z | scheduler.py | vtalks/pipeline | b075836f16157c7096eebc9cfd9c43301e137a61 | [
"Apache-2.0"
] | null | null | null | scheduler.py | vtalks/pipeline | b075836f16157c7096eebc9cfd9c43301e137a61 | [
"Apache-2.0"
] | null | null | null | import logging
import asyncio
import signal
from nats.aio.client import Client as NATS
logger = logging.getLogger(__name__)
class Scheduler:
    """ Scheduler instance

    Owns the asyncio event loop and the NATS client connection, and wires
    up signal handlers for graceful shutdown.
    """
    # The event loop
    event_loop = None
    # NATS client instance
    nats_client = None
    def __init__(self):
        self.event_loop = asyncio.get_event_loop()
        self.nats_client = NATS()
    def signal_handler(self):
        """ Shutdown scheduler client gracefully
        """
        if self.nats_client.is_closed:
            return
        logger.info("Closing NATS connection ...")
        self.event_loop.create_task(self.nats_client.close())
    @asyncio.coroutine
    def closed_callback(self):
        """ Callback to close NATS client connection.
        """
        # brief pause lets pending tasks settle before stopping the loop
        yield from asyncio.sleep(0.1, loop=self.event_loop)
        self.event_loop.stop()
    @asyncio.coroutine
    def reconnected_callback(self):
        """ Callback for NATS client reconnections.
        """
        logger.info("Connected to NATS at {}...".format(self.nats_client.connected_url.netloc))
    async def connect_NATS(self, options):
        """ Connect to NATS server.
        """
        await self.nats_client.connect(**options)
    async def boostrap(self):
        """ Create a NATS client and listen for signals to graceful shutdown
        the scheduler.
        """
        # NOTE(review): method name is misspelled ("boostrap"); renaming
        # would break existing callers, so it is left as-is
        options = {
            "servers": ["nats://nats:4222"],
            "io_loop": self.event_loop,
            "closed_cb": self.closed_callback,
            "reconnected_cb": self.reconnected_callback,
        }
        await self.connect_NATS(options)
        # Listen for signals to graceful shutdown
        for sig in ('SIGINT', 'SIGTERM'):
            self.event_loop.add_signal_handler(getattr(signal, sig), self.signal_handler)
def subscribe(self, subject, callback):
""" Subscribe to message event and assign a callback to execute.
"""
self.event_loop.create_task(self.nats_client.subscribe(subject, cb=callback))
msg = "Subscribe to {:s}".format(subject)
logger.info(msg) | 29.957143 | 95 | 0.630424 |
acee0ca67feb8ac4de260fa9f866ce74b90e7f8e | 588 | py | Python | scrapy_heroku/scheduler.py | Keystone-Strategy/kslabs-scrapy-heroku | 4db794965bf365ac631c67ec32b49331e9e4c38e | [
"BSD-3-Clause"
] | null | null | null | scrapy_heroku/scheduler.py | Keystone-Strategy/kslabs-scrapy-heroku | 4db794965bf365ac631c67ec32b49331e9e4c38e | [
"BSD-3-Clause"
] | null | null | null | scrapy_heroku/scheduler.py | Keystone-Strategy/kslabs-scrapy-heroku | 4db794965bf365ac631c67ec32b49331e9e4c38e | [
"BSD-3-Clause"
] | null | null | null |
from scrapy_heroku.utils import get_spider_queues
from scrapyd.interfaces import ISpiderScheduler
from zope.interface import implementer
@implementer(ISpiderScheduler)
class Psycopg2SpiderScheduler(object):
    """Scrapyd spider scheduler whose per-project queues come from
    get_spider_queues (Postgres-backed, per the module name)."""
    def __init__(self, config):
        self.config = config
        self.update_projects()
    def schedule(self, project, spider_name, **spider_args):
        # enqueue a crawl of `spider_name` on the project's queue
        q = self.queues[project]
        q.add(spider_name, **spider_args)
    def list_projects(self):
        # names of all projects that currently have a queue
        return self.queues.keys()
def update_projects(self):
self.queues = get_spider_queues(self.config) | 28 | 60 | 0.72449 |
acee0ce2161a3c644f07d7d4c903c532f46eea61 | 2,267 | py | Python | src/preprocessor/data_splitter.py | Karunya-Manoharan/High-school-drop-out-prediction | 13bf3f10f2344fb066463fe3f0eaaef6894f01c9 | [
"MIT"
] | null | null | null | src/preprocessor/data_splitter.py | Karunya-Manoharan/High-school-drop-out-prediction | 13bf3f10f2344fb066463fe3f0eaaef6894f01c9 | [
"MIT"
] | null | null | null | src/preprocessor/data_splitter.py | Karunya-Manoharan/High-school-drop-out-prediction | 13bf3f10f2344fb066463fe3f0eaaef6894f01c9 | [
"MIT"
] | null | null | null | from typing import Any, List, Tuple
import pandas as pd
from parse import yamlobj
from preprocessor.base import Base
@yamlobj("!YearSplitter")
class YearSplitter(Base):
    """Preprocessor that splits data into train and test sets based on column
    values."""
    def __init__(self, train_len: int, gap: int, test_len: int,
                 year_col: int) -> None:
        """
        train_len: length of train set in years
        test_len: length of test set in years
        gap: gap between train and test set in years
        year_col: column for setting year to split on
        """
        # NOTE(review): year_col is annotated int but is used as a column
        # label in transform() — confirm whether it is an index or a name
        self.train_len = train_len
        self.test_len = test_len
        self.gap = gap
        self.year_col = year_col
    def transform(
            self, df: pd.DataFrame, labels: pd.DataFrame
    ) -> List[Tuple[int, pd.DataFrame, pd.DataFrame]]:
        """Produce rolling (year, train_set, test_set) splits.

        For every candidate test-start year, the preceding `train_len` years
        (separated by `gap` years) form the train set and the following
        `test_len` years form the test set.  Each set is a
        (frame, labels) pair of copies aligned on the selected index.
        """
        # NOTE(review): adds a 'Year' column to the caller's DataFrame in
        # place — confirm callers expect this side effect
        df['Year'] = df[self.year_col]
        min_year, max_year = df['Year'].min(), df['Year'].max()
        assert self.train_len + self.gap + self.test_len <= max_year - min_year + 1, f'Not enough data for train test split, min year: {min_year}, max_year: {max_year}'
        # first and last possible test-start years
        start_year = min_year + self.train_len + self.gap
        end_year = max_year - self.test_len + 1
        splits = []
        for year in range(start_year, end_year + 1):
            # train window: [train_start, train_end], gap years before `year`
            train_start = year - self.gap - self.train_len
            train_end = train_start + self.train_len - 1
            train_indices = df[(df['Year'] >= train_start)
                               & (df['Year'] <= train_end)].index.unique()
            train_set = (df.loc[train_indices, :].copy(),
                         labels.loc[train_indices, :].copy())
            # test window: [year, test_end]
            test_end = year + self.test_len - 1
            test_indices = df[(df['Year'] >= year)
                              & (df['Year'] <= test_end)].index.unique()
            test_set = (df.loc[test_indices, :].copy(),
                        labels.loc[test_indices, :].copy())
            splits.append((year, train_set, test_set))
            # sanity checks: the index selection must not duplicate rows
            assert not train_set[0].index.duplicated().any()
            assert train_set[0].index.is_unique
            assert not test_set[0].index.duplicated().any()
            assert test_set[0].index.is_unique
        return splits
| 40.482143 | 168 | 0.582267 |
acee0cfb1fee91a1046e423b95bfb3e99fbfaf0f | 4,061 | py | Python | horizon/decorators.py | hemantsonawane95/horizon-apelby | 01a5e72219aeca8c1451701ee85e232ed0618751 | [
"Apache-2.0"
] | null | null | null | horizon/decorators.py | hemantsonawane95/horizon-apelby | 01a5e72219aeca8c1451701ee85e232ed0618751 | [
"Apache-2.0"
] | null | null | null | horizon/decorators.py | hemantsonawane95/horizon-apelby | 01a5e72219aeca8c1451701ee85e232ed0618751 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 CRS4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
General-purpose decorators for use with Horizon.
"""
import functools
from django.utils.translation import gettext_lazy as _
def _current_component(view_func, dashboard=None, panel=None):
"""Sets the currently-active dashboard and/or panel on the request."""
@functools.wraps(view_func, assigned=functools.WRAPPER_ASSIGNMENTS)
def dec(request, *args, **kwargs):
if dashboard:
request.horizon['dashboard'] = dashboard
if panel:
request.horizon['panel'] = panel
return view_func(request, *args, **kwargs)
return dec
def require_auth(view_func):
    """Performs user authentication check.
    Similar to Django's `login_required` decorator, except that this throws
    :exc:`~horizon.exceptions.NotAuthenticated` exception if the user is not
    signed-in.
    """
    # imported here rather than at module scope — presumably to avoid a
    # circular import with horizon.exceptions (TODO confirm)
    from horizon.exceptions import NotAuthenticated
    @functools.wraps(view_func, assigned=functools.WRAPPER_ASSIGNMENTS)
    def dec(request, *args, **kwargs):
        if request.user.is_authenticated:
            return view_func(request, *args, **kwargs)
        # unauthenticated: raise a catchable horizon exception rather than
        # redirecting as Django's login_required does
        raise NotAuthenticated(_("Please log in to continue."))
    return dec
def require_perms(view_func, required):
    """Enforces permission-based access controls.
    :param list required: A tuple of permission names, all of which the request
                          user must possess in order access the decorated view.
    Example usage::
        from horizon.decorators import require_perms
        @require_perms(['foo.admin', 'foo.member'])
        def my_view(request):
            ...
    Raises a :exc:`~horizon.exceptions.NotAuthorized` exception if the
    requirements are not met.
    """
    from horizon.exceptions import NotAuthorized
    # We only need to check each permission once for a view, so we'll use a set
    # Permissions accumulate on the view function itself, so stacking this
    # decorator unions the requirements rather than replacing them.
    current_perms = getattr(view_func, '_required_perms', set([]))
    view_func._required_perms = current_perms | set(required)
    @functools.wraps(view_func, assigned=functools.WRAPPER_ASSIGNMENTS)
    def dec(request, *args, **kwargs):
        if request.user.is_authenticated:
            if request.user.has_perms(view_func._required_perms):
                return view_func(request, *args, **kwargs)
            raise NotAuthorized(_("You are not authorized to access %s")
                                % request.path)
        # NOTE(review): an unauthenticated request falls through here and the
        # wrapper returns None (no exception raised) -- confirm this is the
        # intended behavior rather than raising NotAuthorized as well.
    # If we don't have any permissions, just return the original view.
    if required:
        return dec
    return view_func
def require_component_access(view_func, component):
    """Reject the request unless ``component`` grants access to its views.

    :param component: the panel or dashboard containing the view.

    Raises a :exc:`~horizon.exceptions.NotAuthorized` exception if the
    user cannot access the component containing the view; component
    policy rules are thereby applied to its views.
    """
    from horizon.exceptions import NotAuthorized

    @functools.wraps(view_func, assigned=functools.WRAPPER_ASSIGNMENTS)
    def wrapped(request, *args, **kwargs):
        allowed = component.can_access({'request': request})
        if allowed:
            return view_func(request, *args, **kwargs)
        raise NotAuthorized(_("You are not authorized to access %s")
                            % request.path)
    return wrapped
| 35.622807 | 79 | 0.693179 |
acee0d665eecc65c7a832102ba3d9ace0efd1916 | 244 | py | Python | Aula14/ex08.py | danicon/MD2-Curso_Python | 77a2eb2d123eb1359dd7c84360c83bf3b3033ab4 | [
"MIT"
] | 1 | 2020-11-28T14:48:03.000Z | 2020-11-28T14:48:03.000Z | Aula14/ex08.py | danicon/MD2-Curso_Python | 77a2eb2d123eb1359dd7c84360c83bf3b3033ab4 | [
"MIT"
] | null | null | null | Aula14/ex08.py | danicon/MD2-Curso_Python | 77a2eb2d123eb1359dd7c84360c83bf3b3033ab4 | [
"MIT"
] | null | null | null | print('Gerador de PA')
print('-='*10)
primeiro = int(input('Primeiro termo: '))
razao = int(input('Razao da PA: '))
termo = primeiro
cont = 1
while cont <= 10:
print(f'{termo} \032 ', end='')
termo += razao
cont += 1
print('FIM') | 22.181818 | 41 | 0.586066 |
acee0dd98cca40f2f419594660caac5c5c962110 | 4,175 | py | Python | main.py | W-Glenton/Checkers-Game-With-MiniMax-AI | 3f8625bdc1ba712aaf9ab551e7c285075c3ac96f | [
"MIT"
] | null | null | null | main.py | W-Glenton/Checkers-Game-With-MiniMax-AI | 3f8625bdc1ba712aaf9ab551e7c285075c3ac96f | [
"MIT"
] | null | null | null | main.py | W-Glenton/Checkers-Game-With-MiniMax-AI | 3f8625bdc1ba712aaf9ab551e7c285075c3ac96f | [
"MIT"
] | null | null | null | from board import Board
from piece import Piece
from move import Move
from copy import deepcopy
import random
board=Board(8)
def minimax(board, depth, alpha, beta, maximum):
    """Alpha-beta pruned minimax search over checkers positions.

    board   -- current Board (never mutated; every move runs on a deepcopy)
    depth   -- remaining plies to search; 0 (or game over) evaluates the board
    alpha   -- best score the maximising side ("w") is guaranteed so far
    beta    -- best score the minimising side ("b") is guaranteed so far
    maximum -- True when it is white's (maximising) turn

    Returns (score, best_move); best_move is 0 at leaf/terminal nodes and
    [] when the side to move has no legal moves.
    """
    boardcopy = deepcopy(board)  # copy board to avoid changing game board object
    if depth == 0 or board.gameOver():  # base case
        return board.score(), 0
    if maximum:
        bestmove = []
        val = float('-inf')
        for move in boardcopy.getvalidmoves("w"):  # all valid moves for white
            boardcopy = deepcopy(board)
            move[0].execute(boardcopy, move[1])
            # BUG FIX: the original recursed with alpha=True, beta=True,
            # throwing away the pruning window every ply. Pass the real
            # alpha/beta down instead.
            value, _ = minimax(boardcopy, depth - 1, alpha, beta, False)
            if val == value and random.randint(0, 1) == 0:
                # Equal scores: pick one at random so play isn't deterministic.
                bestmove = move[0]
            if val < value:
                val = value
                bestmove = move[0]
                # Tighten alpha, never lower it below the caller's guarantee.
                alpha = max(alpha, value)
            if alpha >= beta:  # minimiser already has a better option elsewhere
                break
        return val, bestmove
    else:
        bestmove = []
        val = float('inf')
        for move in boardcopy.getvalidmoves("b"):  # all valid moves for black
            boardcopy = deepcopy(board)
            move[0].execute(boardcopy, move[1])
            value, _ = minimax(boardcopy, depth - 1, alpha, beta, True)
            if val == value and random.randint(0, 1) == 0:
                bestmove = move[0]
            if val > value:
                val = value
                bestmove = move[0]
                beta = min(beta, value)
            if beta <= alpha:
                break
        return val, bestmove
new = Board(8)#initialise new board
new.populate()
new.display()
depth=0
ease=int(input("Select difficulty:\n1.Easy\n2.Medium\n3.Hard\nEnter the number of your choice: "))#select difficulty
# Difficulty maps directly to minimax search depth.
if ease == 1:
    depth=2
elif ease == 2:
    depth=5
elif ease == 3:
    depth =7
else:
    print("restart program and enter valid number")
game=True
# NOTE: on an invalid difficulty choice depth stays 0, so this loop never runs.
while game and depth:#game loop
    correct=False
    while correct==False:#loop in case of invalid input
        x1=int(input("X1: "))
        y1=int(input("y1: "))
        x2=int(input("X2: "))
        y2=int(input("Y1: "))
        move=Move(x1,y1,x2,y2)
        move.translateCoOrds()
        valid,skips,string =move.validate("w",new)
        if valid:
            correct=True
        else:
            print(string)#print reason why not valid input
    # valid is always truthy here: the inner loop only exits once a move
    # validates, so the else branch below is effectively unreachable.
    if valid:
        move.execute(new,skips)#execute player move
        new.display()
        print(new.score())
        end=new.gameOver()#if game over end loop
        if end=="b":
            print("Black wins!")
            break
        elif end=="w":
            print("White wins!")
            break
    else:
        print(string)
    val,move=minimax(new,depth,float('-inf'),float('inf'),False)#AI search for best move
    valid,skips,string=move.validate("b",new)#validate move and get skips
    if valid:#if valid execute
        move.execute(new,skips)
    else:
        print(string)
    new.display()#display board after AI move
    print(new.score())
    end=new.gameOver()#if game over break out of loop
    if end=="b":
        print("Black wins!")
        break
    elif end=="w":
        print("White wins!")
        break
#I made the below code out of interest, to run two minimax AIs against each other
"""for i in range(0,100):
    val,move=minimax(new,5,float('-inf'),float('inf'),False)
    if type(move)==type([]):
        break
    valid,skips,string=move.validate("b",new,False)
    if valid:
        move.execute(new,skips)
        print(new.score())
    else:
        print(string)
    new.display()
    val,move=minimax(new,5,float('-inf'),float('inf'),True)
    if type(move)==type([]):
        break
    valid,skips,string=move.validate("w",new,False)
    if valid:
        move.execute(new,skips)
        print(new.score())
    else:
        print(string)
    new.display()"""
| 27.467105 | 117 | 0.573892 |
acee0ddb8038a9efd0e9b13f0959e1ad6bffdd0e | 310 | py | Python | p2_continuous-control/networks/utils.py | drah/deep-reinforcement-learning | 1ccc3681088975926b70680dbe93cf6448b94017 | [
"MIT"
] | 1 | 2021-01-02T03:18:49.000Z | 2021-01-02T03:18:49.000Z | p2_continuous-control/networks/utils.py | drah/deep-reinforcement-learning | 1ccc3681088975926b70680dbe93cf6448b94017 | [
"MIT"
] | null | null | null | p2_continuous-control/networks/utils.py | drah/deep-reinforcement-learning | 1ccc3681088975926b70680dbe93cf6448b94017 | [
"MIT"
] | null | null | null | import torch
import numpy as np
def make_tensor(values) -> torch.Tensor:
    """Coerce ``values`` into a float32 ``torch.Tensor``.

    float32 numpy arrays are wrapped zero-copy via ``torch.from_numpy``
    (the result shares memory with the source array); existing tensors are
    returned unchanged; everything else is copied into a new float32 tensor.
    """
    # Bug fix: compare dtypes with ``==``. ``arr.dtype is np.float32`` is
    # always False (a dtype instance is never identical to the scalar type
    # object), so the zero-copy branch was previously unreachable.
    if isinstance(values, np.ndarray) and values.dtype == np.float32:
        values = torch.from_numpy(values)
    elif not isinstance(values, torch.Tensor):
        values = torch.tensor(values, dtype=torch.float32)
    return values
| 31 | 69 | 0.716129 |
acee0e2fd5c2a17b3efd182d010d1ea9f6d6bb22 | 10,259 | py | Python | salt/states/tomcat.py | jubrad/salt | 7960334fb726cfde45e6409da79a65535c626685 | [
"Apache-2.0"
] | 1 | 2021-08-14T13:48:38.000Z | 2021-08-14T13:48:38.000Z | salt/states/tomcat.py | jubrad/salt | 7960334fb726cfde45e6409da79a65535c626685 | [
"Apache-2.0"
] | 3 | 2015-03-31T14:44:05.000Z | 2015-06-18T19:02:24.000Z | salt/states/tomcat.py | jubrad/salt | 7960334fb726cfde45e6409da79a65535c626685 | [
"Apache-2.0"
] | 4 | 2020-11-04T06:28:05.000Z | 2022-02-09T10:54:49.000Z | # -*- coding: utf-8 -*-
'''
Manage Apache Tomcat web applications
=====================================
.. note::
This state requires the Tomcat Manager webapp to be installed and running.
The following grains/pillars must be set for communication with Tomcat Manager
to work:
.. code-block:: yaml
tomcat-manager:
user: 'tomcat-manager'
passwd: 'Passw0rd'
Configuring Tomcat Manager
--------------------------
To manage webapps via the Tomcat Manager, you'll need to configure
a valid user in the file ``conf/tomcat-users.xml``.
.. code-block:: xml
:caption: conf/tomcat-users.xml
<?xml version='1.0' encoding='utf-8'?>
<tomcat-users>
<role rolename="manager-script"/>
<user username="tomcat-manager" password="Passw0rd" roles="manager-script"/>
</tomcat-users>
Notes
-----
- Using multiple versions (aka. parallel deployments) on the same context
path is not supported.
- More information about the Tomcat Manager:
http://tomcat.apache.org/tomcat-7.0-doc/manager-howto.html
- If you use only this module for deployments you might want to restrict
access to the manager so it's only accessible via localhost.
For more info: http://tomcat.apache.org/tomcat-7.0-doc/manager-howto.html#Configuring_Manager_Application_Access
- Last tested on:
Tomcat Version:
Apache Tomcat/7.0.54
JVM Vendor:
Oracle Corporation
JVM Version:
1.8.0_101-b13
OS Architecture:
amd64
OS Name:
Linux
OS Version:
3.10.0-327.22.2.el7.x86_64
'''
from __future__ import absolute_import
# Private
def __virtual__():
    '''
    Only make this state module available when the ``tomcat``
    execution module is loaded.
    '''
    if 'tomcat.status' in __salt__:
        return 'tomcat'
    return False
# Functions
def war_deployed(name,
                 war,
                 force=False,
                 url='http://localhost:8080/manager',
                 timeout=180,
                 temp_war_location=None,
                 version=True):
    '''
    Enforce that the WAR will be deployed and started in the context path,
    while making use of WAR versions in the filename.
    .. note::
        For more info about Tomcats file paths and context naming, please see
        http://tomcat.apache.org/tomcat-7.0-doc/config/context.html#Naming
    name
        The context path to deploy (incl. forward slash) the WAR to.
    war
        Absolute path to WAR file (should be accessible by the user running
        Tomcat) or a path supported by the ``salt.modules.cp.get_url`` function.
    force : False
        Force deployment even if the version strings are the same.
        Disabled by default.
    url : http://localhost:8080/manager
        The URL of the Tomcat Web Application Manager.
    timeout : 180
        Timeout for HTTP requests to the Tomcat Manager.
    temp_war_location : None
        Use another location to temporarily copy the WAR file to.
        By default the system's temp directory is used.
    version : ''
        Specify the WAR version. If this argument is provided, it overrides
        the version encoded in the WAR file name, if one is present.
        .. versionadded:: 2015.8.6
        Use ``False`` or blank value to prevent guessing the version and keeping it blank.
        .. versionadded:: 2016.11.0
    Example:
    .. code-block:: yaml
        jenkins:
          tomcat.war_deployed:
            - name: /salt-powered-jenkins
            - war: salt://jenkins-1.2.4.war
            - require:
              - service: application-service
    .. note::
        Be aware that in the above example the WAR ``jenkins-1.2.4.war`` will
        be deployed to the context path ``salt-powered-jenkins##1.2.4``. To avoid this
        either specify a version yourself, or set version to ``False``.
    '''
    # Prepare
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}
    # if version is defined or False, we don't want to overwrite
    if version is True:
        version = __salt__['tomcat.extract_war_version'](war) or ''
    elif not version:
        version = ''
    webapps = __salt__['tomcat.ls'](url, timeout)
    deploy = False
    undeploy = False
    status = True
    # Gathered/specified new WAR version string
    specified_ver = 'version {0}'.format(version) if version else 'no version'
    # Determine what to do
    try:
        # Printed version strings, here to throw exception if no webapps[name]
        current_ver = 'version ' + webapps[name]['version'] \
            if webapps[name]['version'] else 'no version'
        # `endswith` on the supposed string will cause Exception if empty
        if (not webapps[name]['version'].endswith(version)
                or (version == '' and webapps[name]['version'] != version)
                or force):
            deploy = True
            undeploy = True
            ret['changes']['undeploy'] = ('undeployed {0} with {1}'.
                                          format(name, current_ver))
            ret['changes']['deploy'] = ('will deploy {0} with {1}'.
                                        format(name, specified_ver))
        else:
            deploy = False
            ret['comment'] = ('{0} with {1} is already deployed'.
                              format(name, specified_ver))
            if webapps[name]['mode'] != 'running':
                ret['changes']['start'] = 'starting {0}'.format(name)
                status = False
            else:
                return ret
    except Exception:
        # The broad except is deliberate control flow: a missing context path
        # (KeyError above) or a None version (AttributeError on endswith)
        # both mean "not deployed yet", so schedule a fresh deploy.
        deploy = True
        ret['changes']['deploy'] = ('deployed {0} with {1}'.
                                    format(name, specified_ver))
    # Test
    if __opts__['test']:
        ret['result'] = None
        return ret
    # make sure the webapp is up if deployed
    if deploy is False:
        if status is False:
            ret['comment'] = __salt__['tomcat.start'](name, url,
                                                      timeout=timeout)
            ret['result'] = ret['comment'].startswith('OK')
        return ret
    # Undeploy
    if undeploy:
        un = __salt__['tomcat.undeploy'](name, url, timeout=timeout)
        if un.startswith('FAIL'):
            ret['result'] = False
            ret['comment'] = un
            return ret
    # Deploy
    deploy_res = __salt__['tomcat.deploy_war'](war,
                                               name,
                                               'yes',
                                               url,
                                               __env__,
                                               timeout,
                                               temp_war_location=temp_war_location,
                                               version=version)
    # Return
    if deploy_res.startswith('OK'):
        ret['result'] = True
        ret['comment'] = str(__salt__['tomcat.ls'](url, timeout)[name])
        ret['changes']['deploy'] = ('deployed {0} with {1}'.
                                    format(name, specified_ver))
    else:
        ret['result'] = False
        ret['comment'] = deploy_res
        ret['changes'].pop('deploy')
    return ret
def wait(name, url='http://localhost:8080/manager', timeout=180):
    '''
    Wait for the Tomcat Manager to load.
    Notice that if tomcat is not running we won't wait for it start and the
    state will fail. This state can be required in the tomcat.war_deployed
    state to make sure tomcat is running and that the manager is running as
    well and ready for deployment.
    url : http://localhost:8080/manager
        The URL of the server with the Tomcat Manager webapp.
    timeout : 180
        Timeout for HTTP request to the Tomcat Manager.
    Example:
    .. code-block:: yaml
        tomcat-service:
          service.running:
            - name: tomcat
            - enable: True
        wait-for-tomcatmanager:
          tomcat.wait:
            - timeout: 300
            - require:
              - service: tomcat-service
        jenkins:
          tomcat.war_deployed:
            - name: /ran
            - war: salt://jenkins-1.2.4.war
            - require:
              - tomcat: wait-for-tomcatmanager
    '''
    # tomcat.status returns a boolean, which doubles as this state's result.
    result = __salt__['tomcat.status'](url, timeout)
    ret = {'name': name,
           'result': result,
           'changes': {},
           'comment': ('tomcat manager is ready' if result
                       else 'tomcat manager is not ready')
           }
    return ret
def mod_watch(name, url='http://localhost:8080/manager', timeout=180):
    '''
    The tomcat watcher function: reload the webapp in question and
    report whether the Tomcat Manager acknowledged the reload.
    '''
    message = __salt__['tomcat.reload'](name, url, timeout)
    succeeded = message.startswith('OK')
    return {
        'name': name,
        'result': succeeded,
        'changes': {name: succeeded},
        'comment': message,
    }
def undeployed(name,
               url='http://localhost:8080/manager',
               timeout=180):
    '''
    Enforce that the WAR will be undeployed from the server
    name
        The context path to undeploy.
    url : http://localhost:8080/manager
        The URL of the server with the Tomcat Manager webapp.
    timeout : 180
        Timeout for HTTP request to the Tomcat Manager.
    Example:
    .. code-block:: yaml
        jenkins:
          tomcat.undeployed:
            - name: /ran
            - require:
              - service: application-service
    '''
    # Prepare
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}
    if not __salt__['tomcat.status'](url, timeout):
        ret['comment'] = 'Tomcat Manager does not respond'
        ret['result'] = False
        return ret
    try:
        version = __salt__['tomcat.ls'](url, timeout)[name]['version']
        ret['changes'] = {'undeploy': version}
    except KeyError:
        # Context path not deployed at all -- nothing to do, success as-is.
        return ret
    # Test
    if __opts__['test']:
        ret['result'] = None
        return ret
    undeploy = __salt__['tomcat.undeploy'](name, url, timeout=timeout)
    if undeploy.startswith('FAIL'):
        ret['result'] = False
        ret['comment'] = undeploy
        return ret
    return ret
| 29.997076 | 114 | 0.559216 |
acee0e68f876d6a55ca86a9768a7a197cea0070c | 241 | py | Python | TemplateDemo/TemplateDemo/urls.py | KiralyTamas/Django | d0df4bcffa499c25602e2672c230226440879f6d | [
"MIT"
] | null | null | null | TemplateDemo/TemplateDemo/urls.py | KiralyTamas/Django | d0df4bcffa499c25602e2672c230226440879f6d | [
"MIT"
] | null | null | null | TemplateDemo/TemplateDemo/urls.py | KiralyTamas/Django | d0df4bcffa499c25602e2672c230226440879f6d | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path
from templatesapp import views
# URL routes for the template demo project: the admin site plus two
# template-rendering views from templatesapp.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('firsttemplate/', views.renderTemplate),
    path('empinfo/', views.renderEmployee)
]
acee0fa23a74d29026c6bfca84cae344d55eb697 | 7,624 | py | Python | baselines/jft/input_utils_test.py | y0ast/uncertainty-baselines | 8d32c77ba0803ed715c1406378adf10ebd61ab74 | [
"Apache-2.0"
] | null | null | null | baselines/jft/input_utils_test.py | y0ast/uncertainty-baselines | 8d32c77ba0803ed715c1406378adf10ebd61ab74 | [
"Apache-2.0"
] | null | null | null | baselines/jft/input_utils_test.py | y0ast/uncertainty-baselines | 8d32c77ba0803ed715c1406378adf10ebd61ab74 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the input pipeline utilities used in the ViT experiments."""
import os
import pathlib
import tempfile
from absl import logging
from absl.testing import parameterized
import jax
import tensorflow as tf
import tensorflow_datasets as tfds
import input_utils # local file import from baselines.jft
class InputUtilsTest(parameterized.TestCase, tf.test.TestCase):
  """Tests for input_utils example counting and deterministic data loading."""

  def setUp(self):
    super().setUp()
    # Go two directories up to the root of the UB directory.
    ub_root_dir = pathlib.Path(__file__).parents[2]
    data_dir = str(ub_root_dir) + "/.tfds/metadata"
    logging.info("data_dir contents: %s", os.listdir(data_dir))
    self.data_dir = data_dir
  def test_get_num_examples(self):
    """Checks per-process example counts with and without drop_remainder."""
    dataset_name = "imagenet21k"
    split = "full[:10]+full[20:24]"
    process_count = 3
    process_batch_size = 1
    num_examples_drop = input_utils.get_num_examples(
        dataset_name,
        split=split,
        process_batch_size=process_batch_size,
        drop_remainder=True,
        process_count=process_count,
        data_dir=self.data_dir)
    self.assertEqual(num_examples_drop, 12)
    num_examples_no_drop = input_utils.get_num_examples(
        dataset_name,
        split=split,
        process_batch_size=process_batch_size,
        drop_remainder=False,
        process_count=process_count,
        data_dir=self.data_dir)
    self.assertEqual(num_examples_no_drop, 14)
    # Larger per-process batch: dropping now removes a partial batch per
    # process, while no-drop still yields all 14 examples.
    process_batch_size = 3
    num_examples_drop = input_utils.get_num_examples(
        dataset_name,
        split=split,
        process_batch_size=process_batch_size,
        drop_remainder=True,
        process_count=process_count,
        data_dir=self.data_dir)
    self.assertEqual(num_examples_drop, 9)
    num_examples_no_drop = input_utils.get_num_examples(
        dataset_name,
        split=split,
        process_batch_size=process_batch_size,
        drop_remainder=False,
        process_count=process_count,
        data_dir=self.data_dir)
    self.assertEqual(num_examples_no_drop, 14)
  # TODO(dusenberrymw): tfds.testing.mock_data ignores sub-splits. File a bug so
  # that sub-splits can be fully tested with mocked data.
  # NOTE: These numbers are simply being used to test for determinism.
  @parameterized.parameters(
      (0, 1, 575047232.0, 804.0, 191682400.0, 268.0),
      (0, 3, 191682400.0, 268.0, 191682416.0, 268.0),
      (1, 3, 191682400.0, 268.0, 191682416.0, 268.0),
  )
  def test_get_data(self, process_index, process_count, correct_train_image_sum,
                    correct_train_labels_sum, correct_val_image_sum,
                    correct_val_labels_sum):
    """Checks batch shapes, example counts, and determinism of get_data."""
    rng = jax.random.PRNGKey(42)
    dataset = "imagenet21k"
    train_split = "full[:10]"
    val_split = "full[:10]"
    num_classes = 21843
    batch_size = 3
    shuffle_buffer_size = 20
    def _get_num_examples(ds):
      # Counts (unmasked) examples by reducing over the dataset.
      def _reduce_fn(count, batch):
        x = tf.reshape(batch["image"], [-1, 224, 224, 3])
        if "mask" in batch:
          mask = tf.reshape(batch["mask"], [-1])
          x = tf.boolean_mask(x, mask)
        return count + tf.shape(x)[0]
      return int(ds.reduce(0, _reduce_fn))
    def preprocess_fn(example):
      image = tf.io.decode_image(
          example["image"], channels=3, expand_animations=False)
      image = tf.image.resize(image, [224, 224])
      labels = tf.reduce_max(
          tf.one_hot(example["labels"], depth=num_classes), axis=0)
      return {"image": image, "labels": labels}
    rng, train_rng = jax.random.split(rng)
    process_batch_size = batch_size // process_count
    with tfds.testing.mock_data(num_examples=10, data_dir=self.data_dir):
      train_ds = input_utils.get_data(
          dataset,
          split=train_split,
          rng=train_rng,
          process_batch_size=process_batch_size,
          preprocess_fn=preprocess_fn,
          cache="loaded",
          shuffle_buffer_size=shuffle_buffer_size,
          prefetch_size=2,
          data_dir=self.data_dir,
          process_index=process_index,
          process_count=process_count)
      train_ds_1_epoch = input_utils.get_data(
          dataset,
          split=train_split,
          rng=train_rng,
          process_batch_size=process_batch_size,
          preprocess_fn=preprocess_fn,
          cache="loaded",
          num_epochs=1,
          shuffle_buffer_size=shuffle_buffer_size,
          prefetch_size=2,
          data_dir=self.data_dir,
          process_index=process_index,
          process_count=process_count)
      val_ds = input_utils.get_data(
          dataset,
          split=val_split,
          rng=None,
          process_batch_size=process_batch_size,
          preprocess_fn=preprocess_fn,
          cache="loaded",
          num_epochs=1,
          repeat_after_batching=True,
          shuffle=False,
          shuffle_buffer_size=shuffle_buffer_size,
          prefetch_size=2,
          drop_remainder=False,
          data_dir=self.data_dir,
          process_index=process_index,
          process_count=process_count)
    batch_dims = (jax.local_device_count(),
                  process_batch_size // jax.local_device_count())
    train_batch = next(iter(train_ds))
    self.assertEqual(train_batch["image"].shape, batch_dims + (224, 224, 3))
    self.assertEqual(train_batch["labels"].shape, batch_dims + (num_classes,))
    # Check that examples are dropped or not.
    self.assertEqual(
        _get_num_examples(train_ds_1_epoch),
        input_utils.get_num_examples(
            dataset,
            split=train_split,
            process_batch_size=process_batch_size,
            data_dir=self.data_dir))
    self.assertEqual(
        _get_num_examples(val_ds),
        input_utils.get_num_examples(
            dataset,
            split=val_split,
            process_batch_size=process_batch_size,
            drop_remainder=False,
            data_dir=self.data_dir))
    # Test for determinism.
    def reduction_fn(state, batch):
      prev_image_sum, prev_labels_sum = state
      image_sum = tf.math.reduce_sum(batch["image"])
      labels_sum = tf.math.reduce_sum(batch["labels"])
      return (image_sum + prev_image_sum, labels_sum + prev_labels_sum)
    train_image_sum, train_labels_sum = train_ds.take(10).reduce((0., 0.),
                                                                 reduction_fn)
    val_image_sum, val_labels_sum = val_ds.take(10).reduce((0., 0.),
                                                           reduction_fn)
    logging.info(
        "(train_image_sum, train_labels_sum, val_image_sum, "
        "val_labels_sum) = %s, %s, %s, %s", float(train_image_sum),
        float(train_labels_sum), float(val_image_sum), float(val_labels_sum))
    self.assertAllClose(train_image_sum, correct_train_image_sum)
    self.assertAllClose(train_labels_sum, correct_train_labels_sum)
    self.assertAllClose(val_image_sum, correct_val_image_sum)
    self.assertAllClose(val_labels_sum, correct_val_labels_sum)
# Allow running this test file directly, outside a test runner.
if __name__ == "__main__":
  tf.test.main()
| 35.296296 | 80 | 0.667104 |
acee0fb817831df9d7f82d97e50bf58636725179 | 77 | py | Python | #!/usr/bin/python/delete.py | xccvv/pret | a9e411149f39bf3325a2c7696bc4c4dddb379eec | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python/delete.py | xccvv/pret | a9e411149f39bf3325a2c7696bc4c4dddb379eec | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python/delete.py | xccvv/pret | a9e411149f39bf3325a2c7696bc4c4dddb379eec | [
"Apache-2.0"
] | 1 | 2018-10-28T09:29:10.000Z | 2018-10-28T09:29:10.000Z | #!/usr/bin/python
import os
# Delete file test2.txt
os.delete("text2.txt")
| 12.833333 | 23 | 0.701299 |
acee10f74ad817c183f684f451bbe45cca173d0b | 4,662 | py | Python | test/test_bin.py | sj-curtin/GenSON | 723f06e8881a86309fd0fffaf752980f292a513f | [
"MIT"
] | 377 | 2015-01-15T13:25:51.000Z | 2022-03-31T23:59:38.000Z | test/test_bin.py | sj-curtin/GenSON | 723f06e8881a86309fd0fffaf752980f292a513f | [
"MIT"
] | 54 | 2015-02-10T16:26:46.000Z | 2022-01-21T00:02:46.000Z | test/test_bin.py | sj-curtin/GenSON | 723f06e8881a86309fd0fffaf752980f292a513f | [
"MIT"
] | 55 | 2015-06-10T01:50:40.000Z | 2022-03-11T12:54:16.000Z | import unittest
import json
import os
from subprocess import Popen, PIPE
from genson import SchemaBuilder
BASE_SCHEMA = {"$schema": SchemaBuilder.DEFAULT_URI}
FIXTURE_PATH = os.path.join(os.path.dirname(__file__), 'fixtures')
SHORT_USAGE = """\
usage: genson [-h] [--version] [-d DELIM] [-e ENCODING] [-i SPACES]
[-s SCHEMA] [-$ SCHEMA_URI]
..."""
def fixture(filename):
    """Return the absolute path of the named file under FIXTURE_PATH."""
    return os.path.join(FIXTURE_PATH, filename)
def stderr_message(message):
    """Wrap *message* in the usage text genson prints for argparse errors."""
    return '%s\ngenson: error: %s\n' % (SHORT_USAGE, message)
def run(args=[], stdin_data=None):
    """
    Run the ``genson`` executable as a subprocess and return
    (stdout, stderr).
    """
    # NOTE(review): the mutable default ``args=[]`` is harmless here because
    # it is only read (``+ args``), never mutated -- but ``args=None`` with an
    # in-function default would be the safer idiom.
    genson_process = Popen(
        ['python', '-m', 'genson'] + args, stdout=PIPE, stderr=PIPE,
        stdin=PIPE if stdin_data is not None else None)
    if stdin_data is not None:
        stdin_data = stdin_data.encode('utf-8')
    # communicate() feeds stdin (if any) and collects both output streams.
    (stdout, stderr) = genson_process.communicate(stdin_data)
    genson_process.wait()
    # On Python 3 the pipes yield bytes; normalize both streams to text.
    if isinstance(stdout, bytes):
        stdout = stdout.decode('utf-8')
    if isinstance(stderr, bytes):
        stderr = stderr.decode('utf-8')
    return (stdout, stderr)
class TestBasic(unittest.TestCase):
    """Smoke tests: empty input and trivial objects, via stdin and files."""

    def test_empty_input(self):
        (stdout, stderr) = run(stdin_data='')
        self.assertEqual(stderr, '')
        self.assertEqual(json.loads(stdout), BASE_SCHEMA)
    def test_empty_object_stdin(self):
        (stdout, stderr) = run(stdin_data='{}')
        self.assertEqual(stderr, '')
        self.assertEqual(
            json.loads(stdout),
            dict({"type": "object"}, **BASE_SCHEMA))
    def test_empty_object_file(self):
        (stdout, stderr) = run([fixture('empty.json')])
        self.assertEqual(stderr, '')
        self.assertEqual(
            json.loads(stdout),
            BASE_SCHEMA)
    def test_basic_schema_file(self):
        (stdout, stderr) = run(['-s', fixture('base_schema.json')])
        self.assertEqual(stderr, '')
        self.assertEqual(
            json.loads(stdout),
            BASE_SCHEMA)
class TestError(unittest.TestCase):
    """Error paths: no input at all, and invalid JSON objects/schemas."""
    maxDiff = 1000
    BAD_JSON_FILE = fixture('not_json.txt')
    BAD_JSON_MESSAGE = stderr_message(
        'invalid JSON in %s: Expecting value: line 1 column 1 (char 0)'
        % BAD_JSON_FILE)
    def test_no_input(self):
        (stdout, stderr) = run()
        # NOTE(review): 'noting to do' mirrors the CLI's own output string --
        # do not "fix" the spelling here without changing genson itself.
        self.assertEqual(stderr, stderr_message(
            'noting to do - no schemas or objects given'))
        self.assertEqual(stdout, '')
    def test_object_not_json(self):
        (stdout, stderr) = run([self.BAD_JSON_FILE])
        self.assertEqual(stderr, self.BAD_JSON_MESSAGE)
        self.assertEqual(stdout, '')
    def test_schema_not_json(self):
        (stdout, stderr) = run(['-s', self.BAD_JSON_FILE])
        self.assertEqual(stderr, self.BAD_JSON_MESSAGE)
        self.assertEqual(stdout, '')
class TestDelimiter(unittest.TestCase):
    """Tests for the -d option: newline-delimited and auto-detected objects."""

    def test_delim_newline(self):
        (stdout, stderr) = run(['-d', 'newline'],
                               stdin_data='{"hi":"there"}\n{"hi":5}')
        self.assertEqual(stderr, '')
        self.assertEqual(
            json.loads(stdout),
            dict({"required": ["hi"], "type": "object", "properties": {
                "hi": {"type": ["integer", "string"]}}}, **BASE_SCHEMA))
    def test_delim_auto_empty(self):
        (stdout, stderr) = run(['-d', ''], stdin_data='{"hi":"there"}{"hi":5}')
        self.assertEqual(stderr, '')
        self.assertEqual(
            json.loads(stdout),
            dict({"required": ["hi"], "type": "object", "properties": {
                "hi": {"type": ["integer", "string"]}}}, **BASE_SCHEMA))
    def test_delim_auto_whitespace(self):
        (stdout, stderr) = run(['-d', ''],
                               stdin_data='{"hi":"there"} \n\t{"hi":5}')
        self.assertEqual(stderr, '')
        self.assertEqual(
            json.loads(stdout),
            dict({"required": ["hi"], "type": "object", "properties": {
                "hi": {"type": ["integer", "string"]}}}, **BASE_SCHEMA))
class TestEncoding(unittest.TestCase):
    """Tests for the -e option with UTF-8 and cp1252 encoded fixtures."""

    def test_encoding_unicode(self):
        (stdout, stderr) = run(
            ['-e', 'utf-8', fixture('utf-8.json')])
        self.assertEqual(stderr, '')
        self.assertEqual(
            json.loads(stdout),
            dict({"type": "string"}, **BASE_SCHEMA))
    def test_encoding_cp1252(self):
        (stdout, stderr) = run(
            ['-e', 'cp1252', fixture('cp1252.json')])
        self.assertEqual(stderr, '')
        self.assertEqual(
            json.loads(stdout),
            dict({"type": "string"}, **BASE_SCHEMA))
acee12157a42b19b5e4992311b9f53d7c0ccbb80 | 18,128 | py | Python | VyParse.py | Command-Master/Vyxal | 2a1fd535c786dcbce2796360931c994438777cca | [
"MIT"
] | 1 | 2021-05-26T02:00:14.000Z | 2021-05-26T02:00:14.000Z | VyParse.py | Command-Master/Vyxal | 2a1fd535c786dcbce2796360931c994438777cca | [
"MIT"
] | null | null | null | VyParse.py | Command-Master/Vyxal | 2a1fd535c786dcbce2796360931c994438777cca | [
"MIT"
] | null | null | null | class stringlib:
ascii_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
ascii_uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# --- Token field names ------------------------------------------------------
NAME = "CONSTANT_TOKEN_NAME"
VALUE = "CONSTANT_TOKEN_VALUE"

# --- Structure (parse-tree node) identifiers --------------------------------
IF_STMT = "STRUCTURE_IF"
FOR_STMT = "STRUCTURE_FOR"
WHILE_STMT = "STRUCTURE_WHILE"
FUNCTION_STMT = "STRUCTURE_FUNCTION"
NO_STMT = "STRUCTURE_NONE"
STRING_STMT = "STRUCTURE_STRING"
INTEGER = "STRUCTURE_INTEGER"
CHARACTER = "STRUCTURE_CHARACTER"
# Bug fix: an earlier duplicate assignment LAMBDA_STMT = "STRUCTURE_LAMBDA"
# was dead code -- it was unconditionally overwritten here before any use.
LAMBDA_STMT = "LAMBDA_STMT"
LAMBDA_MAP = "LAMBDA_MAP"
LAMBDA_FILTER = "LAMBDA_FILTER"
LAMBDA_SORT = "LAMBDA_SORT"
LIST_STMT = "LIST_STMT"
VARIABLE_GET = "VARIABLE_GET"
VARIABLE_SET = "VARIABLE_SET"
FUNCTION_REFERENCE = "FUNCTION_REFERENCE"
COMPRESSED_NUMBER = "COMPRESSED_NUMBER"
COMPRESSED_STRING = "COMPRESSED_STRING"
VARIABLES = [VARIABLE_GET, VARIABLE_SET]

# --- Keys used inside structure-token data dictionaries ---------------------
STRING_CONTENTS = "string_contents"
INTEGER_CONTENTS = "integer_contents"
IF_ON_TRUE = "if_on_true"
IF_ON_FALSE = "if_on_false"
FOR_VARIABLE = "for_variable"
FOR_BODY = "for_body"
WHILE_CONDITION = "while_condition"
WHILE_BODY = "while_body"
FUNCTION_NAME = "function_name"
FUNCTION_BODY = "function_body"
LAMBDA_BODY = "lambda_body"
LIST_ITEM = "list_item"
LIST_ITEMS = "list_items"
VARIABLE_NAME = "variable_name"
LAMBDA_ARGUMENTS = "lambda_arguments"
COMPRESSED_NUMBER_VALUE = "compressed_number_value"
COMPRESSED_STRING_VALUE = "compressed_string_value"
TWO_CHAR_STUFF = "two_char_data_idk"
ONE = "one"
TWO = "two"
THREE = "three"
FOUR = "four"
# --- Special characters of the Vyxal codepage -------------------------------
# Characters that consume exactly one following character.
ONE_CHARS = "kv⁽∆ø⁺Þ¨&~ß‘"
# Characters that consume exactly two following characters.
TWO_CHARS = "₌‡₍"
CONSTANT_CHAR = "k"
VECTORISATION_CHAR = "v"
SINGLE_SCC_CHAR = "‛"
CODEPAGE_INDEX = "⁺"
ONE_CHAR_FUNCTION_REFERENCE = "⁽"
# Digraph prefixes for the extra math/string/list/misc command pages.
TWO_BYTE_MATH = "∆"
TWO_BYTE_STRING = "ø"
TWO_BYTE_LIST = "Þ"
TWO_BYTE_MISC = "¨"
STRING_DELIMITER = "`"
REGISTER_MODIFIER = "&"
ONE_BYTE_DICTIONARY = "‘"
DONT_POP = "~"
CONDITIONAL_EXECUTION = "ß"
PARA_APPLY = "₌"
PARA_APPLY_COLLECT = "₍"
TWO_CHAR_LAMBDA = "‡"
THREE_CHAR_LAMBDA = "≬"
DECIMAL = "."
# Structures whose opening and closing delimiter is the same character.
OPEN_CLOSE_SAME = ["`", "«", "»"]
# Maps each structure type to the character that opens it.
OPENING = {
    NO_STMT: "",
    IF_STMT: "[",
    FOR_STMT: "(",
    WHILE_STMT: "{",
    FUNCTION_STMT: "@",
    LAMBDA_STMT: "λ",
    LAMBDA_MAP: "ƛ",
    LAMBDA_FILTER: "'",
    LAMBDA_SORT: "µ",
    LIST_STMT: "⟨",
    FUNCTION_REFERENCE: "°",
    COMPRESSED_NUMBER: "»",
    COMPRESSED_STRING: "«"
}
# Reverse lookup: opening character -> structure type.
inv_OPENING = {v: k for k,v in OPENING.items()}
# Maps each structure type to the character that closes it.
CLOSING = {
    NO_STMT: "",
    IF_STMT: "]",
    FOR_STMT: ")",
    WHILE_STMT: "}",
    FUNCTION_STMT: ";",
    LAMBDA_STMT: ";",
    LAMBDA_MAP: ";",
    LAMBDA_FILTER: ";",
    LAMBDA_SORT: ";",
    LIST_STMT: "⟩",
    FUNCTION_REFERENCE: ";",
    COMPRESSED_NUMBER: "»",
    COMPRESSED_STRING: "«"
}
# Reverse lookup: closing character -> structure type.
inv_CLOSING = {v: k for k,v in CLOSING.items()}
# The data key that a freshly opened structure writes into first.
DEFAULT_KEYS = {
    IF_STMT: IF_ON_TRUE,
    FOR_STMT: FOR_BODY,
    WHILE_STMT: WHILE_BODY,
    INTEGER: INTEGER_CONTENTS,
    FUNCTION_STMT: FUNCTION_NAME,
    LAMBDA_STMT: LAMBDA_BODY,
    LAMBDA_MAP: LAMBDA_BODY,
    LAMBDA_FILTER: LAMBDA_BODY,
    LAMBDA_SORT: LAMBDA_BODY,
    LIST_STMT: LIST_ITEM,
    FUNCTION_REFERENCE: FUNCTION_NAME,
    COMPRESSED_NUMBER: COMPRESSED_NUMBER_VALUE,
    COMPRESSED_STRING: COMPRESSED_STRING_VALUE
}
class Token:
    """A single parsed token: a (name, value) pair.

    Subscripting with 0 or ``NAME`` yields the name; 1 or ``VALUE``
    yields the value; anything else raises IndexError.
    """

    def __init__(self, name: str, value: object):
        self.name = name
        self.value = value

    def __getitem__(self, key: int):
        if key in (0, NAME):
            return self.name
        if key in (1, VALUE):
            return self.value
        raise IndexError("Token value not in the range of 0/1")

    def __str__(self):
        return f"{self.name}|{self.value}"
def group_strings(program):
    """Group string literals of *program* into ``[contents, delimiter]`` pairs.

    Scans *program* character by character and replaces every string literal
    (standard ``\u0060`` strings, compressed-string ``«`` and compressed-number
    ``»`` literals) with a two-element list ``[contents, opening_char]``.
    Every other character passes through unchanged.  A backslash escapes the
    next character both inside and outside a string; inside a standard string,
    ``\\`` followed by the backtick stores just the backtick (the backslash is
    dropped).  An unterminated string at end of input is emitted as if closed.

    Fixes over the original: the unused local ``temp`` is removed, and the
    single-character test ``char in "\\"`` is written as the equality it was
    meant to be.
    """
    out = []
    escaped = False
    STANDARD, INTEGER, ALPHA = "`", "»", "«"
    flux_string = [False, "", STANDARD]  # [in_string, temp_string, string_type]
    for char in program:
        if flux_string[0]:
            # Currently inside a string literal.
            if escaped:
                if char == STANDARD:
                    # "\`" escapes the delimiter: drop the stored backslash.
                    flux_string[1] = flux_string[1][:-1]
                flux_string[1] += char
                escaped = False
            elif char == flux_string[2]:
                # Matching closing delimiter: emit [contents, delimiter].
                out.append([flux_string[1], flux_string[2]])
                flux_string = [False, "", STANDARD]
            elif char == "\\":
                escaped = True
                flux_string[1] += char
            else:
                flux_string[1] += char
        elif escaped:
            # Escaped character outside a string is passed through verbatim.
            escaped = False
            out.append(char)
        elif char in (STANDARD, INTEGER, ALPHA):
            # Opening delimiter: start collecting a new string literal.
            flux_string[0] = True
            flux_string[1] = ""
            flux_string[2] = char
        elif char in "\\⁺":
            escaped = True
            out.append(char)
        else:
            out.append(char)
    if flux_string[0]:
        # Unterminated string: close it implicitly at end of input.
        out.append([flux_string[1], flux_string[2]])
    return out
def group_two_bytes(code):
    """Fuse two-byte element prefixes with their following character.

    Expects the output of ``group_strings`` (a mix of single characters and
    ``[contents, delimiter]`` string groups).  Whenever a digraph prefix from
    ``k∆øÞ¨`` appears, it is joined with the next item into one two-character
    element; string groups, escapes and everything else pass through
    untouched.  A dangling prefix at end of input is discarded.
    """
    grouped = []
    pending = ""
    escape_next = False
    TWO_BYTE_DELIMS = "k∆øÞ¨"
    for item in code:
        if type(item) is list:
            # Already-grouped string literal: pass straight through.
            grouped.append(item)
        elif escape_next:
            escape_next = False
            grouped.append(item)
        elif item in "\\⁺":
            escape_next = True
            grouped.append(item)
        elif pending:
            # Second half of a digraph: emit the fused pair.
            grouped.append(pending + item)
            pending = ""
        elif item in TWO_BYTE_DELIMS:
            pending = item
        else:
            grouped.append(item)
    return grouped
def Tokenise(source: str):
    """Convert pre-grouped source into a flat list of ``Token`` objects.

    *source* is expected to be the output of
    ``group_two_bytes(group_strings(...))``: an iterable of single
    characters, fused two-character digraphs and ``[contents, delimiter]``
    string groups.  A state machine tracks the currently open structure
    (if/for/while/function/lambda/list/...), collecting its raw body text
    into ``structure_data`` until the matching closing character — or end of
    input — turns it into a single ``Token``.
    """
    tokens = []
    structure = NO_STMT          # kind of structure currently being collected
    structure_data = {}          # text gathered for that structure, keyed by branch
    default_key = ""             # fallback structure_data key (see DEFAULT_KEYS)
    escaped = comment = False
    active_key = ""              # which structure_data slot new characters go into
    scc_mode, scc = False, ""    # string-compression two-character collection
    nest_level = 0               # depth of nested OPENING/CLOSING brackets
    vectorisation = False
    bracket_stack = []
    # print(source)
    for char in source:
        # print(char, structure, structure_data, escaped, nest_level)
        if comment:
            # Inside a '#' comment: swallow everything until the newline.
            if char == "\n":
                comment = False
            continue
        if escaped:
            # Previous char was a backslash: take this one literally.
            if structure != NO_STMT:
                structure_data[active_key] += "\\" + char
            else:
                tokens.append(Token(CHARACTER, char))
            escaped = False
            continue
        elif type(char) is list:
            # A [contents, delimiter] string group from group_strings.
            if structure not in [NO_STMT, INTEGER, VARIABLE_GET, VARIABLE_SET]:
                # Inside a structure body: re-serialise the literal as text.
                structure_data[active_key] += char[1] + char[0] + char[1]
            else:
                if structure == INTEGER:
                    # Close the pending number first (a second '.' ends it).
                    value = structure_data[active_key]
                    end = value.find(".", value.find(".") + 1)
                    if end > -1:
                        value = value[:end]
                    if value.isnumeric():
                        this_token = Token(INTEGER, int(value))
                    else:
                        this_token = Token(INTEGER, float(value))
                    tokens.append(this_token)
                    structure_data = {}
                    structure = NO_STMT
                elif structure in VARIABLES:
                    # Close the pending variable get/set token first.
                    this_token = Token(structure, structure_data)
                    tokens.append(this_token)
                    structure_data = {}
                    structure = NO_STMT
                    active_key = ""
                    default_key = ""
                # Map the delimiter to a (token type, payload key) pair
                # and emit the string/compressed literal token.
                yes = ({"`": STRING_STMT,
                        "«": COMPRESSED_STRING,
                        "»": COMPRESSED_NUMBER
                        }[char[1]], {"`": STRING_CONTENTS,
                                     "«": COMPRESSED_STRING_VALUE,
                                     "»": COMPRESSED_NUMBER_VALUE}[char[1]])
                tokens.append(Token(yes[0], {yes[1]: char[0]}))
            continue
        elif structure == INTEGER:
            if char in "0123456789.":
                structure_data[INTEGER_CONTENTS] += char
                continue
            else:
                # Non-numeric char terminates the number; truncate at a
                # second decimal point, then parse as int or float.
                value = structure_data[active_key]
                end = value.find(".", value.find(".") + 1)
                if end > -1:
                    value = value[:end]
                if value.isnumeric():
                    this_token = Token(INTEGER, int(value))
                else:
                    this_token = Token(INTEGER, float(value))
                tokens.append(this_token)
                structure_data = {}
                structure = NO_STMT
        elif structure in VARIABLES:
            if char in stringlib.ascii_letters + "_":
                structure_data[active_key] += char
                continue
            else:
                # End of the variable name: emit the get/set token, then
                # fall through so `char` is processed normally below.
                this_token = Token(structure, structure_data)
                tokens.append(this_token)
                structure_data = {}
                structure = NO_STMT
                active_key = ""
                default_key = ""
        elif scc_mode:
            # Collecting the two characters of a string-compression pair.
            scc += char
            if len(scc) == 2:
                scc_mode = False
                this_token = Token(SINGLE_SCC_CHAR, scc)
                tokens.append(this_token)
                scc = ""
                structure = NO_STMT
            continue
        elif structure in ONE_CHARS:
            # Digraph family head: this char completes the two-byte token.
            this_token = Token(structure, char)
            tokens.append(this_token)
            structure = NO_STMT
            continue
        elif structure == TWO_CHAR_LAMBDA:
            # '‡' lambda: the next two characters form the lambda body.
            if len(structure_data[active_key]) == 1:
                tokens.append(Token(LAMBDA_STMT, {LAMBDA_BODY: "".join(structure_data[active_key] + [char])}))
                structure = NO_STMT
                structure_data = {}
            else:
                structure_data[active_key].append(char)
            continue
        elif structure in TWO_CHARS:
            # Three-byte element: head char plus the next two characters.
            if len(structure_data[active_key]) == 1:
                tokens.append(Token(structure, structure_data[active_key][0] + char))
                structure = NO_STMT
                structure_data = {}
            else:
                structure_data[active_key] = [char]
            continue
        elif structure == THREE_CHAR_LAMBDA:
            # '≬' lambda: the next three characters form the lambda body.
            if len(structure_data[active_key]) == 2:
                tokens.append(Token(LAMBDA_STMT, {LAMBDA_BODY: "".join(structure_data[active_key] + [char])}))
                structure = NO_STMT
                structure_data = {}
            else:
                structure_data[active_key].append(char)
            continue
        if char == "\\":
            escaped = True
            continue
        if char in OPENING.values():
            if nest_level:
                # Already inside a structure: a nested opener is body text.
                # Same-char delimiters (` « ») do not deepen the nesting.
                if char not in OPEN_CLOSE_SAME:
                    nest_level += 1
                structure_data[active_key] += char
                continue
            elif char == OPENING[IF_STMT]:
                structure = IF_STMT
                active_key = IF_ON_TRUE
            elif char == OPENING[WHILE_STMT]:
                structure = WHILE_STMT
                active_key = WHILE_CONDITION
            elif char == OPENING[FOR_STMT]:
                structure = FOR_STMT
                active_key = FOR_VARIABLE
            elif char == OPENING[FUNCTION_STMT]:
                structure = FUNCTION_STMT
                active_key = FUNCTION_NAME
            elif char == OPENING[LAMBDA_STMT]:
                structure = LAMBDA_STMT
                active_key = LAMBDA_BODY
            elif char == OPENING[LAMBDA_MAP]:
                structure = LAMBDA_MAP
                active_key = LAMBDA_BODY
            elif char == OPENING[LAMBDA_FILTER]:
                structure = LAMBDA_FILTER
                active_key = LAMBDA_BODY
            elif char == OPENING[LAMBDA_SORT]:
                structure = LAMBDA_SORT
                active_key = LAMBDA_BODY
            elif char == OPENING[LIST_STMT]:
                structure = LIST_STMT
                active_key = LIST_ITEM
                structure_data[LIST_ITEMS] = []
            elif char == OPENING[FUNCTION_REFERENCE]:
                structure = FUNCTION_REFERENCE
                active_key = FUNCTION_NAME
            elif char == OPENING[COMPRESSED_NUMBER]:
                structure = COMPRESSED_NUMBER
                active_key = COMPRESSED_NUMBER_VALUE
            elif char == OPENING[COMPRESSED_STRING]:
                structure = COMPRESSED_STRING
                active_key = COMPRESSED_STRING_VALUE
            else:
                raise NotImplementedError("That structure isn't implemented yet")
            structure_data[active_key] = ""
            nest_level += 1
            default_key = DEFAULT_KEYS[structure]
        elif char in CLOSING.values():
            nest_level -= 1
            if nest_level > 0:
                # Closing a nested structure: keep it as body text.
                structure_data[active_key] += char
            else:
                # Closing the outermost structure: emit its token.  The
                # map/filter/sort lambda forms desugar to a plain lambda
                # followed by the matching built-in (M / F / ṡ).
                additional_token = None
                if structure == LAMBDA_MAP:
                    additional_token = Token(NO_STMT, "M")
                    structure = LAMBDA_STMT
                elif structure == LAMBDA_FILTER:
                    additional_token = Token(NO_STMT, "F")
                    structure = LAMBDA_STMT
                elif structure == LAMBDA_SORT:
                    additional_token = Token(NO_STMT, "ṡ")
                    structure = LAMBDA_STMT
                elif structure == LIST_STMT:
                    structure_data[LIST_ITEMS].append(structure_data[LIST_ITEM])
                    del structure_data[LIST_ITEM]
                else:
                    # If no '|' ever switched branches, the collected text
                    # belongs under the structure's default key.
                    if default_key not in structure_data and structure != NO_STMT:
                        structure_data[default_key] = structure_data[active_key]
                        del structure_data[active_key]
                this_token = Token(structure, structure_data)
                tokens.append(this_token)
                structure_data = {}
                structure = NO_STMT
                if additional_token:
                    tokens.append(additional_token)
        elif char == "|" and nest_level == 1:
            # Oh, the magical pipe which makes Vyxal and Keg unique
            # Branch separator: switch which structure_data slot fills next.
            if structure == IF_STMT:
                active_key = IF_ON_FALSE
            elif structure == WHILE_STMT:
                active_key = WHILE_BODY
            elif structure == FOR_STMT:
                active_key = FOR_BODY
            elif structure == FUNCTION_STMT:
                active_key = FUNCTION_BODY
            elif structure == LAMBDA_STMT:
                # Text before the pipe was the argument list, not the body.
                structure_data[LAMBDA_ARGUMENTS] = structure_data[LAMBDA_BODY]
                active_key = LAMBDA_BODY
            elif structure == LIST_STMT:
                structure_data[LIST_ITEMS].append(structure_data[active_key])
                structure_data[active_key] = ""
        elif structure != NO_STMT:
            # Any other character inside an open structure is body text.
            structure_data[active_key] += char
        elif char in "0123456789.":
            # Start collecting a numeric literal.
            structure = INTEGER
            structure_data[INTEGER_CONTENTS] = char
            active_key = INTEGER_CONTENTS
            default_key = DEFAULT_KEYS[INTEGER]
        elif char == "→":
            # Variable assignment: collect the name that follows.
            structure = VARIABLE_SET
            structure_data[VARIABLE_NAME] = ""
            active_key = VARIABLE_NAME
            default_key = VARIABLE_NAME
        elif char == "←":
            # Variable retrieval: collect the name that follows.
            structure = VARIABLE_GET
            structure_data[VARIABLE_NAME] = ""
            active_key = VARIABLE_NAME
            default_key = VARIABLE_NAME
        elif char == VECTORISATION_CHAR:
            # 'v' prefix: vectorise the element that follows.
            vectorisation = True
            continue
        elif char in TWO_CHARS:
            char_mode = TWO
            structure = char
            active_key = TWO_CHAR_STUFF
            structure_data[active_key] = []
        elif char == THREE_CHAR_LAMBDA:
            char_mode = THREE
            structure = THREE_CHAR_LAMBDA
            active_key = LAMBDA_BODY
            structure_data[active_key] = []
        elif char in ONE_CHARS:
            char_mode = ONE
            structure = char
        elif char == SINGLE_SCC_CHAR:
            scc_mode = True
        elif char == "#":
            comment = True
            continue
        else:
            if vectorisation:
                tokens.append(Token(VECTORISATION_CHAR, char))
                vectorisation = False
            else:
                if len(char) == 2:
                    # Fused digraph from group_two_bytes: (family, element).
                    tokens.append(Token(char[0], char[1]))
                else:
                    this_token = Token(NO_STMT, char)
                    tokens.append(this_token)
    if structure != NO_STMT:
        # End of input with a structure still open: close it implicitly,
        # applying the same desugaring as the explicit-close path above.
        # print(structure_data, default_key, active_key)
        additional_token = None
        if structure == LAMBDA_MAP:
            additional_token = Token(NO_STMT, "M")
            structure = LAMBDA_STMT
        elif structure == LAMBDA_FILTER:
            additional_token = Token(NO_STMT, "F")
            structure = LAMBDA_STMT
        elif structure == LAMBDA_SORT:
            additional_token = Token(NO_STMT, "ṡ")
            structure = LAMBDA_STMT
        elif structure == LIST_STMT:
            structure_data[LIST_ITEMS].append(structure_data[LIST_ITEM])
            del structure_data[LIST_ITEM]
        elif structure == INTEGER:
            value = structure_data[default_key]
            end = value.find(".", value.find(".") + 1)
            if end > -1:
                value = value[:end]
            if value.isnumeric():
                structure_data = int(value)
            else:
                structure_data = float(value)
        else:
            if default_key not in structure_data:
                structure_data[default_key] = structure_data[active_key]
                del structure_data[active_key]
        this_token = Token(structure, structure_data)
        tokens.append(this_token)
        structure_data = {}
        structure = NO_STMT
        if additional_token:
            tokens.append(additional_token)
    return tokens
if __name__ == "__main__":
    # Smoke test: run the full lexing pipeline on a few sample programs and
    # print the resulting (name, value) token pairs for manual inspection.
    # tests = ["«S⊍ǐ/µȦġk*∪±c*ɖøW₌≤₀e+₇ /)ðaðc~²⊍λġOṙŻZ⁽ɽẇ¼∴ðḂ>⁰IŻ↳Y%⁼ǐ∩\ǔḞo⁋$∪@ø₇↑^V×Qc□„&<$↲AFðM‟[Ẏ`∵∪SĊ⟩%IHṠλ!q⟩»ꜝ∩=ẏ¼≥ȧ(ε∑²Z₁Ẇġ@Ḃ9d@3ġf₇Ṗꜝµ∞†≥¨ǐ $*∆⇩nTǎ√7Ḃ«"]
    tests = ["123.456`hello`789 42→x`world` ←x", "‡∆p-Ẋ1=", "‘ab", "‡∆p-Ẋ1=", "‡ab", "`\\``", "‡kAkA", "vøD"]
    for test in tests:
        print([(n[0], n[1]) for n in Tokenise(group_two_bytes(group_strings(test)))])
    input()
| 29.815789 | 163 | 0.543855 |
acee124751effae92f96a369d23cac023280496a | 659 | py | Python | migrations/versions/0056.py | NewAcropolis/api | 61ffe14cb64407ffe1f58d0e970703bf07d60ea3 | [
"MIT"
] | 1 | 2018-10-12T15:04:31.000Z | 2018-10-12T15:04:31.000Z | migrations/versions/0056.py | NewAcropolis/api | 61ffe14cb64407ffe1f58d0e970703bf07d60ea3 | [
"MIT"
] | 169 | 2017-11-07T00:45:25.000Z | 2022-03-12T00:08:59.000Z | migrations/versions/0056.py | NewAcropolis/api | 61ffe14cb64407ffe1f58d0e970703bf07d60ea3 | [
"MIT"
] | 1 | 2019-08-15T14:51:31.000Z | 2019-08-15T14:51:31.000Z | """empty message
Revision ID: 0056 update numeric precision
Revises: 0055 update payment_total price
Create Date: 2021-09-27 00:31:22.285217
"""
# revision identifiers, used by Alembic.
revision = '0056 update numeric precision'
down_revision = '0055 update payment_total price'
from alembic import op
def upgrade():
op.execute("ALTER TABLE tickets ALTER COLUMN price TYPE numeric(5,2);")
op.execute("ALTER TABLE orders ALTER COLUMN payment_total TYPE numeric(6,2);")
def downgrade():
op.execute("ALTER TABLE tickets ALTER COLUMN price TYPE numeric(2,0);")
op.execute("ALTER TABLE orders ALTER COLUMN payment_total TYPE numeric(2,0);")
| 28.652174 | 82 | 0.751138 |
acee12cea534585486304849aee10fb620301cd3 | 4,662 | py | Python | texar/modules/encoders/bert_encoders_test.py | weiwei718/texar | 1fe1b46a58941641d83560d0ecefd2517729c643 | [
"Apache-2.0"
] | 1 | 2021-06-23T19:47:50.000Z | 2021-06-23T19:47:50.000Z | texar/modules/encoders/bert_encoders_test.py | mihirpurwar/texar | b4ac155a554db4c82a0b09ea36e85d64b8802644 | [
"Apache-2.0"
] | null | null | null | texar/modules/encoders/bert_encoders_test.py | mihirpurwar/texar | b4ac155a554db4c82a0b09ea36e85d64b8802644 | [
"Apache-2.0"
] | null | null | null | #
"""
Unit tests for Bert encoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
from texar.modules.encoders.bert_encoders import BertEncoder
class BertEncoderTest(tf.test.TestCase):
    """Tests :class:`~texar.modules.BertEncoder` class.
    """

    def test_hparams(self):
        """Tests the priority of the encoder arch parameter.

        Priority order exercised below: constructor argument over hparams
        entry, hparams entry over defaults, and the pure-default case.
        """
        inputs = tf.placeholder(dtype=tf.int32, shape=[None, None])

        # case 1: set "pretrained_mode_name" by constructor argument
        # (the constructor argument should win: bert-base has 12 blocks)
        hparams = {
            "pretrained_model_name": "bert-large-uncased",
        }
        encoder = BertEncoder(pretrained_model_name="bert-base-uncased",
                              hparams=hparams)
        _, _ = encoder(inputs)
        self.assertEqual(encoder.hparams.encoder.num_blocks, 12)

        # case 2: set "pretrained_mode_name" by hparams
        # (the pretrained name overrides the explicit num_blocks: 24 blocks)
        hparams = {
            "pretrained_model_name": "bert-large-uncased",
            "encoder": {
                "num_blocks": 6
            }
        }
        encoder = BertEncoder(hparams=hparams)
        _, _ = encoder(inputs)
        self.assertEqual(encoder.hparams.encoder.num_blocks, 24)

        # case 3: set to None in both hparams and constructor argument
        # (no pretrained model: the explicit num_blocks=6 is respected)
        hparams = {
            "pretrained_model_name": None,
            "encoder": {
                "num_blocks": 6
            },
        }
        encoder = BertEncoder(hparams=hparams)
        _, _ = encoder(inputs)
        self.assertEqual(encoder.hparams.encoder.num_blocks, 6)

        # case 4: using default hparams (defaults to bert-base: 12 blocks)
        encoder = BertEncoder()
        _, _ = encoder(inputs)
        self.assertEqual(encoder.hparams.encoder.num_blocks, 12)

    def test_trainable_variables(self):
        """Tests the functionality of automatically collecting trainable
        variables.

        Variable counts: 3 embedding variables, 2 for the embedding layer
        norm, 16 per transformer block, and 2 for the pooler.
        """
        inputs = tf.placeholder(dtype=tf.int32, shape=[None, None])

        # case 1: bert base (12 blocks)
        encoder = BertEncoder()
        _, _ = encoder(inputs)
        self.assertEqual(len(encoder.trainable_variables), 3+2+12*16+2)

        # case 2: bert large (24 blocks)
        hparams = {
            "pretrained_model_name": "bert-large-uncased"
        }
        encoder = BertEncoder(hparams=hparams)
        _, _ = encoder(inputs)
        self.assertEqual(len(encoder.trainable_variables), 3+2+24*16+2)

        # case 3: self-designed bert (6 blocks)
        hparams = {
            "encoder": {
                "num_blocks": 6,
            },
            "pretrained_model_name": None
        }
        encoder = BertEncoder(hparams=hparams)
        _, _ = encoder(inputs)
        self.assertEqual(len(encoder.trainable_variables), 3+2+6*16+2)

    def test_encode(self):
        """Tests encoding.

        Checks the shapes of the sequence outputs (batch, time, dim) and
        the pooled output (batch, hidden_size).
        """
        # case 1: bert base
        encoder = BertEncoder()

        max_time = 8
        batch_size = 16
        # Random token ids within the bert-base vocabulary size.
        inputs = tf.random_uniform([batch_size, max_time],
                                   maxval=30521, dtype=tf.int32)
        outputs, pooled_output = encoder(inputs)

        outputs_dim = encoder.hparams.encoder.dim
        pooled_output_dim = encoder.hparams.hidden_size
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            outputs_, pooled_output_ = sess.run([outputs, pooled_output])
            self.assertEqual(outputs_.shape, (batch_size,
                                              max_time, outputs_dim))
            self.assertEqual(pooled_output_.shape, (batch_size,
                                                    pooled_output_dim))

        # case 2: self-designed bert (custom pooled hidden size)
        hparams = {
            "hidden_size": 100,
            "pretrained_model_name": None
        }
        encoder = BertEncoder(hparams=hparams)

        max_time = 8
        batch_size = 16
        inputs = tf.random_uniform([batch_size, max_time],
                                   maxval=30521, dtype=tf.int32)
        outputs, pooled_output = encoder(inputs)

        outputs_dim = encoder.hparams.encoder.dim
        pooled_output_dim = encoder.hparams.hidden_size
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            outputs_, pooled_output_ = sess.run([outputs, pooled_output])
            self.assertEqual(outputs_.shape, (batch_size,
                                              max_time, outputs_dim))
            self.assertEqual(pooled_output_.shape,
                             (batch_size, pooled_output_dim))
tf.test.main()
| 32.830986 | 73 | 0.583441 |
acee15fd52db193d994c1b94401286d3a507d9ac | 1,759 | py | Python | pytorch_superpixpool/suppixpool_layer.py | bermanmaxim/superpixPool | b6d7812542a2a0f4206115fb51ca816e853ca3bb | [
"MIT"
] | 126 | 2018-06-08T01:02:50.000Z | 2022-03-31T06:35:57.000Z | pytorch_superpixpool/suppixpool_layer.py | bermanmaxim/superpixPool | b6d7812542a2a0f4206115fb51ca816e853ca3bb | [
"MIT"
] | 12 | 2018-09-16T03:01:47.000Z | 2021-08-23T01:54:35.000Z | pytorch_superpixpool/suppixpool_layer.py | bermanmaxim/superpixPool | b6d7812542a2a0f4206115fb51ca816e853ca3bb | [
"MIT"
] | 26 | 2018-06-10T18:02:10.000Z | 2021-09-27T07:41:25.000Z | import torch
import suppixpool_CUDA as spx_gpu
import numpy as np
class SupPixPoolFunction(torch.autograd.Function):
    """Autograd wrapper around the CUDA superpixel pooling kernels.

    Forward pools image features per superpixel via ``suppixpool_CUDA``;
    backward routes the output gradients back to the contributing pixels
    using the indices recorded by the forward kernel.
    """

    @staticmethod
    def forward(ctx, img, spx):
        # spx is the integer superpixel label map; K is the number of
        # superpixels (labels are assumed to cover 0..K-1, see the
        # commented sanity check below).
        spx = spx.to(torch.int)
        K = spx.max()+1
        # The label map must match the image's spatial dimensions.
        assert(spx.size()[-2:]==img.size()[-2:])
        # print(np.all(np.arange(K)==np.unique(spx.cpu().numpy())))
        # print "used K: ", K
        out = spx_gpu.forward(img, spx, K)
        outputs, indices = out
        # print("(max, min) indices: ", indices.max(), indices.min())
        # print("number of -1: ", indices.eq(-1).sum())
        # print indices
        # assert np.all(indices.cpu().numpy()>=0)
        # Save everything the backward kernel needs.
        ctx.save_for_backward(indices, img, spx, K)
        return outputs

    @staticmethod
    def backward(ctx, grad_output):
        """
        In the backward pass we receive a Tensor containing the gradient of the loss
        with respect to the output, and we need to compute the gradient of the loss
        with respect to the input.
        """
        indices, img, spx, K = ctx.saved_tensors
        grad_input, = spx_gpu.backward(grad_output.contiguous(), img, spx, indices, K)
        # No real gradient flows to the (integer) superpixel map; return
        # zeros so autograd's arity expectations are satisfied.
        return grad_input, torch.zeros_like(spx)
class SupPixPool(torch.nn.Module):
    """Module wrapper around :class:`SupPixPoolFunction`.

    Pools per-pixel features ``img`` into per-superpixel features according
    to the integer label map ``spx``.
    """

    def __init__(self):
        super(SupPixPool, self).__init__()

    def forward(self, img, spx):
        # Delegate to the autograd Function so gradients flow through the
        # custom CUDA kernels.
        pooled = SupPixPoolFunction.apply(img, spx)
        return pooled
class SupPixUnpool(torch.nn.Module):
    """Broadcast per-superpixel pooled features back onto the pixel grid.

    Given ``pooled`` of shape (batch, channels, K) and an integer superpixel
    label map ``spx`` of shape (batch, H, W) with labels in [0, K), returns
    a (batch, channels, H, W) tensor in which every pixel receives the
    feature vector of its superpixel.
    """

    def __init__(self):
        super(SupPixUnpool, self).__init__()

    def forward(self, pooled, spx):
        outShape = pooled.size()[0:2]+spx.size()[-2:]
        out = pooled.new_zeros(outShape)
        # Bug fix: the original used Python-2-only `xrange`, which raises
        # NameError under Python 3.
        for batch in range(pooled.size()[0]):
            # Advanced indexing gathers each pixel's superpixel feature.
            out[batch, :, :, :] = pooled[batch, :, spx[batch, :, :]]
        return out
| 35.18 | 86 | 0.603752 |
acee1619ef2e5fbe1e6bfbeea459fc33263e43ab | 1,513 | py | Python | test/test_byok8s.py | charlesreid1/2019-snakemake-byok8s | ebbba235a1da39f3a1e5e99c24e13077c1edd980 | [
"MIT"
] | 9 | 2019-03-11T15:08:28.000Z | 2021-09-05T03:53:19.000Z | test/test_byok8s.py | charlesreid1/2019-snakemake-byok8s | ebbba235a1da39f3a1e5e99c24e13077c1edd980 | [
"MIT"
] | 4 | 2019-01-22T08:35:31.000Z | 2021-06-01T23:19:21.000Z | test/test_byok8s.py | charlesreid1/2019-snakemake-byok8s | ebbba235a1da39f3a1e5e99c24e13077c1edd980 | [
"MIT"
] | null | null | null | from unittest import TestCase
from subprocess import call, Popen, PIPE
import os
import shutil, tempfile
from os.path import isdir, join
"""
test byok8s
This tests the byok8s command line utility,
and assumes you have already set up your
k8s cluster using e.g. minikube.
"""
class TestByok8s(TestCase):
    """
    simple byok8s test class

    This uses the subprocess PIPE var
    to capture system input and output,
    since we are running byok8s from the
    command line directly using subprocess.
    """

    @classmethod
    def setUpClass(self):
        """
        set up a byok8s workflow test.
        """
        # verify that a kubernetes cluster is running
        pass

    def test_alpha(self):
        """
        test alpha workflow

        Runs every workflow/param combination through the `byok8s` CLI from
        this test's directory and checks that the output mentions 'details'.
        """
        workflows = ['workflow-alpha', 'workflow-gamma', 'workflow-zeta']
        params = ['params-red', 'params-blue']

        pwd = os.path.abspath(os.path.dirname(__file__))

        for workflow in workflows:
            for param in params:
                command = ['byok8s', workflow, param]
                p = Popen(command, cwd=pwd, stdout=PIPE, stderr=PIPE).communicate()
                p_out = p[0].decode('utf-8').strip()
                p_err = p[1].decode('utf-8').strip()
                self.assertIn('details', p_out)

        # clean up generated output files.  The original
        # `call(['rm', '-f', '*.txt'])` never removed anything: without a
        # shell the '*.txt' glob is passed to rm literally.  Expand the
        # pattern ourselves (in the directory the workflows ran in).
        import glob
        for leftover in glob.glob(os.path.join(pwd, '*.txt')):
            os.remove(leftover)

    @classmethod
    def tearDownClass(self):
        """
        clean up after the tests
        """
        pass
| 22.58209 | 83 | 0.575677 |
acee165fcc67a36fc74336a51e5d768e2a575eae | 820 | py | Python | api_study/apps/goods/filters.py | shidashui/django_restful_api_study | 4957bbfb39ea16e4760d2b943578753a7183e266 | [
"MIT"
] | 2 | 2019-11-13T05:05:19.000Z | 2019-11-13T05:08:41.000Z | api_study/apps/goods/filters.py | shidashui/django_restful_api_study | 4957bbfb39ea16e4760d2b943578753a7183e266 | [
"MIT"
] | null | null | null | api_study/apps/goods/filters.py | shidashui/django_restful_api_study | 4957bbfb39ea16e4760d2b943578753a7183e266 | [
"MIT"
] | null | null | null | import django_filters
from .models import Goods
from django.db.models import Q
class GoodsFilter(django_filters.rest_framework.FilterSet):
    """
    Filter set for Goods: price range and (multi-level) category filtering.
    """
    # Two arguments: field_name is the model field filtered on, lookup_expr
    # is the comparison applied (price >= pricemin, price <= pricemax).
    pricemin = django_filters.NumberFilter(field_name="shop_price", lookup_expr='gte')
    pricemax = django_filters.NumberFilter(field_name="shop_price", lookup_expr='lte')
    top_category = django_filters.NumberFilter(field_name="category", method='top_category_filter')

    def top_category_filter(self, queryset, name, value):
        # Bug fix: django-filter invokes custom ``method`` callables as
        # (queryset, name, value); the original two-argument signature
        # raised a TypeError whenever the filter was applied.
        #
        # Match goods whose category equals the selected one at any level:
        # first-, second- or third-level category.
        return queryset.filter(Q(category_id=value) |
                               Q(category__parent_category_id=value) |
                               Q(category__parent_category__parent_category_id=value))

    class Meta:
        model = Goods
        fields = ['pricemin', 'pricemax', 'is_hot', 'is_new']
acee16eedec29231fbf9cc18e183b462227c046d | 255 | py | Python | build/classes/scripts/read.py | raihanvaheed/Resu.Me | 43cf591f68e4e4db2cb82f1456b912e6a60ee89a | [
"MIT"
] | 2 | 2021-01-17T23:30:11.000Z | 2021-08-02T18:58:17.000Z | build/classes/scripts/read.py | raihanvaheed/Resu.Me | 43cf591f68e4e4db2cb82f1456b912e6a60ee89a | [
"MIT"
] | null | null | null | build/classes/scripts/read.py | raihanvaheed/Resu.Me | 43cf591f68e4e4db2cb82f1456b912e6a60ee89a | [
"MIT"
] | 1 | 2021-12-26T02:40:07.000Z | 2021-12-26T02:40:07.000Z | import sys
import os
if not sys.argv:
print("need to specify file path")
sys.exit(1)
file_name = sys.argv[1]
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
f = open("{}/{}".format(ROOT_DIR, file_name), "r")
for line in f:
print(line) | 19.615385 | 53 | 0.666667 |
acee1773a51fe004504ff74f45b6c896cfdb8232 | 7,439 | py | Python | common.py | PaperCodeReview/MoCo-TF | 1ea01b2d005de3e030229f79a37135468fa1631e | [
"MIT"
] | 22 | 2020-10-01T10:14:36.000Z | 2022-02-02T12:20:42.000Z | common.py | PaperCodeReview/MoCo-TF | 1ea01b2d005de3e030229f79a37135468fa1631e | [
"MIT"
] | 2 | 2021-06-25T06:06:50.000Z | 2021-11-08T23:43:38.000Z | common.py | PaperCodeReview/MoCo-TF | 1ea01b2d005de3e030229f79a37135468fa1631e | [
"MIT"
] | 4 | 2021-03-03T06:19:45.000Z | 2021-05-20T08:07:50.000Z | import os
import sys
import yaml
import random
import logging
import argparse
import numpy as np
import pandas as pd
import tensorflow as tf
from datetime import datetime
def check_arguments(args):
assert args.src_path is not None, 'src_path must be entered.'
assert args.data_path is not None, 'data_path must be entered.'
assert args.result_path is not None, 'result_path must be entered.'
return args
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--task", type=str, default='v1',
choices=['v1', 'v2', 'lincls'])
parser.add_argument("--freeze", action='store_true')
parser.add_argument("--backbone", type=str, default='resnet50')
parser.add_argument("--batch_size", type=int, default=256)
parser.add_argument("--classes", type=int, default=1000)
parser.add_argument("--img_size", type=int, default=224)
parser.add_argument("--dim", type=int, default=128)
parser.add_argument("--num_negative", type=int, default=65536)
parser.add_argument("--momentum", type=float, default=.999)
parser.add_argument("--weight_decay", type=float, default=0.)
parser.add_argument("--use_bias", action='store_true')
parser.add_argument("--mlp", action='store_true') # v2
parser.add_argument("--shuffle_bn", action='store_true')
parser.add_argument("--steps", type=int, default=0)
parser.add_argument("--epochs", type=int, default=200)
parser.add_argument("--lr", type=float, default=.03)
parser.add_argument("--temperature", type=float, default=0.07)
parser.add_argument("--brightness", type=float, default=0.,
help='0.4')
parser.add_argument("--contrast", type=float, default=0.,
help='0.4')
parser.add_argument("--saturation", type=float, default=0.,
help='0.4')
parser.add_argument("--hue", type=float, default=0.,
help='v1: 0.4 / v2: 0.1') # v1 / v2
parser.add_argument("--checkpoint", action='store_true')
parser.add_argument("--history", action='store_true')
parser.add_argument("--tensorboard", action='store_true')
parser.add_argument("--tb_interval", type=int, default=0)
parser.add_argument("--tb_histogram", type=int, default=0)
parser.add_argument("--lr_mode", type=str, default='exponential',
choices=['constant', 'exponential', 'cosine'],
help="v1 : exponential | v2 : cosine")
parser.add_argument("--lr_value", type=float, default=.1)
parser.add_argument("--lr_interval", type=str, default='120,160')
parser.add_argument('--src_path', type=str, default='.')
parser.add_argument('--data_path', type=str, default=None)
parser.add_argument('--result_path', type=str, default='./result')
parser.add_argument('--snapshot', type=str, default=None)
parser.add_argument("--gpus", type=str, default='-1')
parser.add_argument("--summary", action='store_true')
parser.add_argument("--resume", action='store_true')
parser.add_argument("--ignore-search", type=str, default='')
return check_arguments(parser.parse_args())
def set_seed(SEED=42):
    """Seed Python hashing, `random`, NumPy and TensorFlow for reproducibility."""
    os.environ['PYTHONHASHSEED'] = str(SEED)
    random.seed(SEED)
    np.random.seed(SEED)
    tf.random.set_seed(SEED)
def get_logger(name):
    """Return a DEBUG-level logger named *name* that echoes to stdout.

    Note: each call attaches a new StreamHandler to the (cached) logger.
    """
    fmt = logging.Formatter(fmt='%(asctime)s %(levelname)-8s %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S')
    handler = logging.StreamHandler(stream=sys.stdout)
    handler.setFormatter(fmt)
    log = logging.getLogger(name)
    log.setLevel(logging.DEBUG)
    log.addHandler(handler)
    return log
def get_session(args):
    """Restrict visible GPUs per ``args.gpus`` and enable memory growth.

    Requires TensorFlow 2.x; ``args.gpus == '-1'`` means CPU-only.
    """
    assert int(tf.__version__.split('.')[0]) >= 2.0
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
    if args.gpus != '-1':
        gpus = tf.config.experimental.list_physical_devices('GPU')
        if gpus:
            try:
                for gpu in gpus:
                    # Grow GPU memory on demand instead of grabbing it all.
                    tf.config.experimental.set_memory_growth(gpu, True)
            except RuntimeError as e:
                # Memory growth must be set before GPUs have been initialized
                print(e)
def create_stamp():
    """Return a run stamp like ``210927_Mon_01_02_03`` from the current time.

    Format: two-digit year, month, day, weekday abbreviation, then
    hour/minute/second, underscore-separated.
    """
    day_names = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")
    now = datetime.now()
    date_part = "{:02d}{:02d}{:02d}".format(now.year % 100, now.month, now.day)
    time_part = "{:02d}_{:02d}_{:02d}".format(now.hour, now.minute, now.second)
    return "{}_{}_{}".format(date_part, day_names[now.weekday()], time_part)
def search_same(args):
    """Find a previous run with the same hyper-parameters and resume from it.

    Scans ``{result_path}/{task}/*/model_desc.yml`` for a run whose saved
    arguments match *args* (ignoring bookkeeping fields), then inspects its
    epoch history:

    * training already finished, or the last loss is NaN/inf -> returns
      ``(args, -1)`` to signal "do not train";
    * otherwise points ``args.snapshot`` at the latest checkpoint and
      returns ``(args, initial_epoch)``.

    If no matching run exists, returns ``(args, 0)`` for a fresh start.
    """
    # Arguments that should NOT participate in the hyper-parameter match
    # (paths, logging switches and resume bookkeeping).
    search_ignore = ['checkpoint', 'history', 'tensorboard',
                     'tb_interval', 'snapshot', 'summary',
                     'src_path', 'data_path', 'result_path',
                     'resume', 'stamp', 'gpus', 'ignore_search']
    if len(args.ignore_search) > 0:
        search_ignore += args.ignore_search.split(',')

    initial_epoch = 0
    stamps = os.listdir(f'{args.result_path}/{args.task}')
    for stamp in stamps:
        try:
            desc = yaml.full_load(
                open(f'{args.result_path}/{args.task}/{stamp}/model_desc.yml', 'r'))
        except:
            # Runs without a readable description cannot be matched.
            continue

        # Compare every non-ignored argument against the saved description.
        flag = True
        for k, v in vars(args).items():
            if k in search_ignore:
                continue
            if v != desc[k]:
                # if stamp == '210120_Wed_05_19_52':
                #     print(stamp, k, desc[k], v)
                flag = False
                break

        if flag:
            # Found a matching run: decide whether/where to resume.
            args.stamp = stamp
            df = pd.read_csv(
                os.path.join(
                    args.result_path,
                    f'{args.task}/{args.stamp}/history/epoch.csv'))

            if len(df) > 0:
                if int(df['epoch'].values[-1]+1) == args.epochs:
                    print(f'{stamp} Training already finished!!!')
                    return args, -1

                elif np.isnan(df['loss'].values[-1]) or np.isinf(df['loss'].values[-1]):
                    print('{} | Epoch {:04d}: Invalid loss, terminating training'.format(stamp, int(df['epoch'].values[-1]+1)))
                    return args, -1

                else:
                    # Resume from the newest checkpoint; its filename is
                    # expected to start with the epoch number.
                    ckpt_list = sorted(
                        [d.split('.index')[0] for d in os.listdir(
                            f'{args.result_path}/{args.task}/{args.stamp}/checkpoint') if 'index' in d])
                    if len(ckpt_list) > 0:
                        args.snapshot = f'{args.result_path}/{args.task}/{args.stamp}/checkpoint/{ckpt_list[-1]}'
                        initial_epoch = int(ckpt_list[-1].split('_')[0])
                    else:
                        print('{} Training already finished!!!'.format(stamp))
                        return args, -1
            break
    return args, initial_epoch
acee18034b44ddea1618bfffa63b77b51f951353 | 454 | py | Python | 03.Complete Python Developer - Zero to Mastery - AN/01.Python Basics/32.1 Exercise Repl.py | ptyadana/python-dojo | 98c7234b84f0afea99a091c7198342d66bbdff5b | [
"MIT"
] | 3 | 2020-06-01T04:17:18.000Z | 2020-12-18T03:05:55.000Z | 03.Complete Python Developer - Zero to Mastery - AN/01.Python Basics/32.1 Exercise Repl.py | ptyadana/python-dojo | 98c7234b84f0afea99a091c7198342d66bbdff5b | [
"MIT"
] | 1 | 2020-04-25T08:01:59.000Z | 2020-04-25T08:01:59.000Z | 03.Complete Python Developer - Zero to Mastery - AN/01.Python Basics/32.1 Exercise Repl.py | ptyadana/python-dojo | 98c7234b84f0afea99a091c7198342d66bbdff5b | [
"MIT"
] | 7 | 2020-04-26T10:02:36.000Z | 2021-06-08T05:12:46.000Z | #fix this code so that it prints a sorted list of all of our friends (alphabetical).
# Variant 1: list.append adds a single element, so we pass new_friend[0]
# rather than the whole one-element list.
friends = ['Simon', 'Patty', 'Joy', 'Carrie', 'Amira', 'Chu']
new_friend = ['Stanley']
friends.append(new_friend[0])
print(friends)
# list.sort() sorts in place (alphabetically) and returns None.
friends.sort()
print(friends)
#====================================
# Variant 2: list.extend merges in every element of another iterable,
# so the new_friend list can be passed directly.
friends = ['Simon', 'Patty', 'Joy', 'Carrie', 'Amira', 'Chu']
print(friends)
new_friend = ['Stanley']
friends.extend(new_friend)
friends.sort()
print(friends)
| 18.916667 | 85 | 0.625551 |
acee180a6c3e55643052b439d95a65b073288ac6 | 29,375 | py | Python | tensorflow/python/kernel_tests/rnn_test.py | elielhojman/tensorflow | 163aae337c875efce2518c3cd0fecb61968fe408 | [
"Apache-2.0"
] | 8 | 2017-03-20T12:04:21.000Z | 2021-06-24T20:34:30.000Z | tensorflow/python/kernel_tests/rnn_test.py | AKIRA-MIYAKE/tensorflow | 89e06304aad35bfb019a8c10f39fc1ead83e0f99 | [
"Apache-2.0"
] | 4 | 2019-08-14T22:32:51.000Z | 2020-03-09T14:59:18.000Z | tensorflow/python/kernel_tests/rnn_test.py | AKIRA-MIYAKE/tensorflow | 89e06304aad35bfb019a8c10f39fc1ead83e0f99 | [
"Apache-2.0"
] | 2 | 2017-03-20T12:10:56.000Z | 2017-11-12T00:15:54.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rnn module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import timeit
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables as variables_lib
import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class Plus1RNNCell(rnn_cell_impl.RNNCell):
  """RNN Cell generating (output, new_state) = (input + 1, state + 1)."""

  @property
  def output_size(self):
    # Output is declared 5 units wide (same as the state).
    return 5

  @property
  def state_size(self):
    return 5

  def call(self, input_, state, scope=None):
    # Elementwise: output = input + 1, next state = state + 1.
    return (input_ + 1, state + 1)
class ScalarStateRNNCell(rnn_cell_impl.RNNCell):
  """RNN Cell generating (output, new_state) = (input, state + 1).

  The state is a scalar int32 step counter (the original docstring's
  "input + 1" was a copy-paste error: the input passes through unchanged).
  """

  @property
  def output_size(self):
    return 1

  @property
  def state_size(self):
    # Rank-0 (scalar) state.
    return tensor_shape.TensorShape([])

  def zero_state(self, batch_size, dtype):
    # Ignores `batch_size` and `dtype`: always a scalar int32 zero.
    return array_ops.zeros([], dtype=dtypes.int32)

  def call(self, input_, state, scope=None):
    return (input_, state + 1)
class UnbalancedOutputRNNCell(rnn_cell_impl.RNNCell):
  """RNN Cell emitting a structured output of two differently-sized parts.

  Generates (output, new_state) = ((input, concat(input, input)), state + 1);
  the state is a scalar int32 step counter.
  """

  @property
  def output_size(self):
    # Two output components with different widths (1 and 2).
    return tensor_shape.TensorShape(1), tensor_shape.TensorShape((2))

  @property
  def state_size(self):
    return tensor_shape.TensorShape([])

  def zero_state(self, batch_size, dtype):
    # Ignores `batch_size` and `dtype`: always a scalar int32 zero.
    return array_ops.zeros([], dtype=dtypes.int32)

  def call(self, input_, state, scope=None):
    # Second output component is the input duplicated along the last axis.
    concatenated = array_ops.concat((input_, input_), axis=-1)
    return (input_, concatenated), state + 1
class TensorArrayStateRNNCell(rnn_cell_impl.RNNCell):
  """RNN Cell that carries part of its state as a TensorArray.

  The state is a pair (step_counter, tensor_array); each call appends the
  current input to the array at index `step_counter` and echoes the input
  as the output.
  """

  @property
  def output_size(self):
    return 1

  @property
  def state_size(self):
    # (scalar counter, TensorArray); the TensorArray component has no
    # static shape, hence the empty tuple.
    return (tensor_shape.TensorShape([]), ())

  def zero_state(self, batch_size, dtype):
    # Dynamic-size array so it can grow with the sequence length.
    return (array_ops.zeros([], dtype=dtypes.int32),
            tensor_array_ops.TensorArray(
                dtype=dtype, size=0, dynamic_size=True))

  def call(self, input_, state, scope=None):
    # Write the input at the current step index, then advance the counter.
    new_array = state[1].write(state[0], input_)
    return (input_, (state[0] + 1, new_array))
class RNNTest(test.TestCase):
  """Tests for `rnn.dynamic_rnn` covering both graph and eager execution."""

  def setUp(self):
    # Fixed seed keeps any NumPy-generated fixtures reproducible.
    self._seed = 23489
    np.random.seed(self._seed)

  @test_util.run_in_graph_and_eager_modes
  def testInvalidSequenceLengthShape(self):
    """A matrix-shaped `sequence_length` is rejected with ValueError."""
    cell = Plus1RNNCell()
    if context.executing_eagerly():
      inputs = [constant_op.constant(np.ones((3, 4)))]
    else:
      inputs = [array_ops.placeholder(dtypes.float32, shape=(3, 4))]
    with self.assertRaisesRegexp(ValueError, "must be a vector"):
      rnn.dynamic_rnn(
          cell,
          array_ops.stack(inputs),
          dtype=dtypes.float32,
          sequence_length=[[4]])  # Rank 2, i.e. not a vector.

  @test_util.run_in_graph_and_eager_modes
  def testBatchSizeFromInput(self):
    """The batch dimension of outputs/state is inferred from the inputs."""
    cell = Plus1RNNCell()
    in_eager_mode = context.executing_eagerly()
    # With static batch size
    if in_eager_mode:
      inputs = np.zeros((3, 4, 5), dtype=np.float32)
      initial_state = np.zeros((3, 5), dtype=np.float32)
    else:
      inputs = array_ops.placeholder(dtypes.float32, shape=(3, 4, 5))
      initial_state = array_ops.placeholder(dtypes.float32, shape=(3, 5))
    # - Without initial_state
    outputs, state = rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
    self.assertEqual(3, outputs.shape[0])
    self.assertEqual(3, state.shape[0])
    # - With initial_state
    outputs, state = rnn.dynamic_rnn(
        cell, inputs, initial_state=initial_state)
    self.assertEqual(3, outputs.shape[0])
    self.assertEqual(3, state.shape[0])
    # Without static batch size
    # Tensor shapes are fully determined with eager execution enabled,
    # so only run this test for graph construction.
    if not in_eager_mode:
      inputs = array_ops.placeholder(dtypes.float32, shape=(None, 4, 5))
      # - Without initial_state
      outputs, state = rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
      self.assertEqual(None, outputs.shape[0].value)
      self.assertEqual(None, state.shape[0].value)
      # - With initial_state
      outputs, state = rnn.dynamic_rnn(
          cell,
          inputs,
          initial_state=array_ops.placeholder(dtypes.float32, shape=(None, 5)))
      self.assertEqual(None, outputs.shape[0].value)
      self.assertEqual(None, state.shape[0].value)

  @test_util.run_in_graph_and_eager_modes
  def testScalarStateIsAccepted(self):
    """A rank-0 (scalar) cell state round-trips through dynamic_rnn."""
    cell = ScalarStateRNNCell()
    in_eager_mode = context.executing_eagerly()
    if in_eager_mode:
      inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
    else:
      inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
    with self.test_session() as sess:
      outputs, state = rnn.dynamic_rnn(
          cell, inputs, dtype=dtypes.float32, sequence_length=[4])
      if not in_eager_mode:
        outputs, state = sess.run(
            [outputs, state], feed_dict={inputs: [[[1], [2], [3], [4]]]})
    # The cell echoes its inputs; the state counts the 4 steps taken.
    self.assertAllEqual([[[1], [2], [3], [4]]], outputs)
    self.assertAllEqual(4, state)

  @test_util.run_in_graph_and_eager_modes
  def testUnbalancedOutputIsAccepted(self):
    """Structured outputs whose components differ in width are supported."""
    cell = UnbalancedOutputRNNCell()
    in_eager_mode = context.executing_eagerly()
    if in_eager_mode:
      inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
    else:
      inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
    with self.test_session() as sess:
      outputs, state = rnn.dynamic_rnn(
          cell, inputs, dtype=dtypes.float32, sequence_length=[4])
      if not in_eager_mode:
        outputs, state = sess.run(
            [outputs, state], feed_dict={inputs: [[[1], [2], [3], [4]]]})
    # First component echoes the input; second is the input self-concat.
    self.assertIsInstance(outputs, tuple)
    self.assertAllEqual([[[1], [2], [3], [4]]], outputs[0])
    self.assertAllEqual([[[1, 1], [2, 2], [3, 3], [4, 4]]], outputs[1])
    self.assertAllEqual(4, state)

  @test_util.run_in_graph_and_eager_modes
  def testTensorArrayStateIsAccepted(self):
    """A TensorArray inside the cell state survives the dynamic_rnn loop."""
    cell = TensorArrayStateRNNCell()
    in_eager_mode = context.executing_eagerly()
    if in_eager_mode:
      inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
    else:
      inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
    with self.test_session() as sess:
      outputs, state = rnn.dynamic_rnn(
          cell, inputs, dtype=dtypes.float32, sequence_length=[4])
      # Stack the TensorArray so it can be fetched / compared as a Tensor.
      state = (state[0], state[1].stack())
      if not in_eager_mode:
        outputs, state = sess.run(
            [outputs, state], feed_dict={
                inputs: [[[1], [2], [3], [4]]]
            })
    self.assertAllEqual([[[1], [2], [3], [4]]], outputs)
    self.assertAllEqual(4, state[0])
    # One array entry per time step, each holding that step's input.
    self.assertAllEqual([[[1]], [[2]], [[3]], [[4]]], state[1])

  def _assert_cell_builds(self, cell_class, dtype, batch_size, in_size,
                          out_size):
    """Builds `cell_class`, runs one step, and checks the output shape."""
    cell = cell_class(out_size, dtype=dtype)
    in_shape = tensor_shape.TensorShape((batch_size, in_size))
    cell.build(in_shape)
    state_output = cell.zero_state(batch_size, dtype)
    cell_output, _ = cell(array_ops.zeros(in_shape, dtype), state_output)
    self.assertAllEqual([batch_size, out_size], cell_output.shape.as_list())

  @test_util.run_in_graph_and_eager_modes
  def testCellsBuild(self):
    """All core and contrib cell classes build in float32 and float64."""
    f32 = dtypes.float32
    f64 = dtypes.float64
    self._assert_cell_builds(rnn_cell_impl.BasicRNNCell, f32, 5, 7, 3)
    self._assert_cell_builds(rnn_cell_impl.BasicRNNCell, f64, 5, 7, 3)
    self._assert_cell_builds(rnn_cell_impl.BasicLSTMCell, f32, 5, 7, 3)
    self._assert_cell_builds(rnn_cell_impl.BasicLSTMCell, f64, 5, 7, 3)
    self._assert_cell_builds(rnn_cell_impl.GRUCell, f32, 5, 7, 3)
    self._assert_cell_builds(rnn_cell_impl.GRUCell, f64, 5, 7, 3)
    self._assert_cell_builds(rnn_cell_impl.LSTMCell, f32, 5, 7, 3)
    self._assert_cell_builds(rnn_cell_impl.LSTMCell, f64, 5, 7, 3)
    # NOTE(review): `contrib_rnn` is presumably imported above this chunk;
    # it is not among the visible imports.
    self._assert_cell_builds(contrib_rnn.IndRNNCell, f32, 5, 7, 3)
    self._assert_cell_builds(contrib_rnn.IndRNNCell, f64, 5, 7, 3)
    self._assert_cell_builds(contrib_rnn.IndyGRUCell, f32, 5, 7, 3)
    self._assert_cell_builds(contrib_rnn.IndyGRUCell, f64, 5, 7, 3)
    self._assert_cell_builds(contrib_rnn.IndyLSTMCell, f32, 5, 7, 3)
    self._assert_cell_builds(contrib_rnn.IndyLSTMCell, f64, 5, 7, 3)
######### Benchmarking RNN code
def _static_vs_dynamic_rnn_benchmark_static(inputs_list_t, sequence_length):
  """Builds the static-unroll LSTM benchmark graph: forward + gradients.

  Args:
    inputs_list_t: List of [batch, depth] Tensors, one per time step.
    sequence_length: Per-example sequence lengths.

  Returns:
    A grouped op that runs the final state, the outputs and all gradients.
  """
  _, depth = inputs_list_t[0].get_shape().as_list()
  init = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
  # num_units matches the input depth so the graph shape only depends on
  # the benchmark parameters.
  lstm = contrib_rnn.LSTMCell(
      num_units=depth,
      use_peepholes=True,
      initializer=init,
      state_is_tuple=False)
  outputs, final_state = contrib_rnn.static_rnn(
      lstm,
      inputs_list_t,
      sequence_length=sequence_length,
      dtype=dtypes.float32)
  train_vars = ops_lib.get_collection(ops_lib.GraphKeys.TRAINABLE_VARIABLES)
  grads = gradients_impl.gradients(outputs + [final_state], train_vars)
  return control_flow_ops.group(final_state, *(grads + outputs))
def _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length):
  """Builds the dynamic_rnn LSTM benchmark graph: forward + gradients.

  Args:
    inputs_t: A rank-3 Tensor of shape [batch, time, depth].
    sequence_length: Per-example sequence lengths.

  Returns:
    A grouped op that runs the final state, the outputs and all gradients.
  """
  (unused_0, unused_1, input_size) = inputs_t.get_shape().as_list()
  initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
  # num_units == input depth, mirroring the static-unroll variant so the
  # two benchmarks build equivalent computations.
  cell = contrib_rnn.LSTMCell(
      num_units=input_size,
      use_peepholes=True,
      initializer=initializer,
      state_is_tuple=False)
  outputs, final_state = rnn.dynamic_rnn(
      cell, inputs_t, sequence_length=sequence_length, dtype=dtypes.float32)
  trainable_variables = ops_lib.get_collection(
      ops_lib.GraphKeys.TRAINABLE_VARIABLES)
  gradients = gradients_impl.gradients([outputs, final_state],
                                       trainable_variables)
  return control_flow_ops.group(final_state, outputs, *gradients)
def graph_creation_static_vs_dynamic_rnn_benchmark(max_time):
  """Times graph-construction cost of static vs. dynamic unrolling.

  Only graph building is timed (no session runs), 5 builds per variant.

  Args:
    max_time: Number of unrolled time steps.

  Returns:
    Tuple (static_seconds, dynamic_seconds), total over the 5 builds.
  """
  config = config_pb2.ConfigProto()
  config.allow_soft_placement = True

  # These parameters don't matter
  batch_size = 512
  num_units = 512

  # Set up sequence lengths
  np.random.seed([127])
  sequence_length = np.random.randint(0, max_time, size=batch_size)
  inputs_list = [
      np.random.randn(batch_size, num_units).astype(np.float32)
      for _ in range(max_time)
  ]
  inputs = np.dstack(inputs_list).transpose([0, 2, 1])  # batch x time x depth

  def _create_static_rnn():
    # Fresh graph per build so construction cost is measured from scratch.
    with session.Session(config=config, graph=ops_lib.Graph()):
      inputs_list_t = [
          variables_lib.Variable(
              x, trainable=False).value() for x in inputs_list
      ]
      _static_vs_dynamic_rnn_benchmark_static(inputs_list_t, sequence_length)

  def _create_dynamic_rnn():
    with session.Session(config=config, graph=ops_lib.Graph()):
      inputs_t = variables_lib.Variable(inputs, trainable=False).value()
      _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length)

  # NOTE(review): `timeit` (and `np`) are presumably imported above this
  # chunk; they are not among the visible imports.
  delta_static = timeit.timeit(_create_static_rnn, number=5)
  delta_dynamic = timeit.timeit(_create_dynamic_rnn, number=5)

  print("%d \t %f \t %f \t %f" %
        (max_time, delta_static, delta_dynamic, delta_dynamic / delta_static))
  return delta_static, delta_dynamic
def _timer(sess, ops):
# Warm in
for _ in range(2):
sess.run(ops)
# Timing run
runs = 20
start = time.time()
for _ in range(runs):
sess.run(ops)
end = time.time()
return (end - start) / float(runs)
def static_vs_dynamic_rnn_benchmark(batch_size, max_time, num_units, use_gpu):
  """Benchmarks run time of static_rnn vs. dynamic_rnn on the same data.

  Args:
    batch_size: Examples per batch.
    max_time: Number of time steps.
    num_units: LSTM width (also used as input depth).
    use_gpu: If False, pin ops to /cpu:0; otherwise let TF place them.

  Returns:
    Tuple (static_seconds, dynamic_seconds) mean per-run wall time.
  """
  config = config_pb2.ConfigProto()
  config.allow_soft_placement = True

  # Set up sequence lengths
  np.random.seed([127])
  sequence_length = np.random.randint(0, max_time, size=batch_size)
  inputs_list = [
      np.random.randn(batch_size, num_units).astype(np.float32)
      for _ in range(max_time)
  ]
  inputs = np.dstack(inputs_list).transpose([0, 2, 1])  # batch x time x depth

  # Using rnn()
  with session.Session(config=config, graph=ops_lib.Graph()) as sess:
    with ops_lib.device("/cpu:0" if not use_gpu else None):
      inputs_list_t = [
          variables_lib.Variable(
              x, trainable=False).value() for x in inputs_list
      ]
      ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
                                                    sequence_length)
    variables_lib.global_variables_initializer().run()
    delta_static = _timer(sess, ops)

  # Using dynamic_rnn()
  with session.Session(config=config, graph=ops_lib.Graph()) as sess:
    with ops_lib.device("/cpu:0" if not use_gpu else None):
      inputs_t = variables_lib.Variable(inputs, trainable=False).value()
      ops = _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length)
    variables_lib.global_variables_initializer().run()
    delta_dynamic = _timer(sess, ops)

  print("%d \t %d \t %d \t %s \t %f \t %f \t %f" %
        (batch_size, max_time, num_units, use_gpu, delta_static, delta_dynamic,
         delta_dynamic / delta_static))
  return delta_static, delta_dynamic
def _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t, sequence_length):
  """Builds a static-unroll LSTM graph (forward + gradients) to benchmark.

  Structurally identical to `_static_vs_dynamic_rnn_benchmark_static`;
  callers vary either `sequence_length` or the length of `inputs_list_t`
  to compare the two ways of halving the work.
  """
  (_, input_size) = inputs_list_t[0].get_shape().as_list()
  initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
  cell = contrib_rnn.LSTMCell(
      num_units=input_size,
      use_peepholes=True,
      initializer=initializer,
      state_is_tuple=False)
  outputs, final_state = contrib_rnn.static_rnn(
      cell,
      inputs_list_t,
      sequence_length=sequence_length,
      dtype=dtypes.float32)
  trainable_variables = ops_lib.get_collection(
      ops_lib.GraphKeys.TRAINABLE_VARIABLES)
  gradients = gradients_impl.gradients(outputs + [final_state],
                                       trainable_variables)
  return control_flow_ops.group(final_state, *(gradients + outputs))
def half_seq_len_vs_unroll_half_rnn_benchmark(batch_size, max_time, num_units,
                                              use_gpu):
  """Benchmarks halved `sequence_length` vs. halved static unroll length.

  Both variants compute half the work; this compares whether it is cheaper
  to rely on sequence-length early exit or to build a smaller graph.

  Args:
    batch_size: Examples per batch.
    max_time: Full (unhalved) number of time steps.
    num_units: LSTM width (also used as input depth).
    use_gpu: If False, pin ops to /cpu:0; otherwise let TF place them.

  Returns:
    Tuple (half_seq_len_seconds, unroll_half_seconds).
  """
  config = config_pb2.ConfigProto()
  config.allow_soft_placement = True

  # Set up sequence lengths
  np.random.seed([127])
  sequence_length = max_time * np.ones((batch_size,))
  inputs_list = [
      np.random.randn(batch_size, num_units).astype(np.float32)
      for _ in range(max_time)
  ]

  # Halve the sequence length, full static unroll
  with session.Session(config=config, graph=ops_lib.Graph()) as sess:
    with ops_lib.device("/cpu:0" if not use_gpu else None):
      inputs_list_t = [
          variables_lib.Variable(
              x, trainable=False).value() for x in inputs_list
      ]
      ops = _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t,
                                                       sequence_length / 2)
    variables_lib.global_variables_initializer().run()
    delta_half_seq_len = _timer(sess, ops)

  # Halve the unroll size, don't use sequence length
  with session.Session(config=config, graph=ops_lib.Graph()) as sess:
    with ops_lib.device("/cpu:0" if not use_gpu else None):
      inputs_list_t = [
          variables_lib.Variable(
              x, trainable=False).value() for x in inputs_list
      ]
      ops = _half_seq_len_vs_unroll_half_rnn_benchmark(
          inputs_list_t[:(max_time // 2)], sequence_length / 2)
    variables_lib.global_variables_initializer().run()
    delta_unroll_half = _timer(sess, ops)

  print("%d \t %d \t\t %d \t %s \t %f \t\t %f \t\t %f" %
        (batch_size, max_time, num_units, use_gpu, delta_half_seq_len,
         delta_unroll_half, delta_half_seq_len / delta_unroll_half))
  return delta_half_seq_len, delta_unroll_half
def _concat_state_vs_tuple_state_rnn_benchmark(inputs_list_t, sequence_length,
                                               state_is_tuple):
  """Builds a static-unroll LSTM graph, parameterized by state layout.

  Args:
    inputs_list_t: List of [batch, depth] Tensors, one per time step.
    sequence_length: Per-example sequence lengths.
    state_is_tuple: Passed through to `LSTMCell`; True for the tuple
      (c, h) state layout, False for a single concatenated state Tensor.

  Returns:
    A grouped op that runs the final state, the outputs and all gradients.
  """
  (_, input_size) = inputs_list_t[0].get_shape().as_list()
  initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
  cell = contrib_rnn.LSTMCell(
      num_units=input_size,
      use_peepholes=True,
      initializer=initializer,
      state_is_tuple=state_is_tuple)
  outputs, final_state = contrib_rnn.static_rnn(
      cell,
      inputs_list_t,
      sequence_length=sequence_length,
      dtype=dtypes.float32)
  # Normalize the final state to a list so both layouts feed gradients
  # and group() the same way.
  final_state = list(final_state) if state_is_tuple else [final_state]
  trainable_variables = ops_lib.get_collection(
      ops_lib.GraphKeys.TRAINABLE_VARIABLES)
  gradients = gradients_impl.gradients(outputs + final_state,
                                       trainable_variables)
  return control_flow_ops.group(*(final_state + gradients + outputs))
def concat_state_vs_tuple_state_rnn_benchmark(batch_size, max_time, num_units,
                                              use_gpu):
  """Benchmarks concatenated vs. tuple LSTM state layouts.

  Args:
    batch_size: Examples per batch.
    max_time: Number of time steps.
    num_units: LSTM width (also used as input depth).
    use_gpu: If False, pin ops to /cpu:0; otherwise let TF place them.

  Returns:
    Tuple (concat_state_seconds, tuple_state_seconds).
  """
  config = config_pb2.ConfigProto()
  config.allow_soft_placement = True

  # Set up sequence lengths
  np.random.seed([127])
  sequence_length = max_time * np.ones((batch_size,))
  inputs_list = [
      np.random.randn(batch_size, num_units).astype(np.float32)
      for _ in range(max_time)
  ]

  # Run with concatenated states (default)
  with session.Session(config=config, graph=ops_lib.Graph()) as sess:
    with ops_lib.device("/cpu:0" if not use_gpu else None):
      inputs_list_t = [
          variables_lib.Variable(
              x, trainable=False).value() for x in inputs_list
      ]
      ops = _concat_state_vs_tuple_state_rnn_benchmark(
          inputs_list_t, sequence_length, state_is_tuple=False)
    variables_lib.global_variables_initializer().run()
    delta_concat_state = _timer(sess, ops)

  # Run with tuple states (new)
  with session.Session(config=config, graph=ops_lib.Graph()) as sess:
    with ops_lib.device("/cpu:0" if not use_gpu else None):
      inputs_list_t = [
          variables_lib.Variable(
              x, trainable=False).value() for x in inputs_list
      ]
      ops = _concat_state_vs_tuple_state_rnn_benchmark(
          inputs_list_t, sequence_length, state_is_tuple=True)
    variables_lib.global_variables_initializer().run()
    delta_tuple_state = _timer(sess, ops)

  print("%d \t %d \t %d \t %s \t %f \t\t %f \t\t %f" %
        (batch_size, max_time, num_units, use_gpu, delta_concat_state,
         delta_tuple_state, delta_concat_state / delta_tuple_state))
  return delta_concat_state, delta_tuple_state
def _dynamic_rnn_swap_memory_benchmark(inputs_t, sequence_length, swap_memory):
  """Builds a dynamic_rnn LSTM graph, parameterized by `swap_memory`.

  Args:
    inputs_t: A rank-3 Tensor of shape [batch, time, depth].
    sequence_length: Per-example sequence lengths.
    swap_memory: Passed through to `rnn.dynamic_rnn`; when True,
      activations are swapped from GPU to host memory between forward
      and backward passes.

  Returns:
    A grouped op that runs the final state, the outputs and all gradients.
  """
  (unused_0, unused_1, input_size) = inputs_t.get_shape().as_list()
  initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
  cell = contrib_rnn.LSTMCell(
      num_units=input_size,
      use_peepholes=True,
      initializer=initializer,
      state_is_tuple=False)
  outputs, final_state = rnn.dynamic_rnn(
      cell,
      inputs_t,
      sequence_length=sequence_length,
      swap_memory=swap_memory,
      dtype=dtypes.float32)
  trainable_variables = ops_lib.get_collection(
      ops_lib.GraphKeys.TRAINABLE_VARIABLES)
  gradients = gradients_impl.gradients([outputs, final_state],
                                       trainable_variables)
  return control_flow_ops.group(final_state, outputs, *gradients)
def dynamic_rnn_swap_memory_benchmark(batch_size, max_time, num_units):
  """Benchmarks dynamic_rnn with and without `swap_memory`.

  Args:
    batch_size: Examples per batch.
    max_time: Number of time steps.
    num_units: LSTM width (also used as input depth).

  Returns:
    Tuple (no_swap_seconds, swap_seconds) mean per-run wall time.
  """
  config = config_pb2.ConfigProto()
  config.allow_soft_placement = True

  # Set up sequence lengths
  np.random.seed([127])
  sequence_length = np.random.randint(0, max_time, size=batch_size)
  inputs_list = [
      np.random.randn(batch_size, num_units).astype(np.float32)
      for _ in range(max_time)
  ]
  inputs = np.dstack(inputs_list).transpose([0, 2, 1])  # batch x time x depth

  # No memory swap
  with session.Session(config=config, graph=ops_lib.Graph()) as sess:
    inputs_t = variables_lib.Variable(inputs, trainable=False).value()
    ops = _dynamic_rnn_swap_memory_benchmark(
        inputs_t, sequence_length, swap_memory=False)
    variables_lib.global_variables_initializer().run()
    no_swap = _timer(sess, ops)

  # Memory swap
  with session.Session(config=config, graph=ops_lib.Graph()) as sess:
    inputs_t = variables_lib.Variable(inputs, trainable=False).value()
    ops = _dynamic_rnn_swap_memory_benchmark(
        inputs_t, sequence_length, swap_memory=True)
    variables_lib.global_variables_initializer().run()
    swap = _timer(sess, ops)

  print("%d \t %d \t %d \t %f \t %f \t %f" %
        (batch_size, max_time, num_units, no_swap, swap, swap / no_swap))
  return no_swap, swap
def rnn_long_sequence_benchmark(batch_size, seqlen, num_units, dynamic,
                                swap_memory, nn):
  """Benchmarks long-sequence RNN runs, printing one result line per run.

  Args:
    batch_size: Examples per batch.
    seqlen: Sequence length (all examples use the full length).
    num_units: LSTM width (also used as input depth).
    dynamic: If True, use dynamic_rnn (with `swap_memory`); otherwise
      static unroll. Note `swap_memory` is ignored in the static case.
    swap_memory: Passed to the dynamic_rnn benchmark graph.
    nn: Number of times to repeat the whole build-and-time cycle.
  """
  config = config_pb2.ConfigProto()
  config.allow_soft_placement = True

  # Set up sequence lengths
  np.random.seed([127])
  sequence_length = [seqlen for _ in range(batch_size)]
  inputs_list = [
      np.random.randn(batch_size, num_units).astype(np.float32)
      for _ in range(seqlen)
  ]
  inputs = np.dstack(inputs_list).transpose([0, 2, 1])  # batch x time x depth

  for _ in range(nn):
    if dynamic:
      with session.Session(config=config, graph=ops_lib.Graph()) as sess:
        inputs_t = variables_lib.Variable(inputs, trainable=False).value()
        ops = _dynamic_rnn_swap_memory_benchmark(
            inputs_t, sequence_length, swap_memory=swap_memory)
        variables_lib.global_variables_initializer().run()
        elapsed = _timer(sess, ops)
    else:
      with session.Session(config=config, graph=ops_lib.Graph()) as sess:
        inputs_list_t = [
            variables_lib.Variable(
                x, trainable=False).value() for x in inputs_list
        ]
        ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
                                                      sequence_length)
        variables_lib.global_variables_initializer().run()
        elapsed = _timer(sess, ops)

    print("%d \t %d \t %d \t %s \t %f \t %f" % (batch_size, seqlen, num_units,
                                                dynamic, elapsed,
                                                elapsed / seqlen))
class BenchmarkRNN(test.Benchmark):
  """Benchmark entry points; each method sweeps a parameter grid and
  reports results via `test.Benchmark.report_benchmark`."""

  def benchmarkGraphCreationStaticVsDynamicLSTM(self):
    """Graph-construction time: static vs. dynamic unrolling."""
    print("Graph Creation: Static Unroll vs. Dynamic Unroll LSTM")
    print("max_t \t dt(static) \t dt(dynamic) \t dt(dynamic)/dt(static)")
    for max_time in (1, 25, 50):
      s_dt, d_dt = graph_creation_static_vs_dynamic_rnn_benchmark(max_time)
      self.report_benchmark(
          name="graph_creation_time_static_T%02d" % max_time,
          iters=5,
          wall_time=s_dt)
      self.report_benchmark(
          name="graph_creation_time_dynamic_T%02d" % max_time,
          iters=5,
          wall_time=d_dt)

  def benchmarkStaticUnrollVsDynamicFlowLSTM(self):
    """Run time: static unroll vs. dynamic_rnn over a size/device grid."""
    print("Calculation: Static Unroll with Dynamic Flow LSTM "
          "vs. Dynamic Unroll LSTM")
    print("batch \t max_t \t units \t gpu \t dt(static) \t dt(dynamic) "
          "\t dt(dynamic)/dt(static)")
    for batch_size in (256,):
      for max_time in (50,):
        for num_units in (512, 256, 128):
          for use_gpu in (False, True):
            s_dt, d_dt = static_vs_dynamic_rnn_benchmark(batch_size, max_time,
                                                         num_units, use_gpu)
            self.report_benchmark(
                name="static_unroll_time_T%02d_B%03d_N%03d_gpu_%s" %
                (max_time, batch_size, num_units, use_gpu),
                iters=20,
                wall_time=s_dt)
            self.report_benchmark(
                name="dynamic_unroll_time_T%02d_B%03d_N%03d_gpu_%s" %
                (max_time, batch_size, num_units, use_gpu),
                iters=20,
                wall_time=d_dt)

  def benchmarkDynamicLSTMNoMemorySwapVsMemorySwap(self):
    """Run time: dynamic_rnn with vs. without GPU->host memory swapping."""
    print("Calculation: Dynamic LSTM No Memory Swap vs. Memory Swap")
    print("batch \t max_t \t units \t no_swap \t swap \t swap/no_swap")
    for batch_size in (256, 512):
      for max_time in (100,):
        for num_units in (512, 256, 128):
          no_swap, swap = dynamic_rnn_swap_memory_benchmark(batch_size,
                                                            max_time, num_units)
          self.report_benchmark(
              name="dynamic_lstm_no_memory_swap_T%02d_B%03d_N%03d" %
              (max_time, batch_size, num_units),
              iters=20,
              wall_time=no_swap)
          self.report_benchmark(
              name="dynamic_lstm_with_memory_swap_T%02d_B%03d_N%03d" %
              (max_time, batch_size, num_units),
              iters=20,
              wall_time=swap)

  def benchmarkStaticUnrollHalfSequenceLengthVsHalfUnroll(self):
    """Run time: halved sequence_length vs. halved static unroll."""
    print("Calculation: Static Unroll with Halved Sequence Length "
          "vs. Half Static Unroll")
    print("batch \t full_t \t units \t gpu \t dt(half_seq_len) "
          "\t dt(unroll_half) \t dt(half_seq_len)/dt(unroll_half)")
    for batch_size in (128,):
      for max_time in (50,):
        for num_units in (256,):
          for use_gpu in (False, True):
            s_dt, d_dt = half_seq_len_vs_unroll_half_rnn_benchmark(batch_size,
                                                                   max_time,
                                                                   num_units,
                                                                   use_gpu)
            self.report_benchmark(
                name="half_seq_len_time_T%02d_B%03d_N%03d_gpu_%s" %
                (max_time, batch_size, num_units, use_gpu),
                iters=20,
                wall_time=s_dt)
            self.report_benchmark(
                name="unroll_half_time_T%02d_B%03d_N%03d_gpu_%s" %
                (max_time, batch_size, num_units, use_gpu),
                iters=20,
                wall_time=d_dt)

  def benchmarkStaticUnrollStateConcatVsStateTuple(self):
    """Run time: concatenated vs. tuple LSTM state layouts."""
    print("Calculation: Static Unroll with Concatenated State "
          "vs. Tuple State")
    print("batch \t time \t units \t gpu \t dt(concat_state) "
          "\t dt(tuple_state) \t dt(concat_state)/dt(tuple_state)")
    for batch_size in (
        16,
        128,):
      for max_time in (50,):
        for num_units in (
            16,
            128,):
          for use_gpu in (False, True):
            c_dt, t_dt = concat_state_vs_tuple_state_rnn_benchmark(batch_size,
                                                                   max_time,
                                                                   num_units,
                                                                   use_gpu)
            self.report_benchmark(
                name="concat_state_time_T%02d_B%03d_N%03d_gpu_%s" %
                (max_time, batch_size, num_units, use_gpu),
                iters=20,
                wall_time=c_dt)
            self.report_benchmark(
                name="tuple_state_time_T%02d_B%03d_N%03d_gpu_%s" %
                (max_time, batch_size, num_units, use_gpu),
                iters=20,
                wall_time=t_dt)

  def _benchmarkDynamicLSTMMemorySwapLongSeq(self):
    """The memory swapping test for the SOSP submission.

    Leading underscore keeps it out of automatic benchmark discovery;
    run it by hand.
    """
    print("Calculation: Long LSTM Sequence")
    print("batch \t len \t units \t dynamic \t elapsed_t \t elapsed_t/len")
    batch_size = 512
    seqlen = 800
    num_units = 512
    dynamic = True
    swap_memory = True
    # Some warming up.
    if swap_memory:
      rnn_long_sequence_benchmark(batch_size, seqlen, num_units,
                                  dynamic, swap_memory, 2)
    # Measure the performance.
    # NOTE(review): `xrange` implies Python 2 / a six.moves import above
    # this chunk — confirm before running under Python 3.
    for slen in xrange(100, 1100, 100):
      rnn_long_sequence_benchmark(batch_size, slen, num_units, dynamic,
                                  swap_memory, 3)
if __name__ == "__main__":
  # Delegate to the TensorFlow test runner, which also dispatches the
  # Benchmark classes when --benchmarks is passed.
  test.main()
| 38.050518 | 80 | 0.665055 |
acee18345e09e924dea1d01cdf19d9a853f409bd | 3,254 | py | Python | model-optimizer/mo/front/mxnet/nd_to_params.py | fujunwei/dldt | 09497b7724de4be92629f7799b8538b483d809a2 | [
"Apache-2.0"
] | 1 | 2021-07-30T17:03:50.000Z | 2021-07-30T17:03:50.000Z | model-optimizer/mo/front/mxnet/nd_to_params.py | fujunwei/dldt | 09497b7724de4be92629f7799b8538b483d809a2 | [
"Apache-2.0"
] | null | null | null | model-optimizer/mo/front/mxnet/nd_to_params.py | fujunwei/dldt | 09497b7724de4be92629f7799b8538b483d809a2 | [
"Apache-2.0"
] | null | null | null | """
Copyright (C) 2017-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import mxnet as mx
from mo.front.mxnet.extractors.utils import load_params
def save_params_file(model_name: str, args: dict, auxs: dict, iteration_number: int = 0):
pretrained = {}
for key in args:
pretrained["arg:" + key] = args[key]
for key in auxs:
pretrained["aux:" + key] = auxs[key]
save_model_path = '{}-{:04}.params'.format(model_name, iteration_number)
save_model_path = os.path.expanduser(save_model_path)
if os.path.isfile(save_model_path):
os.remove(save_model_path)
mx.nd.save(save_model_path, pretrained)
def add_pretrained_model(pretrained_params: dict, args: dict, pretrained_model: str, iteration_number: int,
input_names: str):
if input_names:
input_names = input_names.split(',')
else:
input_names = 'data'
arg_dict = args
if pretrained_params:
symbol, arg_params, aux_params = mx.model.load_checkpoint(pretrained_model, iteration_number)
arg_names = symbol.list_arguments()
arg_dict = {}
for name in arg_names:
if name in input_names:
continue
key = "arg:" + name
if key in pretrained_params:
arg_dict[name] = pretrained_params[key].copyto(mx.cpu())
del pretrained_params
arg_dict.update(args)
return arg_dict
def build_params_file(nd_prefix_name: str = '', pretrained_model: str = '', input_names: str = ''):
path_wo_ext = '.'.join(pretrained_model.split('.')[:-1])
pretrained_model_name_w_iter = path_wo_ext.split(os.sep)[-1]
pretrained_model_name = '-'.join(path_wo_ext.split('-')[:-1])
iteration_number = int(pretrained_model_name_w_iter.split('-')[-1])
files_dir = os.path.dirname(pretrained_model)
if input_names:
model_params = load_params(pretrained_model, data_names=input_names.split(','))
else:
model_params = load_params(pretrained_model)
pretrained_params = mx.nd.load(pretrained_model) if pretrained_model_name else None
nd_args = mx.nd.load(os.path.join(files_dir, '%s_args.nd' % nd_prefix_name)) if nd_prefix_name else None
nd_auxs = mx.nd.load(os.path.join(files_dir, '%s_auxs.nd' % nd_prefix_name)) if nd_prefix_name else None
nd_args = add_pretrained_model(pretrained_params, nd_args, pretrained_model_name,
iteration_number,
input_names)
model_params._arg_params = nd_args
model_params._aux_params = nd_auxs
model_params._param_names = list(nd_args.keys())
model_params._aux_names = list(nd_auxs.keys())
return model_params
| 37.837209 | 108 | 0.687154 |
acee188b6b63f8911798baa2cfe32eee4347f778 | 279 | py | Python | rpisec/telegram_bot/commands/status.py | marclr/rpi-security | 2f7b39c572c45169fa10a9c571bba9cf5f869254 | [
"MIT"
] | null | null | null | rpisec/telegram_bot/commands/status.py | marclr/rpi-security | 2f7b39c572c45169fa10a9c571bba9cf5f869254 | [
"MIT"
] | 1 | 2021-06-01T23:14:14.000Z | 2021-06-01T23:14:14.000Z | rpisec/telegram_bot/commands/status.py | marclr/rpi-security | 2f7b39c572c45169fa10a9c571bba9cf5f869254 | [
"MIT"
] | null | null | null | def status(bot, update, webcontrol):
chat_id = update.message.chat_id
code, text = webcontrol.execute('detection', 'status')
if code == 200:
bot.sendMessage(chat_id=chat_id, text=text)
else:
bot.sendMessage(chat_id=chat_id, text="Try it later")
| 27.9 | 61 | 0.666667 |
acee190fc597a4ba709e525d830f19d5317853f4 | 5,319 | py | Python | enwiki/draftification_tagger.py | jjmc89-bot/JJMC89_bot | 9dd6f6c7bf60ba322760f4c1d09e30eafb401d93 | [
"MIT"
] | 2 | 2022-01-02T03:23:40.000Z | 2022-01-16T01:42:24.000Z | enwiki/draftification_tagger.py | jjmc89-bot/JJMC89_bot | 9dd6f6c7bf60ba322760f4c1d09e30eafb401d93 | [
"MIT"
] | 1 | 2022-01-02T02:56:33.000Z | 2022-03-01T02:12:41.000Z | enwiki/draftification_tagger.py | jjmc89-bot/jjmc89-bot-scripts | 9dd6f6c7bf60ba322760f4c1d09e30eafb401d93 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Tag draftified articles."""
# Author : JJMC89
# License: MIT
from __future__ import annotations
import argparse
import re
from functools import lru_cache
from typing import Any, Generator, Iterable
import pywikibot
from pywikibot.bot import (
_GLOBAL_HELP,
ExistingPageBot,
NoRedirectPageBot,
SingleSiteBot,
)
from pywikibot.pagegenerators import GeneratorFactory, parameterHelp
@lru_cache()
def get_redirects(
pages: frozenset[pywikibot.Page],
) -> frozenset[pywikibot.Page]:
"""Given pages, return all possible titles."""
link_pages = set()
for page in pages:
while page.isRedirectPage():
try:
page = page.getRedirectTarget()
except pywikibot.exceptions.CircularRedirectError:
break
if not page.exists():
continue
link_pages.add(page)
for redirect in page.backlinks(filter_redirects=True):
link_pages.add(redirect)
return frozenset(link_pages)
def has_template(
page: pywikibot.Page,
templates: str | Iterable[pywikibot.Page | str],
) -> bool:
"""
Return True if the page has one of the templates. False otherwise.
:param page: page to check
:param templates: templates to check
"""
if isinstance(templates, str):
templates = [templates]
template_pages = get_redirects(
frozenset(
tpl
if isinstance(tpl, pywikibot.Page)
else pywikibot.Page(page.site, tpl, ns=10)
for tpl in templates
)
)
return bool(template_pages & set(page.templates()))
class DfyTaggerBot(SingleSiteBot, ExistingPageBot, NoRedirectPageBot):
"""Bot to tag draftified articles."""
def __init__(self, **kwargs: Any) -> None:
"""Initialize."""
self.available_options.update( # pylint: disable=no-member
{
"summary": "Add {{{{{tpl}}}}}",
"template": "drafts moved from mainspace",
}
)
super().__init__(**kwargs)
template = self.opt.template
self.add_text = f"\n\n{{{{subst:{template}}}}}"
self.summary = self.opt.summary.format(tpl=template)
def skip_page(self, page: pywikibot.Page) -> bool:
"""Skip non-drafts and drafts with the template."""
if page.namespace() != 118:
pywikibot.warning(f"{page!r} is not a draft.")
return True
if has_template(page, self.opt.template):
pywikibot.warning(f"{page!r} already has the template.")
return True
return super().skip_page(page)
def check_disabled(self) -> None:
"""Check if the task is disabled. If so, quit."""
class_name = self.__class__.__name__
page = pywikibot.Page(
self.site,
f"User:{self.site.username()}/shutoff/{class_name}.json",
)
if page.exists():
content = page.get(force=True).strip()
if content:
pywikibot.error(f"{class_name} disabled:\n{content}")
self.quit()
def treat_page(self) -> None:
"""Process one page."""
self.check_disabled()
self.put_current(
self.current_page.text.strip() + self.add_text,
summary=self.summary,
nocreate=True,
)
def draftified_page_generator(
site: pywikibot.site.BaseSite,
start: pywikibot.Timestamp | None,
) -> Generator[pywikibot.Page, None, None]:
"""
Yield draftified pages based on page moves.
:param site: site to yield page moves from
"""
gen = site.logevents(
logtype="move", namespace=0, start=start, reverse=True
)
for move in gen:
if move.target_ns == 118:
yield move.target_page
def main(*args: str) -> None:
"""Process command line arguments and invoke bot."""
local_args = pywikibot.handle_args(args, do_help=False)
site = pywikibot.Site()
site.login()
gen_factory = GeneratorFactory(site)
script_args = gen_factory.handle_args(local_args)
parser = argparse.ArgumentParser(
description="Tag draftified articles",
epilog=re.sub(
r"\n\n?-help +.+?(\n\n-|\s*$)",
r"\1",
_GLOBAL_HELP + parameterHelp,
flags=re.S,
),
formatter_class=argparse.RawDescriptionHelpFormatter,
allow_abbrev=False,
)
parser.add_argument(
"--always",
"-a",
action="store_true",
help="Do not prompt to save changes",
)
parser.add_argument(
"--start",
type=pywikibot.Timestamp.fromISOformat,
help="Timestamp to start from",
metavar="%Y-%m-%dT%H:%M:%SZ",
)
parser.add_argument(
"--summary", help="Edit aummary for the bot", default=argparse.SUPPRESS
)
parser.add_argument(
"--template", help="Template to add", default=argparse.SUPPRESS
)
parsed_args = vars(parser.parse_args(args=script_args))
start = parsed_args.pop("start")
gen = None if gen_factory.gens else draftified_page_generator(site, start)
gen = gen_factory.getCombinedGenerator(gen=gen)
DfyTaggerBot(generator=gen, site=site, **parsed_args).run()
if __name__ == "__main__":
main()
| 30.221591 | 79 | 0.611769 |
acee196767a32893414826fc763afb5e9f081d52 | 19,448 | py | Python | src/transformers/models/dpr/tokenization_dpr.py | dctelus/transformers | 6786cbc4b14ebff0ac59c768cadd109391db9a08 | [
"Apache-2.0"
] | 3 | 2022-01-15T08:06:07.000Z | 2022-03-10T07:13:18.000Z | src/transformers/models/dpr/tokenization_dpr.py | arron1227/transformers | b18dfd95e1f60ae65a959a7b255fc06522170d1b | [
"Apache-2.0"
] | null | null | null | src/transformers/models/dpr/tokenization_dpr.py | arron1227/transformers | b18dfd95e1f60ae65a959a7b255fc06522170d1b | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team, The Hugging Face Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for DPR."""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
# File names expected inside a tokenizer checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
# Checkpoint-name -> download-URL maps for the published DPR checkpoints
# (context encoder, question encoder and reader respectively).
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt",
        "facebook/dpr-ctx_encoder-multiset-base": "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json",
        "facebook/dpr-ctx_encoder-multiset-base": "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json",
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt",
        "facebook/dpr-question_encoder-multiset-base": "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json",
        "facebook/dpr-question_encoder-multiset-base": "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json",
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt",
        "facebook/dpr-reader-multiset-base": "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json",
        "facebook/dpr-reader-multiset-base": "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json",
    },
}
# Maximum input length per checkpoint (all DPR checkpoints use 512
# positional embeddings).
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
# Default tokenizer init kwargs per checkpoint (all DPR checkpoints are
# lower-cased).
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    r"""
    Construct a DPRContextEncoder tokenizer.
    [`DPRContextEncoderTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation
    splitting and wordpiece.
    Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters.
    """
    # Class-level tables pointing `from_pretrained` at the published DPR
    # context-encoder checkpoints (file names, URLs, max lengths, init kwargs).
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    r"""
    Constructs a DPRQuestionEncoder tokenizer.
    [`DPRQuestionEncoderTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation
    splitting and wordpiece.
    Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters.
    """
    # Class-level tables pointing `from_pretrained` at the published DPR
    # question-encoder checkpoints (file names, URLs, max lengths, init kwargs).
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
# One candidate answer span: its score, the passage's relevance score, the
# passage id, the (inclusive) token indices of the span and its decoded text.
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
# Lightweight view of the DPRReader model output consumed by
# `decode_best_spans` below.
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    # Mixin layered on top of a BERT-style tokenizer. It relies on the host
    # class providing `__call__`, `pad`, `decode`, `sep_token_id` and
    # `pad_token_id` (here: BertTokenizer via DPRReaderTokenizer below).
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs
    ) -> BatchEncoding:
        # No passages at all: behave exactly like the plain tokenizer.
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        # Only one of titles/texts given: encode (question, other) as a pair.
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        # Full reader path: build [CLS] question [SEP] title [SEP] text for
        # each passage. Normalize scalars to lists first; a single question is
        # duplicated across all passages.
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        # question+title get the special tokens ([CLS]/[SEP]); the passage
        # text is appended raw and (optionally) truncated to max_length.
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        # Attention mask: 1 for real tokens, 0 for padding tokens.
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """
        Get the span predictions for the extractive Q&A model.
        Returns: *List* of *DPRReaderOutput* sorted by descending *(relevance_score, span_score)*. Each
        *DPRReaderOutput* is a *Tuple* with:
        - **span_score**: `float` that corresponds to the score given by the reader for this span compared to other
          spans in the same passage. It corresponds to the sum of the start and end logits of the span.
        - **relevance_score**: `float` that corresponds to the score of the each passage to answer the question,
          compared to all the other passages. It corresponds to the output of the QA classifier of the DPRReader.
        - **doc_id**: `int` the id of the passage.
        - **start_index**: `int` the start index of the span (inclusive).
        - **end_index**: `int` the end index of the span (inclusive).
        Examples:
        ```python
        >>> from transformers import DPRReader, DPRReaderTokenizer
        >>> tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
        >>> model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
        >>> encoded_inputs = tokenizer(
        ...     questions=["What is love ?"],
        ...     titles=["Haddaway"],
        ...     texts=["'What Is Love' is a song recorded by the artist Haddaway"],
        ...     return_tensors="pt",
        ... )
        >>> outputs = model(**encoded_inputs)
        >>> predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
        >>> print(predicted_spans[0].text)  # best span
        ```"""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        # Visit passages from most to least relevant so the best passages get
        # first claim on the num_spans budget.
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            # Ignore trailing padding when slicing the logits.
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                # Shift span indices back from passage-local to full-sequence
                # positions before decoding.
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """
        Finds the best answer span for the extractive Q&A model for one passage. It returns the best span by descending
        `span_score` order and keeping max `top_spans` spans. Spans longer than `max_answer_length` are ignored.
        """
        # Score every (start, end) pair with end - start < max_answer_length.
        scores = []
        for (start_index, start_score) in enumerate(start_logits):
            for (answer_length, end_score) in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            # Skip candidates that nest inside, or fully contain, an already
            # chosen span (greedy non-overlap selection).
            if any(
                [
                    start_index <= prev_start_index <= prev_end_index <= end_index
                    or prev_start_index <= start_index <= end_index <= prev_end_index
                    for (prev_start_index, prev_end_index) in chosen_span_intervals
                ]
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    r"""
    Construct a DPRReader tokenizer.
    [`DPRReaderTokenizer`] is almost identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation
    splitting and wordpiece. The difference is that is has three inputs strings: question, titles and texts that are
    combined to be fed to the [`DPRReader`] model.
    Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters.
    """
    # Class-level tables pointing `from_pretrained` at the published DPR
    # reader checkpoints (file names, URLs, max lengths, init kwargs).
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 50.514286 | 154 | 0.676008 |
acee1b942bd8d85b4f0a197d8af63101d383b1f6 | 338 | py | Python | bireme/institution/field_definitions.py | rfdeoliveira/fi-admin | c2df084c7e79d587e2273dc222f106fa243b7f6e | [
"MIT",
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | bireme/institution/field_definitions.py | rfdeoliveira/fi-admin | c2df084c7e79d587e2273dc222f106fa243b7f6e | [
"MIT",
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | bireme/institution/field_definitions.py | rfdeoliveira/fi-admin | c2df084c7e79d587e2273dc222f106fa243b7f6e | [
"MIT",
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
from django.utils.translation import ugettext_lazy as _
import colander
import deform
import json
# Maps institution model field names to numeric record field tags.
# NOTE(review): presumably ISIS/LILACS-style field tags ('name' and 'acronym'
# share tag 611; 'mailbox' and 'zipcode' share 616) -- confirm against the
# record layout this map is consumed by.
field_tag_map = {'cc_code': '607', 'name': '611', 'acronym': '611',
                 'address': '615', 'mailbox': '616', 'zipcode': '616',
                 'city': '617', 'state': '618', 'country': '620'
                 }
| 28.166667 | 70 | 0.559172 |
acee1df55aeedc6b5e2a73187ecb8ddb64584455 | 2,838 | py | Python | cohesity_management_sdk/models/update_antivirus_service_group_params.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 18 | 2019-09-24T17:35:53.000Z | 2022-03-25T08:08:47.000Z | cohesity_management_sdk/models/update_antivirus_service_group_params.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 18 | 2019-03-29T19:32:29.000Z | 2022-01-03T23:16:45.000Z | cohesity_management_sdk/models/update_antivirus_service_group_params.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 16 | 2019-02-27T06:54:12.000Z | 2021-11-16T18:10:24.000Z | # -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
import cohesity_management_sdk.models.antivirus_service_config_params
class UpdateAntivirusServiceGroupParams(object):

    """Implementation of the 'UpdateAntivirusServiceGroupParams' model.

    Request parameters for updating an Antivirus service group.

    Attributes:
        antivirus_services (list of AntivirusServiceConfigParams): Specifies
            the Antivirus services for this provider.
        description (string): Specifies the description of the Antivirus
            service group.
        id (long|int): Specifies the Id of the Antivirus service group.
        is_enabled (bool): Specifies whether the antivirus service group is
            enabled or not.
        name (string): Specifies the name of the Antivirus service group.
    """

    # Mapping from Model property names to API (JSON) property names.
    _names = {
        "id": 'id',
        "name": 'name',
        "antivirus_services": 'antivirusServices',
        "description": 'description',
        "is_enabled": 'isEnabled'
    }

    def __init__(self,
                 id=None,
                 name=None,
                 antivirus_services=None,
                 description=None,
                 is_enabled=None):
        """Constructor for the UpdateAntivirusServiceGroupParams class"""
        self.antivirus_services = antivirus_services
        self.description = description
        self.id = id
        self.is_enabled = is_enabled
        self.name = name

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dict): A dictionary representation of the object as
                obtained from the deserialization of the server's response.
                The keys MUST match property names in the API description.

        Returns:
            UpdateAntivirusServiceGroupParams: the populated instance, or
            None when ``dictionary`` is None.
        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary.
        id = dictionary.get('id')
        name = dictionary.get('name')
        # Idiom fixes vs. the original: identity comparison with None instead
        # of ``!= None`` and a comprehension instead of an append loop.
        raw_services = dictionary.get('antivirusServices')
        antivirus_services = None
        if raw_services is not None:
            antivirus_services = [
                cohesity_management_sdk.models.antivirus_service_config_params.AntivirusServiceConfigParams.from_dictionary(structure)
                for structure in raw_services
            ]
        description = dictionary.get('description')
        is_enabled = dictionary.get('isEnabled')

        # Return an object of this model.
        return cls(id,
                   name,
                   antivirus_services,
                   description,
                   is_enabled)
| 33.388235 | 161 | 0.628259 |
acee1f0c043ce3724e6b141bf2bbfd3f908d8281 | 2,973 | py | Python | chrome/common/extensions/docs/server2/link_converter.py | iplo/Chain | 8bc8943d66285d5258fffc41bed7c840516c4422 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 231 | 2015-01-08T09:04:44.000Z | 2021-12-30T03:03:10.000Z | chrome/common/extensions/docs/server2/link_converter.py | j4ckfrost/android_external_chromium_org | a1a3dad8b08d1fcf6b6b36c267158ed63217c780 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2017-02-14T21:55:58.000Z | 2017-02-14T21:55:58.000Z | chrome/common/extensions/docs/server2/link_converter.py | j4ckfrost/android_external_chromium_org | a1a3dad8b08d1fcf6b6b36c267158ed63217c780 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 268 | 2015-01-21T05:53:28.000Z | 2022-03-25T22:09:01.000Z | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script converts old-style <a> links to API docs to the new $ref links.
# See reference_resolver.py for more info on the format of $ref links.
import optparse
import os
import re
from docs_server_utils import SanitizeAPIName
def _ReadFile(filename):
  """Return the entire contents of |filename| as a single string."""
  input_file = open(filename)
  try:
    return input_file.read()
  finally:
    input_file.close()
def _WriteFile(filename, contents):
  """Overwrite |filename| with |contents|."""
  with open(filename, 'w') as output_file:
    output_file.write(contents)
def _Replace(matches, filename):
  """Rewrite one <a ...href=...>...</a> regex match as a $ref link.

  Links that are not a single-anchor API link (exactly one '#', anchor
  prefixed with property-/type-/method-/event-) are reproduced unchanged.
  """
  padding_text = matches.group(1)
  href = matches.group(2)
  title = matches.group(3)
  # Anything without exactly one fragment marker is not an API anchor link.
  if href.count('#') != 1:
    return '<a%shref=%s>%s</a>' % (padding_text, href, title)
  # Strip quoting/escaping and path separators before splitting on '#'.
  stripped = href
  for unwanted in ('\\', "'", '"', '/'):
    stripped = stripped.replace(unwanted, '')
  page, link = stripped.split('#')
  if not page:
    # Same-page link: derive the page from the file being converted.
    page = '%s.html' % SanitizeAPIName(filename.rsplit(os.sep, 1)[-1])
  # Only property/type/method/event anchors become $ref links.
  if not link.startswith(('property-', 'type-', 'method-', 'event-')):
    return '<a%shref=%s>%s</a>' % (padding_text, href, title)
  link = re.sub('^(property|type|method|event)-', '', link).replace('-', '.')
  page = page.replace('.html', '.').replace('_', '.')
  padding = '' if padding_text == ' ' else padding_text
  if link in title:
    return '%s$ref:%s%s' % (padding, page, link)
  return '%s$ref:[%s%s %s]' % (padding, page, link, title)
def _ConvertFile(filename, use_stdout):
regex = re.compile(r'<a(.*?)href=(.*?)>(.*?)</a>', flags=re.DOTALL)
contents = _ReadFile(filename)
contents = re.sub(regex,
lambda m: _Replace(m, filename),
contents)
contents = contents.replace('$ref:extension.lastError',
'$ref:runtime.lastError')
if use_stdout:
print contents
else:
_WriteFile(filename, contents)
# Command-line driver: convert a single file (-f) or every file under the
# given directory. -o writes the converted text to stdout instead of in place.
if __name__ == '__main__':
  parser = optparse.OptionParser(
      description='Converts <a> links to $ref links.',
      usage='usage: %prog [option] <directory>')
  parser.add_option('-f', '--file', default='',
                    help='Convert links in single file.')
  parser.add_option('-o', '--out', action='store_true', default=False,
                    help='Write to stdout.')
  # NOTE(review): this compiled regex is never used here -- _ConvertFile
  # compiles its own copy. Candidate for removal.
  regex = re.compile(r'<a(.*?)href=(.*?)>(.*?)</a>', flags=re.DOTALL)
  opts, argv = parser.parse_args()
  if opts.file:
    _ConvertFile(opts.file, opts.out)
  else:
    if len(argv) != 1:
      parser.print_usage()
      exit(0)
    # Recursively convert every file under the given directory.
    for root, dirs, files in os.walk(argv[0]):
      for name in files:
        _ConvertFile(os.path.join(root, name), opts.out)
| 33.784091 | 77 | 0.574504 |
acee1f152fe26798e03a0591cad2a6c010646283 | 2,613 | py | Python | sharkstruc/readers/phyche.py | sharksmhi/sharkstruc | 6d234f164817332e1589179c497bc82eddac6545 | [
"MIT"
] | null | null | null | sharkstruc/readers/phyche.py | sharksmhi/sharkstruc | 6d234f164817332e1589179c497bc82eddac6545 | [
"MIT"
] | null | null | null | sharkstruc/readers/phyche.py | sharksmhi/sharkstruc | 6d234f164817332e1589179c497bc82eddac6545 | [
"MIT"
] | null | null | null | # Copyright (c) 2020 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
"""
Created on 2020-12-15 14:15
@author: johannes
"""
from pathlib import Path
from sharkstruc.readers.xlsx import PandasXlsxReader
from sharkstruc.readers.txt import PandasTxtReader
class PhysicalChemicalExcelReader(PandasXlsxReader):
    """Excel reader for physical/chemical data deliveries.

    Behaves exactly like :class:`PandasXlsxReader`; kept as a distinct class
    so callers can refer to this delivery type by name.
    """
    # NOTE: the previous pass-through ``__init__(*args, **kwargs)`` that only
    # called ``super().__init__`` was redundant and has been removed;
    # construction is inherited unchanged.
class PhysicalChemicalLIMSReader(PandasTxtReader):
    """Reader for LIMS deliveries of physical/chemical data.

    A delivery is a directory (with a ``Raw_data`` sub-directory) of text
    files; ``load`` indexes the files and ``read_element`` reads one of them
    into a DataFrame, moving quality flags out of the data cells.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.arguments = list(args)
        # File name -> full Path, populated by _activate_files().
        self.files = {}
    def load(self, *args, **kwargs):
        """Index the files of a LIMS delivery directory."""
        self._activate_files(*args, **kwargs)
    def read_element(self, *args, **kwargs):
        """Read one delivery file; returns a DataFrame or None if missing."""
        return self._read_file(*args, **kwargs)
    def _read_file(self, *args, **kwargs):
        # First positional argument is the file id (isinstance instead of
        # the original ``type(args) == tuple`` comparison).
        fid = args[0] if isinstance(args, tuple) else args
        if fid in self.files:
            # An explicitly empty dtype means "read everything as text".
            if kwargs.get('dtype') == '':
                kwargs['dtype'] = str
            df = self.read(self.files.get(fid), **kwargs)
            df = self.eliminate_empty_rows(df)
            df = self._move_qflags_from_data_cells(df)
        else:
            df = None
            print('File {} not found in delivery'.format(fid))
        return df
    def _move_qflags_from_data_cells(self, df):
        """Strip quality flags out of data cells into matching Q_ columns."""
        qflags = {'<', '>', 'B', 'S', 'E', 'M'}
        for key in self.data_columns:
            if key in df:
                for qf in qflags:
                    boolean = df[key].str.contains(qf, regex=False)
                    if boolean.any():
                        df.loc[boolean, key] = df.loc[boolean, key].str.replace(qf, '')
                        df.loc[boolean, 'Q_'+key] = qf
        return df
    def _activate_files(self, *args, **kwargs):
        """Map every file under <delivery>/Raw_data into self.files."""
        folder_path = Path(args[0]) if isinstance(args, tuple) else Path(args)
        # BUG FIX: the original tested ``folder_path.exists`` without calling
        # it; a bound method is always truthy, so the missing-directory check
        # never fired. Call the method.
        if not folder_path.exists():
            raise FileNotFoundError('Could not find the given LIMS-directory: {}'.format(folder_path))
        if folder_path.name != 'Raw_data':
            folder_path = folder_path / 'Raw_data'
        for file_name in folder_path.glob('**/*'):
            self.files.setdefault(file_name.name, file_name)
# Ad-hoc smoke test with a developer-local Windows path; only runs when this
# module is executed directly.
if __name__ == '__main__':
    p = PhysicalChemicalExcelReader(2, 5, 6, a='g', b=33)
    p.load('C:/Arbetsmapp/webmtrl/Format Physical and chemical.xlsx')
    # df = p._read_sheet(
    #     'Analysinfo',
    #     header=2,
    #     sep='\t',
    #     dtype=str,
    #     keep_default_na=False,
    # )
| 32.6625 | 102 | 0.585151 |
acee200c3aa76b3033cbfeecedc615a3e7487932 | 451 | py | Python | scripts/run_batched_experiment.py | alniniclas/junit-to-jmh-experiment | 7351ce7aa172da6d1bb047b46667b027044593a4 | [
"Apache-2.0"
] | null | null | null | scripts/run_batched_experiment.py | alniniclas/junit-to-jmh-experiment | 7351ce7aa172da6d1bb047b46667b027044593a4 | [
"Apache-2.0"
] | null | null | null | scripts/run_batched_experiment.py | alniniclas/junit-to-jmh-experiment | 7351ce7aa172da6d1bb047b46667b027044593a4 | [
"Apache-2.0"
] | null | null | null | import argparse
import batched_experiment.runner
import batched_experiment.config
def main():
  """Command-line entry point: run a batched experiment from a config file."""
  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument('config', type=str)
  parsed = arg_parser.parse_args()
  # Parse the experiment configuration, then hand it to the runner.
  configuration = batched_experiment.config.BatchedExperimentConfiguration.parse_from_file(parsed.config)
  runner = batched_experiment.runner.ExperimentRunner(configuration)
  runner.run_experiment()
if __name__ == '__main__':
  main()
| 23.736842 | 96 | 0.800443 |
acee2110c26ddf83b45da07eec1e784b361f6c91 | 6,797 | py | Python | lib/roi_data_layer/layerFcnReg.py | someoneAlready/ohem | b7552ceb8ed1e9768e0d522258caa64b79834b54 | [
"BSD-2-Clause"
] | 1 | 2017-01-24T20:41:52.000Z | 2017-01-24T20:41:52.000Z | lib/roi_data_layer/layerFcnReg.py | cgangEE/ohem | b7552ceb8ed1e9768e0d522258caa64b79834b54 | [
"BSD-2-Clause"
] | null | null | null | lib/roi_data_layer/layerFcnReg.py | cgangEE/ohem | b7552ceb8ed1e9768e0d522258caa64b79834b54 | [
"BSD-2-Clause"
] | 1 | 2020-10-09T07:49:03.000Z | 2020-10-09T07:49:03.000Z | # --------------------------------------------------------
# Fast R-CNN with OHEM
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Abhinav Shrivastava
# --------------------------------------------------------
"""The data layer used during training to train a Fast R-CNN network.
RoIDataLayer implements a Caffe Python layer.
"""
import sys
import caffe
from fast_rcnn.config import cfg
from roi_data_layer.minibatchFcnReg import get_minibatch, get_allrois_minibatch, get_ohem_minibatch
import numpy as np
import yaml
from multiprocessing import Process, Queue
class RoIDataLayer(caffe.Layer):
    """Fast R-CNN data layer used for training.

    Serves image minibatches (and FCN-style label/regression maps) to the
    network, either synchronously or via a BlobFetcher child process.
    Note: this is Python 2 code (print statements, dict.iteritems).
    """
    def _shuffle_roidb_inds(self):
        """Randomly permute the training roidb."""
        if cfg.TRAIN.ASPECT_GROUPING:
            # Group images by aspect ratio (landscape vs portrait) and pair
            # them up so each 2-image minibatch has a consistent orientation.
            widths = np.array([r['width'] for r in self._roidb])
            heights = np.array([r['height'] for r in self._roidb])
            horz = (widths >= heights)
            vert = np.logical_not(horz)
            horz_inds = np.where(horz)[0]
            vert_inds = np.where(vert)[0]
            inds = np.hstack((
                np.random.permutation(horz_inds),
                np.random.permutation(vert_inds)))
            inds = np.reshape(inds, (-1, 2))
            # Shuffle the pairs, then flatten back to a single index order.
            row_perm = np.random.permutation(np.arange(inds.shape[0]))
            inds = np.reshape(inds[row_perm, :], (-1,))
            self._perm = inds
        else:
            self._perm = np.random.permutation(np.arange(len(self._roidb)))
        self._cur = 0
    def _get_next_minibatch_inds(self):
        """Return the roidb indices for the next minibatch."""
        # Reshuffle once the epoch is (nearly) exhausted.
        if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
            self._shuffle_roidb_inds()
        db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
        self._cur += cfg.TRAIN.IMS_PER_BATCH
        return db_inds
    def _get_next_minibatch(self):
        """Return the blobs to be used for the next minibatch.
        If cfg.TRAIN.USE_PREFETCH is True, then blobs will be computed in a
        separate process and made available through self._blob_queue.
        """
        db_inds = self._get_next_minibatch_inds()
        minibatch_db = [self._roidb[i] for i in db_inds]
        blobs = get_minibatch(minibatch_db, self._num_classes)
        return blobs
    def set_roidb(self, roidb):
        """Set the roidb to be used by this layer during training."""
        self._roidb = roidb
        self._shuffle_roidb_inds()
        if cfg.TRAIN.USE_PREFETCH:
            # Bounded queue: the fetcher blocks once 10 minibatches are ready.
            self._blob_queue = Queue(10)
            self._prefetch_process = BlobFetcher(self._blob_queue,
                                                 self._roidb,
                                                 self._num_classes)
            self._prefetch_process.start()
            # Terminate the child process when the parent exists
            def cleanup():
                print 'Terminating BlobFetcher'
                self._prefetch_process.terminate()
                self._prefetch_process.join()
            import atexit
            atexit.register(cleanup)
    def setup(self, bottom, top):
        """Setup the RoIDataLayer: register top blobs and their shapes.

        Spatial dims are rounded down to multiples of 32 via integer
        division (Python 2 `/`).
        """
        # parse the layer parameter string, which must be valid YAML
        layer_params = yaml.load(self.param_str)
        self._num_classes = layer_params['num_classes']
        self._name_to_top_map = {}
        # data blob: holds a batch of N images, each with 3 channels
        idx = 0
        top[idx].reshape(cfg.TRAIN.IMS_PER_BATCH, 3,
            max(cfg.TRAIN.SCALES) / 32 * 32, cfg.TRAIN.MAX_SIZE / 32 * 32)
        self._name_to_top_map['data'] = idx
        idx += 1
        # label map (14 x 1 x H x W).
        top[idx].reshape(14, 1, max(cfg.TRAIN.SCALES) / 32 * 32,
            cfg.TRAIN.MAX_SIZE / 32 * 32)
        self._name_to_top_map['label'] = idx
        idx += 1
        # regression targets and their inside/outside weights (1 x 28 x H x W).
        top[idx].reshape(1, 28, max(cfg.TRAIN.SCALES) / 32 * 32,
            cfg.TRAIN.MAX_SIZE / 32 * 32)
        self._name_to_top_map['reg_targets'] = idx
        idx += 1
        top[idx].reshape(1, 28, max(cfg.TRAIN.SCALES) / 32 * 32,
            cfg.TRAIN.MAX_SIZE / 32 * 32)
        self._name_to_top_map['reg_inside_weights'] = idx
        idx += 1
        top[idx].reshape(1, 28, max(cfg.TRAIN.SCALES) / 32 * 32,
            cfg.TRAIN.MAX_SIZE / 32 * 32)
        self._name_to_top_map['reg_outside_weights'] = idx
        idx += 1
        print 'RoiDataLayer: name_to_top:', self._name_to_top_map
        assert len(top) == len(self._name_to_top_map)
    def forward(self, bottom, top):
        """Get blobs and copy them into this layer's top blob vector."""
        blobs = self._get_next_minibatch()
        for blob_name, blob in blobs.iteritems():
            top_ind = self._name_to_top_map[blob_name]
            # Reshape net's input blobs
            top[top_ind].reshape(*(blob.shape))
            # Copy data into net's input blobs
            top[top_ind].data[...] = blob.astype(np.float32, copy=False)
    def backward(self, top, propagate_down, bottom):
        """This layer does not propagate gradients."""
        pass
    def reshape(self, bottom, top):
        """Reshaping happens during the call to forward."""
        pass
class BlobFetcher(Process):
    """Experimental class for prefetching blobs in a separate process.

    Runs `run()` in a child process, pushing minibatch blob dicts onto the
    shared queue until terminated by the parent (see RoIDataLayer.set_roidb).
    """
    def __init__(self, queue, roidb, num_classes):
        super(BlobFetcher, self).__init__()
        self._queue = queue
        self._roidb = roidb
        self._num_classes = num_classes
        self._perm = None
        self._cur = 0
        self._shuffle_roidb_inds()
        # fix the random seed for reproducibility
        np.random.seed(cfg.RNG_SEED)
    def _shuffle_roidb_inds(self):
        """Randomly permute the training roidb."""
        # TODO(rbg): remove duplicated code
        self._perm = np.random.permutation(np.arange(len(self._roidb)))
        self._cur = 0
    def _get_next_minibatch_inds(self):
        """Return the roidb indices for the next minibatch."""
        # TODO(rbg): remove duplicated code
        if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
            self._shuffle_roidb_inds()
        db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
        self._cur += cfg.TRAIN.IMS_PER_BATCH
        return db_inds
    def run(self):
        # Child-process loop: fetch minibatches forever; the parent kills the
        # process at exit. The bounded queue applies back-pressure.
        print 'BlobFetcher started'
        while True:
            db_inds = self._get_next_minibatch_inds()
            minibatch_db = [self._roidb[i] for i in db_inds]
            # With OHEM, all RoIs are gathered so hard examples can be mined.
            if cfg.TRAIN.USE_OHEM:
                blobs = get_allrois_minibatch(minibatch_db, self._num_classes)
            else:
                blobs = get_minibatch(minibatch_db, self._num_classes)
            self._queue.put(blobs)
acee2165f4e9de57208a3aef2ca7e8fc1b042b5b | 51,276 | py | Python | scripts/runtests.py | msakai/chainer-compiler | 77190561408911b33904a20c47f734f38790cfdf | [
"MIT"
] | null | null | null | scripts/runtests.py | msakai/chainer-compiler | 77190561408911b33904a20c47f734f38790cfdf | [
"MIT"
] | null | null | null | scripts/runtests.py | msakai/chainer-compiler | 77190561408911b33904a20c47f734f38790cfdf | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import copy
import glob
import multiprocessing
import os
import re
import sys
import subprocess
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(project_root)
import ch2o_tests
import elichika_tests
import gen_backprop_tests_oc
import gen_backprop_tests_pc
import gen_chainercv_model_tests
import gen_extra_test
import gen_large_tests_oc
import onnx_chainer_tests
import onnx_real_tests
from test_case import TestCase
parser = argparse.ArgumentParser(description='Run tests for chainer_compiler')
parser.add_argument('test_filter', default=None, nargs='?',
help='A regular expression to filter tests')
parser.add_argument('--all', '-a', action='store_true',
help='Run all tests')
parser.add_argument('--build_dir', '-b', default=None,
help='The build directory')
parser.add_argument('--jobs', '-j', type=int,
default=multiprocessing.cpu_count(),
help='Number of parallel jobs')
parser.add_argument('--show_log', action='store_true',
help='Show logs')
parser.add_argument('--skip_build', action='store_true',
help='Skip the build before running tests')
parser.add_argument('--use_gpu', '-g', action='store_true',
help='Run heavy tests with GPU')
parser.add_argument('--device', '-d', default=None,
help='ChainerX device to be used')
parser.add_argument('--use_gpu_all', '-G', action='store_true',
help='Run all tests with GPU')
parser.add_argument('--failed', action='store_true',
help='Run tests which failed last time')
parser.add_argument('--failure_log', default='out/failed_tests.log',
help='The file where names of failed tests are stored')
parser.add_argument('--fuse', action='store_true', help='Enable fusion')
parser.add_argument('--ngraph', action='store_true', help='Enable nGraph')
parser.add_argument('--snpe', action='store_true', help='Enable SNPE')
parser.add_argument('--computation_order', default=None,
help='Force setting --computation_order flag')
parser.add_argument('--cache', action='store_true', help='Enable model caching')
parser.add_argument('--verbose', action='store_true',
help='Run tests with --verbose flag')
args = parser.parse_args()
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
RESET = '\033[0m'
ONNX_TEST_DATA = 'third_party/onnx/onnx/backend/test/data'
NODE_TEST = os.path.join(ONNX_TEST_DATA, 'node')
SIMPLE_TEST = os.path.join(ONNX_TEST_DATA, 'simple')
# ChainerX does not support 1D conv/pool.
fail_1d_conv_pool = args.use_gpu_all
# Master list of ONNX backend test cases run against chainer_compiler.
# Each entry points at a test-data directory under NODE_TEST or
# SIMPLE_TEST; keyword arguments tune expectations per case
# (fail=... marks tests not expected to pass, rtol/atol loosen numeric
# tolerances, skip_shape_inference bypasses ONNX shape inference).
TEST_CASES = [
    TestCase(NODE_TEST, 'test_identity'),

    TestCase(NODE_TEST, 'test_add'),
    TestCase(NODE_TEST, 'test_add_bcast'),
    TestCase(NODE_TEST, 'test_sub'),
    TestCase(NODE_TEST, 'test_sub_bcast'),
    TestCase(NODE_TEST, 'test_sub_example'),
    TestCase(NODE_TEST, 'test_mul'),
    TestCase(NODE_TEST, 'test_mul_bcast'),
    TestCase(NODE_TEST, 'test_mul_example'),
    TestCase(NODE_TEST, 'test_div'),
    TestCase(NODE_TEST, 'test_div_bcast'),
    TestCase(NODE_TEST, 'test_div_example'),
    TestCase(NODE_TEST, 'test_mod_broadcast'),
    # TODO(hamaji): Support fmod.
    # TestCase(NODE_TEST, 'test_mod_int64_fmod'),
    # TestCase(NODE_TEST, 'test_mod_mixed_sign_float16'),
    # TestCase(NODE_TEST, 'test_mod_mixed_sign_float32'),
    # TestCase(NODE_TEST, 'test_mod_mixed_sign_float64'),
    TestCase(NODE_TEST, 'test_mod_mixed_sign_int16'),
    TestCase(NODE_TEST, 'test_mod_mixed_sign_int32'),
    TestCase(NODE_TEST, 'test_mod_mixed_sign_int64'),
    TestCase(NODE_TEST, 'test_mod_mixed_sign_int8'),
    # TODO(hamaji): Unsupported dtypes.
    # TestCase(NODE_TEST, 'test_mod_uint16'),
    # TestCase(NODE_TEST, 'test_mod_uint32'),
    # TestCase(NODE_TEST, 'test_mod_uint64'),
    TestCase(NODE_TEST, 'test_mod_uint8'),
    TestCase(NODE_TEST, 'test_pow'),
    TestCase(NODE_TEST, 'test_pow_bcast_array'),
    TestCase(NODE_TEST, 'test_pow_bcast_scalar'),
    TestCase(NODE_TEST, 'test_pow_example'),
    TestCase(NODE_TEST, 'test_and2d'),
    TestCase(NODE_TEST, 'test_and3d'),
    TestCase(NODE_TEST, 'test_and4d'),
    TestCase(NODE_TEST, 'test_and_bcast3v1d'),
    TestCase(NODE_TEST, 'test_and_bcast3v2d'),
    TestCase(NODE_TEST, 'test_and_bcast4v2d'),
    TestCase(NODE_TEST, 'test_and_bcast4v3d'),
    TestCase(NODE_TEST, 'test_and_bcast4v4d'),
    TestCase(NODE_TEST, 'test_or2d'),
    TestCase(NODE_TEST, 'test_or4d'),
    TestCase(NODE_TEST, 'test_or_bcast3v1d'),
    TestCase(NODE_TEST, 'test_or3d'),
    TestCase(NODE_TEST, 'test_or_bcast4v2d'),
    TestCase(NODE_TEST, 'test_or_bcast3v2d'),
    TestCase(NODE_TEST, 'test_or_bcast4v3d'),
    TestCase(NODE_TEST, 'test_or_bcast4v4d'),
    TestCase(NODE_TEST, 'test_xor2d'),
    TestCase(NODE_TEST, 'test_xor3d'),
    TestCase(NODE_TEST, 'test_xor_bcast3v1d'),
    TestCase(NODE_TEST, 'test_xor4d'),
    TestCase(NODE_TEST, 'test_xor_bcast3v2d'),
    TestCase(NODE_TEST, 'test_xor_bcast4v2d'),
    TestCase(NODE_TEST, 'test_xor_bcast4v4d'),
    TestCase(NODE_TEST, 'test_xor_bcast4v3d'),
    TestCase(NODE_TEST, 'test_neg'),
    TestCase(NODE_TEST, 'test_neg_example'),
    TestCase(NODE_TEST, 'test_reciprocal'),
    TestCase(NODE_TEST, 'test_reciprocal_example'),
    TestCase(NODE_TEST, 'test_exp'),
    TestCase(NODE_TEST, 'test_exp_example'),
    TestCase(NODE_TEST, 'test_log'),
    TestCase(NODE_TEST, 'test_log_example'),
    TestCase(NODE_TEST, 'test_sqrt'),
    TestCase(NODE_TEST, 'test_sqrt_example'),
    TestCase(NODE_TEST, 'test_sin'),
    TestCase(NODE_TEST, 'test_sin_example'),
    TestCase(NODE_TEST, 'test_cos'),
    TestCase(NODE_TEST, 'test_cos_example'),
    TestCase(NODE_TEST, 'test_tan'),
    TestCase(NODE_TEST, 'test_tan_example'),
    TestCase(NODE_TEST, 'test_sinh'),
    TestCase(NODE_TEST, 'test_sinh_example'),
    TestCase(NODE_TEST, 'test_cosh'),
    TestCase(NODE_TEST, 'test_cosh_example'),
    TestCase(NODE_TEST, 'test_tanh'),
    TestCase(NODE_TEST, 'test_tanh_example'),
    TestCase(NODE_TEST, 'test_asin'),
    TestCase(NODE_TEST, 'test_asin_example'),
    TestCase(NODE_TEST, 'test_acos'),
    TestCase(NODE_TEST, 'test_acos_example'),
    TestCase(NODE_TEST, 'test_atan'),
    TestCase(NODE_TEST, 'test_atan_example'),
    TestCase(NODE_TEST, 'test_asinh'),
    TestCase(NODE_TEST, 'test_asinh_example'),
    TestCase(NODE_TEST, 'test_acosh'),
    TestCase(NODE_TEST, 'test_acosh_example'),
    # TODO(hamaji): Enable these tests once atanh is implemented.
    # TestCase(NODE_TEST, 'test_atanh'),
    # TestCase(NODE_TEST, 'test_atanh_example'),
    TestCase(NODE_TEST, 'test_erf'),
    TestCase(NODE_TEST, 'test_abs'),
    TestCase(NODE_TEST, 'test_relu'),
    TestCase(NODE_TEST, 'test_elu'),
    TestCase(NODE_TEST, 'test_elu_default'),
    TestCase(NODE_TEST, 'test_elu_example'),
    TestCase(NODE_TEST, 'test_leakyrelu'),
    TestCase(NODE_TEST, 'test_leakyrelu_default'),
    TestCase(NODE_TEST, 'test_leakyrelu_example'),
    TestCase(NODE_TEST, 'test_selu'),
    TestCase(NODE_TEST, 'test_selu_default'),
    TestCase(NODE_TEST, 'test_selu_example'),
    TestCase(NODE_TEST, 'test_sigmoid'),
    TestCase(NODE_TEST, 'test_sigmoid_example'),
    TestCase(NODE_TEST, 'test_floor'),
    TestCase(NODE_TEST, 'test_floor_example'),
    TestCase(NODE_TEST, 'test_ceil'),
    TestCase(NODE_TEST, 'test_ceil_example'),
    TestCase(NODE_TEST, 'test_sign'),
    TestCase(NODE_TEST, 'test_not_2d'),
    TestCase(NODE_TEST, 'test_not_3d'),
    TestCase(NODE_TEST, 'test_not_4d'),
    TestCase(NODE_TEST, 'test_equal'),
    TestCase(NODE_TEST, 'test_equal_bcast'),
    TestCase(NODE_TEST, 'test_greater'),
    TestCase(NODE_TEST, 'test_greater_bcast'),
    TestCase(NODE_TEST, 'test_less'),
    TestCase(NODE_TEST, 'test_less_bcast'),

    TestCase(NODE_TEST, 'test_constant'),
    TestCase(NODE_TEST, 'test_constantofshape_float_ones'),
    TestCase(NODE_TEST, 'test_constantofshape_int_zeros'),
    TestCase(NODE_TEST, 'test_onehot_with_axis'),
    TestCase(NODE_TEST, 'test_onehot_with_negative_axis'),
    TestCase(NODE_TEST, 'test_onehot_without_axis'),
    TestCase(NODE_TEST, 'test_eyelike_populate_off_main_diagonal'),
    TestCase(NODE_TEST, 'test_eyelike_with_dtype'),
    TestCase(NODE_TEST, 'test_eyelike_without_dtype'),

    TestCase(NODE_TEST, 'test_cast_DOUBLE_to_FLOAT'),
    TestCase(NODE_TEST, 'test_cast_DOUBLE_to_FLOAT16'),
    TestCase(NODE_TEST, 'test_cast_FLOAT16_to_DOUBLE'),
    TestCase(NODE_TEST, 'test_cast_FLOAT16_to_FLOAT'),
    TestCase(NODE_TEST, 'test_cast_FLOAT_to_DOUBLE'),
    TestCase(NODE_TEST, 'test_cast_FLOAT_to_FLOAT16'),

    TestCase(NODE_TEST, 'test_matmul_2d'),
    TestCase(NODE_TEST, 'test_matmul_3d'),
    TestCase(NODE_TEST, 'test_matmul_4d'),

    TestCase(NODE_TEST, 'test_basic_conv_with_padding'),
    TestCase(NODE_TEST, 'test_basic_conv_without_padding'),
    TestCase(NODE_TEST, 'test_conv_with_strides_no_padding'),
    TestCase(NODE_TEST, 'test_conv_with_strides_padding'),
    TestCase(NODE_TEST, 'test_conv_with_strides_and_asymmetric_padding'),
    TestCase(NODE_TEST, 'test_convtranspose'),
    TestCase(NODE_TEST, 'test_convtranspose_1d', fail=fail_1d_conv_pool),
    TestCase(NODE_TEST, 'test_convtranspose_3d'),
    TestCase(NODE_TEST, 'test_convtranspose_kernel_shape'),
    TestCase(NODE_TEST, 'test_convtranspose_output_shape'),
    # TODO(hamaji): output_pads is not handled yet.
    # TestCase(NODE_TEST, 'test_convtranspose_pad'),
    TestCase(NODE_TEST, 'test_convtranspose_pads'),
    TestCase(NODE_TEST, 'test_convtranspose_with_kernel'),
    TestCase(NODE_TEST, 'test_constant_pad'),

    # Pooling.  TODO(hamaji): auto_pad is not supported.
    TestCase(NODE_TEST, 'test_maxpool_1d_default', fail=fail_1d_conv_pool),
    TestCase(NODE_TEST, 'test_maxpool_2d_ceil'),
    TestCase(NODE_TEST, 'test_maxpool_2d_default'),
    TestCase(NODE_TEST, 'test_maxpool_2d_pads'),
    TestCase(NODE_TEST, 'test_maxpool_2d_precomputed_pads'),
    TestCase(NODE_TEST, 'test_maxpool_2d_precomputed_same_upper'),
    TestCase(NODE_TEST, 'test_maxpool_2d_precomputed_strides'),
    TestCase(NODE_TEST, 'test_maxpool_2d_strides'),
    TestCase(NODE_TEST, 'test_maxpool_3d_default'),
    TestCase(NODE_TEST, 'test_averagepool_1d_default', fail=fail_1d_conv_pool),
    TestCase(NODE_TEST, 'test_averagepool_2d_default'),
    TestCase(NODE_TEST, 'test_averagepool_2d_precomputed_pads'),
    TestCase(NODE_TEST, 'test_averagepool_2d_precomputed_pads_count_include_pad'),
    TestCase(NODE_TEST, 'test_averagepool_2d_precomputed_strides'),
    TestCase(NODE_TEST, 'test_averagepool_2d_strides'),
    TestCase(NODE_TEST, 'test_averagepool_2d_pads'),
    TestCase(NODE_TEST, 'test_averagepool_2d_pads_count_include_pad'),
    TestCase(NODE_TEST, 'test_averagepool_3d_default'),
    TestCase(NODE_TEST, 'test_globalmaxpool'),
    TestCase(NODE_TEST, 'test_globalmaxpool_precomputed'),
    TestCase(NODE_TEST, 'test_globalaveragepool'),
    TestCase(NODE_TEST, 'test_globalaveragepool_precomputed'),
    TestCase(NODE_TEST, 'test_upsample_nearest'),
    # TODO(take-cheeze): Other Resize-11 tests
    TestCase(NODE_TEST, 'test_resize_upsample_scales_nearest'),

    # The second ROI values mismatch. Let the test pass with
    # ridiculously large tolerance.
    TestCase(NODE_TEST, 'test_roialign', rtol=0.5, atol=0.5),

    TestCase(NODE_TEST, 'test_shape'),
    TestCase(NODE_TEST, 'test_shape_example'),
    TestCase(NODE_TEST, 'test_size'),
    TestCase(NODE_TEST, 'test_size_example'),

    TestCase(NODE_TEST, 'test_reshape_extended_dims'),
    TestCase(NODE_TEST, 'test_reshape_negative_dim'),
    TestCase(NODE_TEST, 'test_reshape_negative_extended_dims'),
    TestCase(NODE_TEST, 'test_reshape_one_dim'),
    TestCase(NODE_TEST, 'test_reshape_reduced_dims'),
    TestCase(NODE_TEST, 'test_reshape_reordered_all_dims'),
    TestCase(NODE_TEST, 'test_reshape_reordered_last_dims'),
    # TODO(hamaji): Support zero dims in reshape op.
    TestCase(NODE_TEST, 'test_reshape_zero_dim', fail=True),
    TestCase(NODE_TEST, 'test_reshape_zero_and_negative_dim', fail=True),
    TestCase(NODE_TEST, 'test_expand_dim_changed'),
    TestCase(NODE_TEST, 'test_expand_dim_unchanged'),
    TestCase(NODE_TEST, 'test_squeeze'),
    TestCase(NODE_TEST, 'test_squeeze_negative_axes'),
    TestCase(NODE_TEST, 'test_unsqueeze_axis_0'),
    TestCase(NODE_TEST, 'test_unsqueeze_axis_1'),
    TestCase(NODE_TEST, 'test_unsqueeze_axis_2'),
    TestCase(NODE_TEST, 'test_unsqueeze_axis_3'),
    TestCase(NODE_TEST, 'test_unsqueeze_negative_axes'),
    TestCase(NODE_TEST, 'test_unsqueeze_two_axes'),
    TestCase(NODE_TEST, 'test_unsqueeze_three_axes'),
    TestCase(NODE_TEST, 'test_unsqueeze_unsorted_axes'),
    TestCase(NODE_TEST, 'test_flatten_axis0'),
    TestCase(NODE_TEST, 'test_flatten_axis1'),
    TestCase(NODE_TEST, 'test_flatten_axis2'),
    TestCase(NODE_TEST, 'test_flatten_axis3'),
    TestCase(NODE_TEST, 'test_flatten_default_axis'),
    TestCase(NODE_TEST, 'test_flatten_negative_axis1'),
    TestCase(NODE_TEST, 'test_flatten_negative_axis2'),
    TestCase(NODE_TEST, 'test_flatten_negative_axis3'),
    TestCase(NODE_TEST, 'test_flatten_negative_axis4'),

    TestCase(NODE_TEST, 'test_slice'),
    TestCase(NODE_TEST, 'test_slice_default_axes'),
    TestCase(NODE_TEST, 'test_slice_default_steps'),
    TestCase(NODE_TEST, 'test_slice_end_out_of_bounds'),
    TestCase(NODE_TEST, 'test_slice_neg'),
    TestCase(NODE_TEST, 'test_slice_neg_steps'),
    TestCase(NODE_TEST, 'test_slice_negative_axes'),
    TestCase(NODE_TEST, 'test_slice_start_out_of_bounds'),
    TestCase(NODE_TEST, 'test_gather_0'),
    TestCase(NODE_TEST, 'test_gather_1'),
    TestCase(NODE_TEST, 'test_gather_elements_0'),
    TestCase(NODE_TEST, 'test_gather_elements_1'),
    TestCase(NODE_TEST, 'test_gather_elements_negative_indices'),
    TestCase(NODE_TEST, 'test_gather_negative_indices'),
    TestCase(NODE_TEST, 'test_gathernd_example_int32'),
    TestCase(NODE_TEST, 'test_gathernd_example_float32'),
    TestCase(NODE_TEST, 'test_scatter_with_axis'),
    TestCase(NODE_TEST, 'test_scatter_without_axis'),
    TestCase(NODE_TEST, 'test_scatter_elements_with_axis'),
    TestCase(NODE_TEST, 'test_scatter_elements_with_negative_indices'),
    TestCase(NODE_TEST, 'test_scatter_elements_without_axis'),
    TestCase(NODE_TEST, 'test_scatternd'),
    TestCase(NODE_TEST, 'test_concat_1d_axis_0'),
    TestCase(NODE_TEST, 'test_concat_1d_axis_negative_1'),
    TestCase(NODE_TEST, 'test_concat_2d_axis_0'),
    TestCase(NODE_TEST, 'test_concat_2d_axis_1'),
    TestCase(NODE_TEST, 'test_concat_2d_axis_negative_1'),
    TestCase(NODE_TEST, 'test_concat_2d_axis_negative_2'),
    TestCase(NODE_TEST, 'test_concat_3d_axis_0'),
    TestCase(NODE_TEST, 'test_concat_3d_axis_1'),
    TestCase(NODE_TEST, 'test_concat_3d_axis_2'),
    TestCase(NODE_TEST, 'test_concat_3d_axis_negative_1'),
    TestCase(NODE_TEST, 'test_concat_3d_axis_negative_2'),
    TestCase(NODE_TEST, 'test_concat_3d_axis_negative_3'),
    TestCase(NODE_TEST, 'test_split_equal_parts_1d'),
    TestCase(NODE_TEST, 'test_split_equal_parts_2d'),
    TestCase(NODE_TEST, 'test_split_equal_parts_default_axis'),
    TestCase(NODE_TEST, 'test_split_variable_parts_1d'),
    TestCase(NODE_TEST, 'test_split_variable_parts_2d'),
    TestCase(NODE_TEST, 'test_split_variable_parts_default_axis'),
    TestCase(NODE_TEST, 'test_transpose_all_permutations_0'),
    TestCase(NODE_TEST, 'test_transpose_all_permutations_1'),
    TestCase(NODE_TEST, 'test_transpose_all_permutations_2'),
    TestCase(NODE_TEST, 'test_transpose_all_permutations_3'),
    TestCase(NODE_TEST, 'test_transpose_all_permutations_4'),
    TestCase(NODE_TEST, 'test_transpose_all_permutations_5'),
    TestCase(NODE_TEST, 'test_transpose_default'),
    TestCase(NODE_TEST, 'test_depthtospace_crd_mode'),
    TestCase(NODE_TEST, 'test_depthtospace_crd_mode_example'),
    TestCase(NODE_TEST, 'test_depthtospace_dcr_mode'),
    TestCase(NODE_TEST, 'test_depthtospace_example'),

    TestCase(NODE_TEST, 'test_gemm_all_attributes'),
    TestCase(NODE_TEST, 'test_gemm_alpha'),
    TestCase(NODE_TEST, 'test_gemm_beta'),
    TestCase(NODE_TEST, 'test_gemm_default_matrix_bias'),
    TestCase(NODE_TEST, 'test_gemm_default_no_bias'),
    TestCase(NODE_TEST, 'test_gemm_default_scalar_bias'),
    TestCase(NODE_TEST, 'test_gemm_default_single_elem_vector_bias'),
    TestCase(NODE_TEST, 'test_gemm_default_vector_bias'),
    TestCase(NODE_TEST, 'test_gemm_default_zero_bias'),
    TestCase(NODE_TEST, 'test_gemm_transposeA'),
    TestCase(NODE_TEST, 'test_gemm_transposeB'),

    # Recurrent networks.
    TestCase(NODE_TEST, 'test_rnn_seq_length'),
    TestCase(NODE_TEST, 'test_simple_rnn_defaults'),
    TestCase(NODE_TEST, 'test_simple_rnn_with_initial_bias'),
    TestCase(NODE_TEST, 'test_gru_defaults'),
    TestCase(NODE_TEST, 'test_gru_seq_length'),
    TestCase(NODE_TEST, 'test_gru_with_initial_bias'),
    TestCase(NODE_TEST, 'test_lstm_defaults'),
    TestCase(NODE_TEST, 'test_lstm_with_initial_bias'),
    TestCase(NODE_TEST, 'test_lstm_with_peepholes', rtol=5e-2),

    TestCase(NODE_TEST, 'test_softmax_axis_0'),
    TestCase(NODE_TEST, 'test_softmax_axis_1'),
    TestCase(NODE_TEST, 'test_softmax_axis_2'),
    TestCase(NODE_TEST, 'test_softmax_default_axis'),
    TestCase(NODE_TEST, 'test_softmax_example'),
    TestCase(NODE_TEST, 'test_softmax_large_number'),
    TestCase(NODE_TEST, 'test_softmax_negative_axis'),
    TestCase(NODE_TEST, 'test_logsoftmax_axis_0'),
    TestCase(NODE_TEST, 'test_logsoftmax_axis_1'),
    TestCase(NODE_TEST, 'test_logsoftmax_axis_2'),
    TestCase(NODE_TEST, 'test_logsoftmax_default_axis'),
    TestCase(NODE_TEST, 'test_logsoftmax_example_1'),
    TestCase(NODE_TEST, 'test_logsoftmax_large_number', rtol=5e-3),
    TestCase(NODE_TEST, 'test_logsoftmax_negative_axis'),
    TestCase(NODE_TEST, 'test_softplus'),
    TestCase(NODE_TEST, 'test_softplus_example'),
    TestCase(NODE_TEST, 'test_softsign'),
    TestCase(NODE_TEST, 'test_softsign_example'),

    TestCase(NODE_TEST, 'test_sum_example'),
    TestCase(NODE_TEST, 'test_sum_one_input'),
    TestCase(NODE_TEST, 'test_sum_two_inputs'),
    TestCase(NODE_TEST, 'test_mean_example'),
    TestCase(NODE_TEST, 'test_mean_one_input'),
    TestCase(NODE_TEST, 'test_mean_two_inputs'),
    TestCase(NODE_TEST, 'test_max_example'),
    TestCase(NODE_TEST, 'test_max_one_input'),
    TestCase(NODE_TEST, 'test_max_two_inputs'),
    TestCase(NODE_TEST, 'test_min_example'),
    TestCase(NODE_TEST, 'test_min_one_input'),
    TestCase(NODE_TEST, 'test_min_two_inputs'),
    TestCase(NODE_TEST, 'test_clip'),
    TestCase(NODE_TEST, 'test_clip_default_inbounds'),
    TestCase(NODE_TEST, 'test_clip_default_max'),
    TestCase(NODE_TEST, 'test_clip_default_min'),
    TestCase(NODE_TEST, 'test_clip_example'),
    TestCase(NODE_TEST, 'test_clip_inbounds'),
    TestCase(NODE_TEST, 'test_clip_outbounds'),
    TestCase(NODE_TEST, 'test_clip_splitbounds'),

    TestCase(NODE_TEST, 'test_argmax_default_axis_example'),
    TestCase(NODE_TEST, 'test_argmax_default_axis_random'),
    TestCase(NODE_TEST, 'test_argmax_keepdims_example'),
    TestCase(NODE_TEST, 'test_argmax_keepdims_random'),
    TestCase(NODE_TEST, 'test_argmax_negative_axis_keepdims_example'),
    TestCase(NODE_TEST, 'test_argmax_negative_axis_keepdims_random'),
    TestCase(NODE_TEST, 'test_argmax_no_keepdims_example'),
    TestCase(NODE_TEST, 'test_argmax_no_keepdims_random'),
    TestCase(NODE_TEST, 'test_argmin_default_axis_example'),
    TestCase(NODE_TEST, 'test_argmin_default_axis_random'),
    TestCase(NODE_TEST, 'test_argmin_keepdims_example'),
    TestCase(NODE_TEST, 'test_argmin_keepdims_random'),
    TestCase(NODE_TEST, 'test_argmin_negative_axis_keepdims_example'),
    TestCase(NODE_TEST, 'test_argmin_negative_axis_keepdims_random'),
    TestCase(NODE_TEST, 'test_argmin_no_keepdims_example'),
    TestCase(NODE_TEST, 'test_argmin_no_keepdims_random'),
    TestCase(NODE_TEST, 'test_hardmax_axis_0'),
    TestCase(NODE_TEST, 'test_hardmax_axis_1'),
    TestCase(NODE_TEST, 'test_hardmax_axis_2'),
    TestCase(NODE_TEST, 'test_hardmax_default_axis'),
    TestCase(NODE_TEST, 'test_hardmax_example'),
    TestCase(NODE_TEST, 'test_hardmax_negative_axis'),
    TestCase(NODE_TEST, 'test_hardmax_one_hot'),

    # Reductions.
    TestCase(NODE_TEST, 'test_reduce_l1_default_axes_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_l1_default_axes_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_l1_do_not_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_l1_do_not_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_l1_keep_dims_example'),
    TestCase(NODE_TEST, 'test_reduce_l1_keep_dims_random'),
    TestCase(NODE_TEST, 'test_reduce_l1_negative_axes_keep_dims_example'),
    TestCase(NODE_TEST, 'test_reduce_l1_negative_axes_keep_dims_random'),
    TestCase(NODE_TEST, 'test_reduce_l2_default_axes_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_l2_default_axes_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_l2_do_not_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_l2_do_not_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_l2_keep_dims_example'),
    TestCase(NODE_TEST, 'test_reduce_l2_keep_dims_random'),
    TestCase(NODE_TEST, 'test_reduce_l2_negative_axes_keep_dims_example'),
    TestCase(NODE_TEST, 'test_reduce_l2_negative_axes_keep_dims_random'),
    TestCase(NODE_TEST, 'test_reduce_log_sum'),
    TestCase(NODE_TEST, 'test_reduce_log_sum_asc_axes'),
    TestCase(NODE_TEST, 'test_reduce_log_sum_default'),
    TestCase(NODE_TEST, 'test_reduce_log_sum_desc_axes'),
    TestCase(NODE_TEST, 'test_reduce_log_sum_exp_default_axes_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_log_sum_exp_default_axes_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_log_sum_exp_do_not_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_log_sum_exp_do_not_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_log_sum_exp_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_log_sum_exp_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_log_sum_exp_negative_axes_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_log_sum_exp_negative_axes_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_log_sum_negative_axes'),
    TestCase(NODE_TEST, 'test_reduce_max_default_axes_keepdim_example'),
    TestCase(NODE_TEST, 'test_reduce_max_default_axes_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_max_do_not_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_max_do_not_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_max_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_max_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_max_negative_axes_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_max_negative_axes_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_mean_default_axes_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_mean_default_axes_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_mean_do_not_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_mean_do_not_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_mean_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_mean_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_mean_negative_axes_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_mean_negative_axes_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_min_default_axes_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_min_default_axes_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_min_do_not_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_min_do_not_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_min_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_min_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_min_negative_axes_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_min_negative_axes_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_prod_default_axes_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_prod_default_axes_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_sum_default_axes_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_sum_default_axes_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_sum_do_not_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_sum_do_not_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_sum_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_sum_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_sum_negative_axes_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_sum_negative_axes_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_sum_square_default_axes_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_sum_square_default_axes_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_sum_square_do_not_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_sum_square_do_not_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_sum_square_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_sum_square_keepdims_random'),
    TestCase(NODE_TEST, 'test_reduce_sum_square_negative_axes_keepdims_example'),
    TestCase(NODE_TEST, 'test_reduce_sum_square_negative_axes_keepdims_random'),

    TestCase(NODE_TEST, 'test_batchnorm_example'),
    TestCase(NODE_TEST, 'test_batchnorm_epsilon'),
    TestCase(NODE_TEST, 'test_lrn'),
    TestCase(NODE_TEST, 'test_lrn_default'),
    TestCase(NODE_TEST, 'test_dropout_default'),
    TestCase(NODE_TEST, 'test_dropout_random'),

    TestCase(NODE_TEST, 'test_isnan'),
    TestCase(NODE_TEST, 'test_isinf'),
    TestCase(NODE_TEST, 'test_isinf_negative'),
    TestCase(NODE_TEST, 'test_isinf_positive'),
    TestCase(NODE_TEST, 'test_where_example'),
    TestCase(NODE_TEST, 'test_where_long_example'),
    TestCase(NODE_TEST, 'test_nonzero_example'),
    TestCase(NODE_TEST, 'test_nonmaxsuppression_suppress_by_IOU'),
    TestCase(NODE_TEST, 'test_nonmaxsuppression_center_point_box_format'),
    TestCase(NODE_TEST, 'test_nonmaxsuppression_flipped_coordinates'),
    TestCase(NODE_TEST, 'test_nonmaxsuppression_identical_boxes'),
    TestCase(NODE_TEST, 'test_nonmaxsuppression_single_box'),
    TestCase(NODE_TEST, 'test_nonmaxsuppression_suppress_by_IOU_and_scores'),
    TestCase(NODE_TEST, 'test_nonmaxsuppression_two_batches'),
    TestCase(NODE_TEST, 'test_nonmaxsuppression_limit_output_size'),
    TestCase(NODE_TEST, 'test_nonmaxsuppression_two_classes'),
    TestCase(NODE_TEST, 'test_top_k'),
    TestCase(NODE_TEST, 'test_top_k_negative_axis'),
    TestCase(NODE_TEST, 'test_top_k_smallest'),

    # Quantization / integer ops.
    TestCase(NODE_TEST, 'test_quantizelinear'),
    TestCase(NODE_TEST, 'test_dequantizelinear'),
    TestCase(NODE_TEST, 'test_qlinearmatmul_2D'),
    TestCase(NODE_TEST, 'test_qlinearmatmul_3D'),
    TestCase(NODE_TEST, 'test_qlinearconv'),
    TestCase(NODE_TEST, 'test_convinteger_with_padding'),
    TestCase(NODE_TEST, 'test_basic_convinteger'),
    TestCase(NODE_TEST, 'test_matmulinteger'),
    TestCase(NODE_TEST, 'test_round'),
    TestCase(NODE_TEST, 'test_bitshift_left_uint8'),
    TestCase(NODE_TEST, 'test_bitshift_right_uint8'),
    # TODO(take-cheeze): Support larger unsigned int types
    # TestCase(NODE_TEST, 'test_bitshift_left_uint64'),
    # TestCase(NODE_TEST, 'test_bitshift_left_uint32'),
    # TestCase(NODE_TEST, 'test_bitshift_left_uint16'),
    # TestCase(NODE_TEST, 'test_bitshift_right_uint64'),
    # TestCase(NODE_TEST, 'test_bitshift_right_uint32'),
    # TestCase(NODE_TEST, 'test_bitshift_right_uint16'),
    TestCase(NODE_TEST, 'test_scan9_sum'),
    TestCase(NODE_TEST, 'test_dynamicquantizelinear'),
    TestCase(NODE_TEST, 'test_dynamicquantizelinear_max_adjusted'),
    TestCase(NODE_TEST, 'test_dynamicquantizelinear_min_adjusted'),
    TestCase(NODE_TEST, 'test_dynamicquantizelinear_expanded'),
    TestCase(NODE_TEST, 'test_dynamicquantizelinear_max_adjusted_expanded'),
    TestCase(NODE_TEST, 'test_dynamicquantizelinear_min_adjusted_expanded'),
    TestCase(NODE_TEST, 'test_mvn'),
    TestCase(NODE_TEST, 'test_mvn_expanded'),

    # Simple (whole-model) tests.
    TestCase(SIMPLE_TEST, 'test_sign_model'),
    TestCase(SIMPLE_TEST, 'test_single_relu_model'),
    # TODO(hamaji): Come up with a good way to handle ONNX's shape
    # inference for sequence types.
    TestCase(SIMPLE_TEST, 'test_sequence_model1', skip_shape_inference=True),
    TestCase(SIMPLE_TEST, 'test_sequence_model2', skip_shape_inference=True),
    TestCase(SIMPLE_TEST, 'test_sequence_model3', skip_shape_inference=True),
    TestCase(SIMPLE_TEST, 'test_sequence_model4', skip_shape_inference=True),
    TestCase(SIMPLE_TEST, 'test_sequence_model5', skip_shape_inference=True),
    TestCase(SIMPLE_TEST, 'test_sequence_model6', skip_shape_inference=True),
    TestCase(SIMPLE_TEST, 'test_sequence_model7', skip_shape_inference=True),
    TestCase(SIMPLE_TEST, 'test_expand_shape_model1'),
    TestCase(SIMPLE_TEST, 'test_expand_shape_model2'),
    TestCase(SIMPLE_TEST, 'test_expand_shape_model3'),
    TestCase(SIMPLE_TEST, 'test_expand_shape_model4'),
]
TEST_CASES += [
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_AvgPool1d'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_AvgPool1d_stride'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_AvgPool2d'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_AvgPool2d_stride'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_AvgPool3d'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_AvgPool3d_stride'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_AvgPool3d_stride1_pad0_gpu_input'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_BatchNorm1d_3d_input_eval', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_BatchNorm2d_eval', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_BatchNorm2d_momentum_eval', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_BatchNorm3d_eval', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_BatchNorm3d_momentum_eval', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_ConstantPad2d'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv1d', fail=fail_1d_conv_pool),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv1d_dilated', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv1d_groups', fail=fail_1d_conv_pool),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv1d_pad1', fail=fail_1d_conv_pool),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv1d_pad1size1', fail=fail_1d_conv_pool),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv1d_pad2', fail=fail_1d_conv_pool),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv1d_pad2size1', fail=fail_1d_conv_pool),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv1d_stride', fail=fail_1d_conv_pool),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv2d'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv2d_depthwise'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv2d_depthwise_padded'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv2d_depthwise_strided'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv2d_depthwise_with_multiplier'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv2d_dilated', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv2d_groups'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv2d_groups_thnn', rtol=2e-4),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv2d_no_bias'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv2d_padding'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv2d_strided'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv3d'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv3d_dilated', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv3d_dilated_strided', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv3d_groups'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv3d_no_bias'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv3d_stride'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv3d_stride_padding'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_ConvTranspose2d', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_ConvTranspose2d_no_bias', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_ELU'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Embedding'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Embedding_sparse'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_GLU'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_GLU_dim'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_LeakyReLU'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_LeakyReLU_with_negval'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Linear', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Linear_no_bias'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_LogSoftmax'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_MaxPool1d', fail=fail_1d_conv_pool),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_MaxPool1d_stride', fail=fail_1d_conv_pool),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_MaxPool2d'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_MaxPool3d'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_MaxPool3d_stride'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_MaxPool3d_stride_padding'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_PReLU_1d', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_PReLU_1d_multiparam', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_PReLU_2d', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_PReLU_2d_multiparam', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_PReLU_3d', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_PReLU_3d_multiparam', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_PixelShuffle'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_PoissonNLLLLoss_no_reduce'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_ReLU'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_ReflectionPad2d', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_ReplicationPad2d', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_SELU'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Sigmoid'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Softmax'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Softmin'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Softplus'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Softsign', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Tanh'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_ZeroPad2d'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_log_softmax_dim3'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_log_softmax_lastdim'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_softmax_functional_dim3'),
TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_softmax_lastdim'),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_add_broadcast', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_add_size1_broadcast', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_add_size1_right_broadcast', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_add_size1_singleton_broadcast', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_addconstant', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_addmm', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_basic'),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_chunk'),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_clip'),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_concat2'),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_conv'),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_convtranspose', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_exp'),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_flatten'),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_index'),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_max'),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_maxpool'),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_min'),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_mm', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_non_float_params'),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_pad', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_params'),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_permute2'),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_pow', equal_nan=True),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_reduced_mean'),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_reduced_mean_keepdim'),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_reduced_sum'),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_reduced_sum_keepdim'),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_repeat', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_repeat_dim_overflow', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_selu'),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_sqrt', equal_nan=True),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_symbolic_override', fail=True),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_symbolic_override_nested'),
TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_view', fail=True),
]
# Register the directories of all hand-listed tests, then (with --all)
# add every remaining ONNX model found on disk as an expected failure.
TEST_PATHS = set()
for test_case in TEST_CASES:
    TEST_PATHS.add(test_case.test_dir)
if args.all:
    models = glob.glob(os.path.join(ONNX_TEST_DATA, '*/*/model.onnx'))
    for onnx in sorted(models):
        path = os.path.dirname(onnx)
        if path not in TEST_PATHS:
            case = TestCase(os.path.dirname(path), os.path.basename(path),
                            fail=True)
            TEST_CASES.append(case)
# Everything appended below this line is NOT part of the official ONNX suite;
# the count is reported in the final summary.
num_official_onnx_tests = len(TEST_CASES)
# Generated backprop tests (onnx-chainer and pure-chainer variants) must
# already exist on disk: they are produced by a separate generation step.
for backprop_test in gen_backprop_tests_oc.get_backprop_tests():
    assert os.path.exists(backprop_test.test_dir)
    TEST_CASES.append(backprop_test)
for backprop_test in gen_backprop_tests_pc.get_backprop_tests():
    assert os.path.exists(backprop_test.test_dir)
    TEST_CASES.append(backprop_test)
for test in gen_extra_test.get_tests():
    assert os.path.exists(test.test_dir), test.test_dir
    TEST_CASES.append(test)
# Large generated tests always want a GPU.
for name, _, _, kwargs in gen_large_tests_oc.get_large_tests():
    dirname = 'out'
    TEST_CASES.append(TestCase(dirname, name, want_gpu=True, **kwargs))
TEST_CASES.append(TestCase('out', 'backprop_test_mnist_mlp'))
TEST_CASES.append(TestCase('data', 'shufflenet', want_gpu=True))
TEST_CASES.append(TestCase('data', 'mnist'))
TEST_CASES.extend(ch2o_tests.get())
TEST_CASES.extend(elichika_tests.get())
TEST_CASES.extend(onnx_chainer_tests.get())
TEST_CASES.extend(onnx_real_tests.get())
TEST_CASES.extend(gen_chainercv_model_tests.get_tests())
# Derive extra variants from every backprop test: two-phase execution,
# computation-order (recomputation) schedules, and GPU runs.
new_tests = []
for test in TEST_CASES:
    if not test.is_backprop:
        continue
    # TODO(mkusumoto): remove this "if" after fixing issue
    if not test.name.startswith('large_oc'):
        new_test = copy.copy(test)
        new_test.name = test.name + '_two_phase'
        new_test.is_backprop_two_phase = True
        new_tests.append(new_test)
    # TODO(hamaji): Temporarily disabled due to shape inference change in ONNX.
    if test.name.startswith('backprop_test_oc_split_2'):
        continue
    # TODO(hamaji): Unexpected shape will appear due to broadcast.
    if test.name.startswith('backprop_test_oc_pow_const'):
        continue
    if test.fixed_batch_norm:
        continue
    # computation_order is supported in limited test cases
    if test.name.startswith('backprop_test_oc'):
        for two_phase in [False, True]:
            new_test = copy.copy(test)
            new_test.name = test.name + '_computation_order'
            new_test.computation_order = 'dummy'
            new_test.is_backprop_two_phase = two_phase
            new_tests.append(new_test)
    # add more tests for computation_order using CustomPolicy
    if test.name.startswith('backprop_test_oc_tanh2') or\
       test.name.startswith('backprop_test_oc_mul2') or\
       test.name.startswith('backprop_test_oc_max_pool2'):
        # Hand-written schedule strings; presumably CF/BF/FF tokens denote
        # compute-forward / backward / forget steps -- TODO confirm against
        # the computation-order implementation.
        order_strings = [
            'CF0,CF1,BF1,BF0',
            'CF0,CF1,FFo0,CF1,BF1,BF0',
            'CF0,CF1,FFo0,FFt0,CF0,CF1,BF1,BF0',
            'CF0,CF1,FFt0,FFo0,CF0,CF1,BF1,BF0',
            'CF0,CF1,FFt0,CF0,BF1,BF0',
            'CF0,CF1,FFt0,CF0,FFt0,CF0,BF1,BF0',
            'CF0,CF1,FFt0,CF0,FFo0,CF1,BF1,BF0',
            'CF0,CF1,FFt0,CF0,FFo0,CF1,FFt0,CF0,BF1,BF0',
            'CF0,CF1,BF1,FFt0,CF0,BF0',
        ]
        if not test.name.startswith('backprop_test_oc_max_pool2'):
            order_strings.append(
                'CF0,CF1,BF1,FFt0,CF0,FFo0,FFt0,CF0,CF1,BF0'
            )
        for order_string in order_strings:
            for two_phase in [False, True]:
                new_test = copy.copy(test)
                new_test.name = test.name + '_custom_computation_order_'\
                    + order_string
                new_test.computation_order = 'custom_' + order_string
                new_test.is_backprop_two_phase = two_phase
                new_tests.append(new_test)
    if test.name.startswith('backprop_test_oc_branched_conv'):
        order_string = 'CF0,CF1,CF2,CF3,BF3,FFt2,BF1,CF2,BF2,BF0'
        for two_phase in [False, True]:
            new_test = copy.copy(test)
            new_test.name = test.name + '_custom_computation_order_'\
                + order_string
            new_test.computation_order = 'custom_' + order_string
            new_test.is_backprop_two_phase = two_phase
            new_tests.append(new_test)
    # run gpu test for the test cases of onnx_chainer
    # NOTE: We don't add tests for float16 case because they fail with --fuse
    # option. We may resolve this in future.
    if test.name.startswith('backprop_test_oc') and\
       not test.name.endswith('float16'):
        new_test = copy.copy(test)
        new_test.want_gpu = True
        new_tests.append(new_test)
for test in new_tests:
    TEST_CASES.append(test)
# With --ngraph, mark tests known to break under the nGraph backend as
# expected failures.
if args.ngraph:
    # TODO(hamaji): Triage these failures.
    ngraph_blacklist = [
        'extra_test_loop_scan_out',
        'extra_backprop_test_need_stack_loop',
        'ch2o_node_Linear_backprop',
        'ch2o_node_Linear_backprop_diversed',
        'backprop_test_oc_mul_same_float32_two_phase',
        'backprop_test_oc_mul_same_float64_two_phase',
        'backprop_test_oc_sigmoid_float64_two_phase',
        'extra_backprop_test_need_stack_loop_two_phase',
        'test_gemm_default_no_bias',
    ]
    for test in TEST_CASES:
        if test.name in ngraph_blacklist:
            test.fail = True
        if '_float16' in test.name:
            # TODO(hamaji): Skip float16 tests since nGraph
            # automatically promote float16 to float32.
            test.fail = True
        if test.name.endswith('_sigmoid_float64'):
            # TODO(hamaji): nGraph seems not to support fp64 sigmoid.
            test.fail = True
        if re.search(r'grouped_conv_.*float64', test.name):
            test.fail = True
# With --failed, rerun only the tests named in the previous failure log
# (sections are delimited by "=== <name> ===" headers).
if args.failed:
    if not os.path.exists(args.failure_log):
        raise RuntimeError('No failure log in %s' % args.failure_log)
    failed_test_names = set()
    with open(args.failure_log, 'rb') as f:
        for line in f:
            if line.startswith(b'=== '):
                matched = re.match(r'=== (\S+) ===', line.decode())
                if matched:
                    failed_test_names.add(matched.group(1))
    TEST_CASES = [case for case in TEST_CASES
                  if case.name in failed_test_names]
# Optional regex name filter, then drop expected failures unless --all.
if args.test_filter is not None:
    reg = re.compile(args.test_filter)
    TEST_CASES = [case for case in TEST_CASES if reg.search(case.name)]
if not args.all:
    TEST_CASES = [case for case in TEST_CASES if not case.fail]
def _start_output(msg):
    """Announce a progress label, overwriting the current line on a TTY."""
    out = sys.stdout
    if not out.isatty():
        out.write(msg)
        return
    # Keep the label within one line so '\r' overwriting stays clean:
    # long names are shortened to head...tail.
    label = msg if len(msg) <= 75 else msg[:36] + '...' + msg[-36:]
    out.write('\r' + ' ' * 78 + '\r' + label)
class TestRunner(object):
    """Runs prepared test cases as subprocesses, up to N at a time."""

    def __init__(self, test_cases, show_log):
        self.test_cases = test_cases
        self.tested = []  # every test case that finished
        self.failed = []  # subset of `tested` that exited non-zero
        self.show_log = show_log

    def run(self, num_parallel_jobs):
        """Execute all test cases with at most num_parallel_jobs in flight."""
        tests = list(reversed(self.test_cases))
        procs = {}  # pid -> (test_case, Popen, open stderr log file)
        while tests or procs:
            # Spawn children until the parallel-job budget is full.
            if tests and len(procs) < num_parallel_jobs:
                test_case = tests.pop()
                if num_parallel_jobs == 1:
                    # Sequential mode: announce the test before running it.
                    _start_output('%s... ' % test_case.name)
                log_file = open(test_case.log_filename, 'wb')
                proc = subprocess.Popen(test_case.args,
                                        stdout=subprocess.PIPE,
                                        stderr=log_file)
                procs[proc.pid] = (test_case, proc, log_file)
                continue
            # Budget full (or nothing left to spawn): reap one finished child.
            assert procs
            pid, status = os.wait()
            assert pid in procs
            test_case, proc, log_file = procs[pid]
            del procs[pid]
            log_file.close()
            if num_parallel_jobs != 1:
                # Parallel mode: announce the test as it finishes instead.
                _start_output('%s... ' % test_case.name)
            self.tested.append(test_case)
            if status == 0:
                if test_case.fail:
                    sys.stdout.write('%sOK (unexpected)%s\n' % (YELLOW, RESET))
                else:
                    sys.stdout.write('%sOK%s' % (GREEN, RESET))
                    if not sys.stdout.isatty():
                        # No '\r' overwriting without a TTY, so end the line.
                        sys.stdout.write('\n')
            else:
                self.failed.append(test_case)
                sys.stdout.write('%sFAIL%s: %s\n' %
                                 (RED, RESET, test_case.repro_cmdline()))
            if status != 0 or self.show_log:
                # Dump the captured stderr log; after a failure, repeat the
                # repro command so it sits right below the log.
                sys.stdout.buffer.write(test_case.log_read())
                if status != 0:
                    sys.stdout.write('%s$%s %s\n' %
                                     (RED, RESET, test_case.repro_cmdline()))
            sys.stdout.flush()
        _start_output('')
        sys.stdout.write('\n')
def main():
    """Build the project (unless skipped), assemble the command line for every
    selected test case, then run them: CPU tests with args.jobs-way
    parallelism, GPU tests one at a time."""
    if not args.skip_build:
        if os.path.exists('Makefile'):
            subprocess.check_call(['make', '-j4'])
        elif os.path.exists('build.ninja'):
            subprocess.check_call('ninja')

    # Locate the runner binaries, auto-detecting the build directory.
    if args.build_dir is None:
        if os.path.exists('build/CMakeCache.txt'):
            args.build_dir = 'build'
        elif os.path.exists('CMakeCache.txt'):
            args.build_dir = '.'
        else:
            args.build_dir = 'build'
    run_onnx = os.path.join(args.build_dir, 'tools/run_onnx')
    run_onnx_menoh = os.path.join(args.build_dir, 'menoh/run_onnx_menoh')

    tested = []
    failed = []
    tests = []
    gpu_tests = []
    for test_case in TEST_CASES:
        # Plain node tests go through the Menoh runner; anything that needs
        # extra features falls back to the full run_onnx tool.
        runner = run_onnx_menoh
        if (test_case.is_backprop or
                test_case.is_backprop_two_phase or
                test_case.equal_nan or
                test_case.skip_shape_inference or
                test_case.skip_runtime_type_check or
                test_case.want_gpu or
                test_case.computation_order or
                not test_case.test_dir.startswith(NODE_TEST)):
            runner = run_onnx
        test_case.args = [runner, '--test', test_case.test_dir]
        test_case.args.append('--compiler_log')
        is_gpu = False
        # Translate per-test-case options into command-line flags.
        if test_case.rtol is not None:
            test_case.args += ['--rtol', str(test_case.rtol)]
        if test_case.atol is not None:
            test_case.args += ['--atol', str(test_case.atol)]
        if test_case.equal_nan:
            test_case.args += ['--equal_nan']
        if test_case.skip_shape_inference:
            test_case.args.append('--skip_inference')
        if test_case.skip_runtime_type_check:
            test_case.args.append('--skip_runtime_type_check')
        if test_case.fixed_batch_norm:
            test_case.args.append('--fixed_batch_norm')
        if test_case.is_backprop_two_phase:
            test_case.args.append('--backprop_two_phase')
        elif test_case.is_backprop:
            test_case.args.append('--backprop')
        # A per-test computation order takes precedence over the global one.
        if test_case.computation_order:
            test_case.args.append(
                '--computation_order=' + test_case.computation_order)
        elif args.computation_order:
            test_case.args.append(
                '--computation_order=' + args.computation_order)
        if test_case.backend is not None:
            test_case.args.append('--backend')
            test_case.args.append(test_case.backend)
        if args.verbose:
            test_case.args.append('--verbose')
        device = args.device
        if test_case.want_gpu or args.use_gpu_all:
            # GPU tests are skipped entirely unless a GPU flag was given.
            if not args.use_gpu and not args.use_gpu_all:
                continue
            if device is None:
                device = 'cuda'
            is_gpu = True
        if device is not None:
            test_case.args.extend(['-d', device])
        if args.fuse:
            test_case.args.append('--fuse_operations')
            if is_gpu:
                test_case.args.append('--use_nvrtc')
        if args.ngraph:
            test_case.args.append('--fuse_operations')
            test_case.args.append('--use_ngraph')
        if args.snpe:
            test_case.args.append('--use_snpe')
        if args.cache:
            test_case.args.append('--use_cached_model')
        if is_gpu:
            gpu_tests.append(test_case)
        else:
            tests.append(test_case)

    print('Testing %d tests with %s and %s' %
          (len(tests + gpu_tests), run_onnx, run_onnx_menoh))
    for test in tests + gpu_tests:
        test.prepare()
    # CPU tests run in parallel; GPU tests run one at a time.
    for tests, num_jobs in [(tests, args.jobs), (gpu_tests, 1)]:
        runner = TestRunner(tests, args.show_log)
        runner.run(num_jobs)
        tested += runner.tested
        failed += runner.failed

    if failed:
        # Write the failure log consumed by --failed reruns.
        with open(args.failure_log, 'wb') as f:
            for test in failed:
                f.write(('=== %s ===\n' % test.name).encode())
                f.write(('$ %s\n' % test.repro_cmdline()).encode())
                f.write(test.log_read())
                f.write('\n'.encode())
        print('%d/%d tests failed! (see %s)' %
              (len(failed), len(tested), args.failure_log))
        sys.exit(1)
    else:
        print('ALL %d tests OK! (%d from ONNX)' %
              (len(tested), num_official_onnx_tests))


main()
| 47.83209 | 104 | 0.729211 |
acee21df67d15837674408526b8d9b179dbd3050 | 10,690 | py | Python | src/main.py | RosaAmk/P-Androide | a23ef9be0f0aac0fe8b621c21188997b0e3235ee | [
"MIT"
] | null | null | null | src/main.py | RosaAmk/P-Androide | a23ef9be0f0aac0fe8b621c21188997b0e3235ee | [
"MIT"
] | null | null | null | src/main.py | RosaAmk/P-Androide | a23ef9be0f0aac0fe8b621c21188997b0e3235ee | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 20:51:21 2021
@author: roza
"""
from agent import Agent
from gen_graph import Graph
import random
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
import graphviz as pgv
import plot
from read_data import graph_sim
nb_agent = 100
nb_it = 20000
density=2/nb_agent
class Env():
    """Shared environment holding the two resource counters used by agents.

    Both counters start at zero; the simulation loops reset them before
    every micro-step.
    """

    def __init__(self):
        self.R1 = self.R2 = 0
def gen_from_density(n, d, method):
    """Create n agents wired by a random graph of edge density d.

    Args:
        n: number of agents to create.
        d: edge density forwarded to the random ``Graph`` generator.
        method: selection-method name forwarded to every ``Agent``.

    Returns:
        (agents, edges): the list of ``Agent`` objects and the list of
        (i, j) index pairs produced by ``Graph.gen_graph``.
    """
    topology = Graph(n, d)
    agents = [Agent(method) for _ in range(n)]
    topology.gen_graph()
    # NOTE(review): the original also built a graphviz.Digraph of the agents'
    # g_skill labels here, but never rendered or returned it; that dead work
    # has been removed.
    return agents, topology.edges_final
def gen_star(n, nb_branche, method):
    """Build n agents arranged as a star of nb_branche chains around node 0."""
    agents = [Agent(method) for _ in range(n)]
    # Tail (last attached node) of each branch; -1 means the branch is still
    # empty, so the next node attaches directly to the hub (node 0).
    tails = [-1] * nb_branche
    edges = []
    branch = 0
    for node in range(1, n):
        previous = tails[branch]
        edges.append((0, node) if previous == -1 else (previous, node))
        tails[branch] = node
        branch = (branch + 1) % nb_branche
    return agents, edges
def _2_complet(n, method):
    """Two complete halves of n//2 agents joined by a single bridge edge."""
    agents = [Agent(method) for _ in range(n)]
    half = n // 2
    first_clique = [(i, j) for i in range(half - 1) for j in range(i + 1, half)]
    second_clique = [(i, j) for i in range(half, n - 1) for j in range(i + 1, n)]
    bridge = [(half - 1, half)]
    return agents, first_clique + second_clique + bridge
def gen_complet(n, method):
    """Complete graph: every pair among the n agents is connected."""
    agents = [Agent(method) for _ in range(n)]
    edges = []
    for i in range(n):
        for j in range(i + 1, n):
            edges.append((i, j))
    return agents, edges
def gen_ring(n, method):
    """Ring topology: agent i is connected to agent (i + 1) mod n."""
    agents = [Agent(method) for _ in range(n)]
    edges = []
    for i in range(n):
        edges.append((i, (i + 1) % n))
    return agents, edges
def gen_chaine(n, method):
    """Chain topology: agents connected along a simple path 0-1-...-(n-1)."""
    agents = [Agent(method) for _ in range(n)]
    edges = [(i, i + 1) for i in range(n - 1)]
    return agents, edges
def get_graph(graph, n, method):
    """Dispatch a topology name to its generator.

    Args:
        graph: 'ring', 'chaine', 'star3', 'star10' or 'complet'
            ('alea' is handled separately by the caller, which needs a density).
        n: number of agents.
        method: selection-method name forwarded to every Agent.

    Returns:
        (agents, edges) as produced by the matching generator.

    Raises:
        ValueError: for an unknown topology name. Previously this silently
            returned None, which made callers fail later with an opaque
            unpacking TypeError.
    """
    if graph == 'ring':
        return gen_ring(n, method)
    elif graph == 'chaine':
        return gen_chaine(n, method)
    elif graph == 'star3':
        return gen_star(n, 3, method)
    elif graph == 'star10':
        return gen_star(n, 10, method)
    elif graph == 'complet':
        return gen_complet(n, method)
    raise ValueError("unknown graph type: %r" % (graph,))
def exp_real_data(nb_it = 20000, nb_agent=256):
    """Run the evolution experiment on the recorded real-world contact graph.

    For every selection method, 11 independent runs of nb_it iterations are
    simulated on the time-varying edge lists served by graph_sim; per-run
    plots are drawn and the active-agent counts are appended to realdata.txt.

    NOTE(review): indentation below was reconstructed from a
    whitespace-mangled dump; nesting of the two trailing ``if`` tests in the
    final counting loop should be confirmed against the original file.
    """
    res = dict()  # method name -> list of active-agent counts, one per run
    methods = [ 'fitness prop','random', 'elitist', 'rank prop']
    #methods = ['random']
    gskills = dict()  # NOTE(review): built every run but never read afterwards
    data = graph_sim()  # provider of the recorded time-varying edge lists
    for method in methods:
        print(method)
        #c2 = Counter()
        c3 = []  # active-agent count at the end of each run
        for run in range(11):
            # Map each g_skill bucket (-1.0 .. 1.0, step 0.1) to an index.
            i = 0
            for e in np.linspace(-1,1,21):
                gskills[round(e,1)] = i
                i += 1
            #gskills_mat =np.zeros((21,nb_it//100))
            graph = []  # the population of agents (index == node id)
            for i in range(nb_agent):
                graph.append(Agent(method))
            # chrono_mat[a, t]: agent a's rounded g_skill at iteration t.
            chrono_mat = np.zeros((nb_agent,nb_it))
            c = Counter()  # final g_skill histogram, pre-seeded with buckets
            for e in np.linspace(-1,1,21):
                c[round(e,1)] = 0
            env = Env()
            l = list(range(nb_agent))
            # Per-iteration population breakdown (index 0 = initial state).
            dead_red = (nb_it+1)*[0]
            active_red = (nb_it+1)*[0]
            dead_blue = (nb_it+1)*[0]
            active_blue = (nb_it+1)*[0]
            none = (nb_it+1)*[0]
            # Classify the starting population: red = negative g_skill,
            # blue = positive, none = unset or zero.
            for a in graph:
                if a.g_skill == None or a.g_skill == 0:
                    none[0] += 1
                elif a.g_skill < 0:
                    if a.is_stopped():
                        dead_red[0] += 1
                    else:
                        active_red[0] += 1
                elif a.g_skill > 0:
                    if a.is_stopped():
                        dead_blue[0] += 1
                    else:
                        active_blue[0] += 1
            for i in range(nb_it):
                print(i)
                # Four micro-steps per iteration, each with its own recorded
                # edge set.
                for j in range(4):
                    edges = data.get_edges(i*4+j)
                    env.R1 = nb_agent/2
                    env.R2 = nb_agent/2
                    random.shuffle(l)
                    for ag in l:
                        graph[ag].move()
                        graph[ag].compute_fitness(env)
                    # Both endpoints of every edge broadcast to each other,
                    # with a random speaker going first.
                    for e in edges:
                        first = random.choice([0,1])
                        graph[e[first]].broadcast(graph[e[1-first]])
                        graph[e[1-first]].broadcast(graph[e[first]])
                # Selection step plus bookkeeping for iteration i.
                n = 0
                for a in graph:
                    a.select_genome()
                    a.genomeList = []
                    if a.g_skill != None:
                        chrono_mat[n, i] = round(a.g_skill,1)
                    if a.g_skill == None or a.g_skill == 0:
                        none[i+1] += 1
                    elif a.g_skill < 0:
                        if a.is_stopped():
                            dead_red[i+1] += 1
                        else:
                            active_red[i+1] += 1
                    elif a.g_skill > 0:
                        if a.is_stopped():
                            dead_blue[i+1] += 1
                        else:
                            active_blue[i+1] += 1
                    n += 1
                #if i%100 == 0 and i >19700:
                    #plot.draw_graph(graph,edges,"results/etoile3_2000/it"+str(i)+"_"+str(nb_agent)+"_"+method+"_"+str(run)+".gv",edge = True)
            # Final tally: count active agents and fill the g_skill histogram.
            cpt = 0
            for a in graph:
                if not a.is_stopped() :
                    cpt += 1
                # presumably every agent (dead or alive) counts toward the
                # histogram -- TODO confirm nesting against the original file
                if a.g_skill != None :
                    c[round(a.g_skill,1)] += 1
            c3.append(cpt)
            labels,values = zip(*sorted(c.items()))
            print(cpt)
            plot.draw_barplot(list(range(nb_it+1)), active_red,active_blue, none, dead_red, dead_blue,'graphe simulation- sigma 0,1 '+ method+' '+str(nb_agent))
            #plot.draw_graph(graph,edges,"results/etoile3_2000/end_"+str(nb_agent)+"_"+method+"_"+str(run)+".gv",edge= True)
            #plot.heatmap_plot(range(nb_it//100) ,gskills.keys(), gskills_mat , methods[method])
            plot.chrono_plot( chrono_mat , 'graphe simulation- sigma 0,1 '+method+' '+str(nb_agent))
            plot.histogramme_plot(labels,values,'graphe simulation- sigma 0,1 '+ method+' '+str(nb_agent))
        res[method] = c3
    # Persist per-method results.
    # NOTE(review): this rebinding of `data` shadows the graph_sim instance.
    data=[]
    labels=[]
    with open("realdata.txt", "a") as fichier:
        fichier.write(str(nb_agent))
        for k in res.keys():
            data.append(res[k])
            labels.append(k)
            fichier.write(k)
            fichier.write(str(res[k]))
def _eval(nb_it = 20000, nb_agent = 100, method = 'elitist', graph = 'ring', den= 0.1):
    """Run one evolution experiment on a static topology and plot the result.

    Args:
        nb_it: number of iterations (each made of 4 micro-steps).
        nb_agent: population size.
        method: genome-selection method name forwarded to every Agent.
        graph: topology name for get_graph, or 'alea' for a random graph
            of density den.
        den: edge density, only used when graph == 'alea'.

    Returns:
        Number of agents still active at the end of the run.

    NOTE(review): indentation reconstructed from a whitespace-mangled dump.
    """
    # Map each g_skill bucket (-1.0 .. 1.0, step 0.1) to an index.
    # NOTE(review): gskills/gskills_mat are built but never used below.
    i = 0
    gskills = dict()
    for e in np.linspace(-1,1,21):
        gskills[round(e,1)] = i
        i += 1
    gskills_mat = np.zeros((21,nb_it//100))
    if graph == 'alea':
        graph,edges = gen_from_density(nb_agent ,den,method)
    else:
        graph,edges = get_graph(graph, nb_agent,method)
    # chrono_mat[a, t]: agent a's rounded g_skill at iteration t.
    chrono_mat = np.zeros((nb_agent,nb_it))
    c = Counter()  # final g_skill histogram, pre-seeded with all buckets
    for e in np.linspace(-1,1,21):
        c[round(e,1)] = 0
    env = Env()
    l = list(range(nb_agent))
    Liste_edges = list(range(len(edges)))
    # Per-iteration population breakdown (red = negative g_skill,
    # blue = positive, none = unset or zero).
    dead_red = nb_it*[0]
    active_red = nb_it*[0]
    dead_blue = nb_it*[0]
    active_blue = nb_it*[0]
    none = nb_it*[0]
    for i in range(nb_it):
        #print(i)
        for j in range(4):
            # Refill both resource pools, then let every agent act in a
            # random order.
            env.R1 = nb_agent/2
            env.R2 = nb_agent/2
            random.shuffle(l)
            for ag in l:
                graph[ag].move()
                graph[ag].compute_fitness(env)
            # Visit the static edges in a random order; both endpoints
            # broadcast to each other, with a random speaker going first.
            random.shuffle(Liste_edges)
            for ind_edg in Liste_edges:
                e = edges[ind_edg]
                first = random.choice([0,1])
                graph[e[first]].broadcast(graph[e[1-first]])
                graph[e[1-first]].broadcast(graph[e[first]])
        # Selection step plus bookkeeping for iteration i.
        n = 0
        for a in graph:
            a.select_genome()
            a.genomeList = []
            if a.g_skill != None:
                chrono_mat[n, i] = round(a.g_skill,1)
            if a.g_skill == None or a.g_skill == 0:
                none[i] += 1
            elif a.g_skill < 0:
                if a.is_stopped():
                    dead_red[i] += 1
                else:
                    active_red[i] += 1
            elif a.g_skill > 0:
                if a.is_stopped():
                    dead_blue[i] += 1
                else:
                    active_blue[i] += 1
            n += 1
    # Final tally: count active agents and fill the g_skill histogram.
    cpt = 0
    for a in graph:
        if not a.is_stopped() :
            cpt += 1
        # presumably every agent counts toward the histogram -- TODO confirm
        # nesting against the original file
        if a.g_skill != None :
            c[round(a.g_skill,1)] += 1
    labels,values = zip(*sorted(c.items()))
    #plot.draw_barplot(list(range(nb_it)), active_red,active_blue, none, dead_red, dead_blue,'graphe aleatoire densite == '+str(den)+' - sigma 0,1 '+ method)
    plot.chrono_plot( chrono_mat , method+' '+str(nb_agent))
    plot.histogramme_plot(labels,values,'graphe aleatoire densite == '+str(den)+' - sigma 0,1'+ method+' '+str(nb_agent))
    return cpt
if __name__ == '__main__':
    # Compare all selection methods on a ring of 100 agents and persist the
    # active-agent counts.
    res = dict()  # method name -> list of active-agent counts, one per run
    methods = [ 'fitness prop','random', 'elitist', 'rank prop']
    for method in methods:
        c2 = Counter()  # NOTE(review): never used
        c3 = []
        for run in range(1):
            c3.append(_eval(nb_it=20000,nb_agent=100,method=method,graph='ring'))
        res[method] = c3
    # Persist results and draw the per-method comparison violin plot.
    data=[]
    labels=[]
    with open("results.txt", "a") as fichier:
        fichier.write(str(nb_agent))
        for k in res.keys():
            data.append(res[k])
            labels.append(k)
            fichier.write(str(res[k]))
    plot.violin_plots(data, labels,'Agents active - sigma 0,1 - anneau de '+ str (nb_agent )+ ' agents' , 'Methode de selection', "Agents actives")
acee25819670ad843bb900c2bd5286217b9a9875 | 3,247 | py | Python | trainers/conv_aesthetic_trainer.py | tinenbruno/aesthetic_classifier | ca2103b28c9cbd563149fc8892485ee93c1b2f01 | [
"Apache-2.0"
] | null | null | null | trainers/conv_aesthetic_trainer.py | tinenbruno/aesthetic_classifier | ca2103b28c9cbd563149fc8892485ee93c1b2f01 | [
"Apache-2.0"
] | null | null | null | trainers/conv_aesthetic_trainer.py | tinenbruno/aesthetic_classifier | ca2103b28c9cbd563149fc8892485ee93c1b2f01 | [
"Apache-2.0"
] | null | null | null | from base.base_trainer import BaseTrain
import os
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from tensorflow.keras.optimizers import Adam
class ConvAestheticModelTrainer(BaseTrain):
    """Two-stage Keras trainer: a short warm-up fit, then recompile with all
    but the last layer trainable and fine-tune; finally evaluate on the
    validation data."""

    def __init__(self, model, data, val_data, config):
        super(ConvAestheticModelTrainer, self).__init__(model, data, config)
        self.callbacks = []
        # Accumulated training/validation history across both stages.
        self.loss = []
        self.acc = []
        self.val_loss = []
        self.val_acc = []
        self.val_data = val_data
        self.init_callbacks()

    def init_callbacks(self):
        """Register checkpointing, TensorBoard and (optional) Comet callbacks."""
        self.callbacks.append(
            ModelCheckpoint(
                filepath=os.path.join(self.config.callbacks.checkpoint_dir, '%s-{epoch:02d}-{val_loss:.2f}.hdf5' % self.config.exp.name),
                monitor=self.config.callbacks.checkpoint_monitor,
                mode=self.config.callbacks.checkpoint_mode,
                save_best_only=self.config.callbacks.checkpoint_save_best_only,
                save_weights_only=self.config.callbacks.checkpoint_save_weights_only,
                verbose=self.config.callbacks.checkpoint_verbose,
            )
        )
        self.callbacks.append(
            TensorBoard(
                log_dir=self.config.callbacks.tensorboard_log_dir,
                write_graph=self.config.callbacks.tensorboard_write_graph,
            )
        )
        # Comet.ml experiment tracking is enabled only when an API key is
        # present in the config; imported lazily to keep it optional.
        if hasattr(self.config,"comet_api_key"):
            from comet_ml import Experiment
            experiment = Experiment(api_key=self.config.comet_api_key, project_name=self.config.exp_name)
            experiment.add_tags(self.config.tags)
            experiment.disable_mp()
            experiment.log_parameters(self.config)
            self.callbacks.append(experiment.get_keras_callback())

    def train(self):
        """Run both training stages and print the final validation metrics."""
        # Stage 1: short warm-up fit with the model as configured by the
        # caller. NOTE(review): epochs=2, steps_per_epoch=1360 and
        # validation_steps=34 are hard-coded for one particular dataset.
        history = self.model.fit(
            self.data[0], self.data[1],
            epochs=2,
            verbose=self.config.trainer.verbose_training,
            steps_per_epoch=1360,
            callbacks=self.callbacks,
            validation_data=(self.val_data[0], self.val_data[1]),
            validation_steps=34
        )
        self.loss.extend(history.history['loss'])
        self.acc.extend(history.history['acc'])
        self.val_loss.extend(history.history['val_loss'])
        self.val_acc.extend(history.history['val_acc'])
        # Stage 2: mark every layer except the last as trainable, recompile
        # with a fresh optimizer, and fine-tune for the configured epochs.
        for layer in self.model.layers[:-1]:
            layer.trainable = True
        self.model.compile(loss='categorical_hinge',optimizer=Adam(lr=0.0003, decay=0.003), metrics=['accuracy'])
        history = self.model.fit(
            self.data[0], self.data[1],
            epochs=self.config.trainer.num_epochs,
            verbose=self.config.trainer.verbose_training,
            steps_per_epoch=1360,
            callbacks=self.callbacks,
            validation_data=(self.val_data[0], self.val_data[1]),
            validation_steps=34
        )
        self.loss.extend(history.history['loss'])
        self.acc.extend(history.history['acc'])
        self.val_loss.extend(history.history['val_loss'])
        self.val_acc.extend(history.history['val_acc'])
        # Final evaluation on the held-out validation data.
        loss, acc = self.model.evaluate(x=self.val_data[0], y=self.val_data[1], steps=34)
        print(loss)
        print(acc)
| 39.120482 | 137 | 0.630428 |
acee2665be56d9557d32bf66933c2ee8a0c23b91 | 829 | py | Python | 17-06-05-Machine-Learning-For-Trading/24_mask.py | maraboinavamshi/courses | 48f255ffb1903ba20865c2b91b488758d5cb1a09 | [
"Apache-2.0"
] | 15 | 2017-09-19T08:09:01.000Z | 2019-04-29T00:37:51.000Z | 17-06-05-Machine-Learning-For-Trading/24_mask.py | chitrita/Courses-1 | 7713267ee5c92e488086588ac41490c44b4f7350 | [
"Apache-2.0"
] | null | null | null | 17-06-05-Machine-Learning-For-Trading/24_mask.py | chitrita/Courses-1 | 7713267ee5c92e488086588ac41490c44b4f7350 | [
"Apache-2.0"
] | 17 | 2018-02-27T03:15:54.000Z | 2019-04-24T09:26:46.000Z | import numpy as np
''' Look up: https://docs.scipy.org/doc/numpy/reference/routines.random.html '''
def generate_array():
    """Build, print and return the fixed 2x9 demo array."""
    rows = [
        (20, 25, 10, 23, 26, 32, 10, 5, 0),
        (0, 2, 50, 20, 0, 1, 28, 5, 0),
    ]
    arr = np.array(rows)
    print('Given array: ')
    print(arr)
    return arr
def access_elements(array):
    """Fancy-index positions [1, 1, 2, 3] out of *array*, print and return them."""
    picked = array[np.array([1, 1, 2, 3])]
    print('Requested elements:')
    print(picked)
    return picked
def masking(array, mask_value):
    """Print and return the elements of *array* strictly below *mask_value*.

    Previously the masked selection was computed and printed but discarded;
    returning it makes the helper usable programmatically while keeping the
    printed output unchanged.
    """
    masked = array[array < mask_value]
    print('Masked')
    print(masked)
    return masked
def replacing(array, mask_value, new_value):
    """Replace every element of *array* below *mask_value* with *new_value*.

    The replacement happens in place; the mutated array is also returned so
    calls can be chained. Previously nothing was returned.
    """
    array[array < mask_value] = new_value
    print('Replaced:')
    print(array)
    return array
if __name__ == "__main__":
x = generate_array()
mean_value = x.mean()
print("Mean:")
print(mean_value)
masking(x, mean_value)
replacing(x, mean_value, 0)
| 17.638298 | 80 | 0.664656 |
acee2673be7f82a2bcda2612f7fa382f64383458 | 71 | py | Python | data/clear_test.py | littleleben/CNN-demo-class-data_ask- | b39147f80ebb48d960f643c207f5b485745790d8 | [
"MIT"
] | null | null | null | data/clear_test.py | littleleben/CNN-demo-class-data_ask- | b39147f80ebb48d960f643c207f5b485745790d8 | [
"MIT"
] | null | null | null | data/clear_test.py | littleleben/CNN-demo-class-data_ask- | b39147f80ebb48d960f643c207f5b485745790d8 | [
"MIT"
] | null | null | null | fr=open(fileread,'r',encoding='utf-8')
# Fix: the original line read `encoding=)`, a syntax error; the write handle
# should use the same UTF-8 encoding as the read handle above.
fw = open(filewrite, 'w', encoding='utf-8')
acee26bd58cbea19539aeadd10a03b9ae26b1576 | 1,083 | py | Python | qiskit/tools/visualization/__init__.py | kifumi/qiskit-terra | 203fca6d694a18824a6b12cbabd3dd2c64dd12ae | [
"Apache-2.0"
] | null | null | null | qiskit/tools/visualization/__init__.py | kifumi/qiskit-terra | 203fca6d694a18824a6b12cbabd3dd2c64dd12ae | [
"Apache-2.0"
] | null | null | null | qiskit/tools/visualization/__init__.py | kifumi/qiskit-terra | 203fca6d694a18824a6b12cbabd3dd2c64dd12ae | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""Main QISKit visualization methods."""
import sys
from ._circuit_visualization import circuit_drawer, plot_circuit, generate_latex_source,\
latex_circuit_drawer, matplotlib_circuit_drawer, qx_color_scheme
from ._error import VisualizationError
from ._state_visualization import plot_bloch_vector
# Inside a Jupyter kernel (but not Spyder), prefer the interactive plots if
# the remote visualization service is reachable; otherwise fall back to the
# static matplotlib implementations.
# NOTE(review): this performs a network request at import time, which slows
# down `import` and can fail when offline.
if ('ipykernel' in sys.modules) and ('spyder' not in sys.modules):
    import requests
    if requests.get(
            'https://qvisualization.mybluemix.net/').status_code == 200:
        from .interactive._iplot_state import iplot_state as plot_state
        from .interactive._iplot_histogram import iplot_histogram as \
            plot_histogram
    else:
        # Service unavailable: use the static versions.
        from ._state_visualization import plot_state
        from ._counts_visualization import plot_histogram
else:
    # Plain Python (or Spyder): always use the static versions.
    from ._state_visualization import plot_state
    from ._counts_visualization import plot_histogram
| 33.84375 | 89 | 0.755309 |
acee26eeb642fd4a8242aa4b3c7b86ef6a119f82 | 1,561 | py | Python | test/test_simon.py | frevson/Qiskit-Aqua | ea41ec3229aad2d0cc612315162ca2e21359ec42 | [
"Apache-2.0"
] | null | null | null | test/test_simon.py | frevson/Qiskit-Aqua | ea41ec3229aad2d0cc612315162ca2e21359ec42 | [
"Apache-2.0"
] | null | null | null | test/test_simon.py | frevson/Qiskit-Aqua | ea41ec3229aad2d0cc612315162ca2e21359ec42 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import unittest
from parameterized import parameterized
from qiskit_aqua.components.oracles import SimonOracle
from qiskit_aqua.algorithms import Simon
from qiskit_aqua import get_aer_backend
from test.common import QiskitAquaTestCase
class TestSimon(QiskitAquaTestCase):
    """End-to-end checks for the Simon algorithm on the qasm simulator."""

    @parameterized.expand([
        # Each case is a complete truth table mapping 3-bit inputs to
        # 3-bit outputs, used to build a SimonOracle.
        [{'000': '001', '001': '010', '010': '011', '011': '100',
          '100': '101', '101': '110', '110': '111', '111': '000'}],
        [{'000': '101', '001': '010', '010': '000', '011': '110',
          '100': '000', '101': '110', '110': '101', '111': '010'}]
    ])
    def test_simon(self, simon_input):
        backend = get_aer_backend('qasm_simulator')
        oracle = SimonOracle(simon_input)
        algorithm = Simon(oracle)
        result = algorithm.run(backend)
        # The algorithm reports whether its answer matched the oracle.
        self.assertTrue(result['oracle_evaluation'])
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 34.688889 | 79 | 0.641896 |
acee274ac367b9870740612f1be0bdcba06cc483 | 505 | py | Python | src/models/price_to_earnings.py | dimastatz/portfolio-manager | b3b114a26ba2bb6e032354d2756be0b3eb74deba | [
"Apache-2.0"
] | null | null | null | src/models/price_to_earnings.py | dimastatz/portfolio-manager | b3b114a26ba2bb6e032354d2756be0b3eb74deba | [
"Apache-2.0"
] | 1 | 2021-08-31T05:51:09.000Z | 2021-08-31T05:51:09.000Z | src/models/price_to_earnings.py | dimastatz/portfolio-manager | b3b114a26ba2bb6e032354d2756be0b3eb74deba | [
"Apache-2.0"
] | null | null | null | # PE or Price-to-Earnings ratio. It's the number which best reflects the current value of a company.
# This number shows you how much investor money, or your money it takes to generate $1 of company profit.
# If the PE is 100, it means for every $100 you invest in a company that company can generate $1 of profit.
# Historically, average PEs have been around 15 to 17 for US companies. So 100 means the stock is extremely expensive.
from src.models import Model
class PriceToEarning(Model):
    """Valuation model for the Price-to-Earnings (P/E) ratio; no behavior implemented yet."""
    pass
| 56.111111 | 118 | 0.764356 |
acee2763b557caf6449e8658dacda3a42d63ef5f | 6,881 | py | Python | src/check_solution.py | roessig/verify-nn | 44e582f93c2a7702491be9ec351e0f5d7cf8a038 | [
"CC-BY-4.0"
] | 1 | 2021-12-22T18:42:55.000Z | 2021-12-22T18:42:55.000Z | src/check_solution.py | roessig/verify-nn | 44e582f93c2a7702491be9ec351e0f5d7cf8a038 | [
"CC-BY-4.0"
] | null | null | null | src/check_solution.py | roessig/verify-nn | 44e582f93c2a7702491be9ec351e0f5d7cf8a038 | [
"CC-BY-4.0"
] | 2 | 2021-09-24T02:04:11.000Z | 2021-09-28T09:57:01.000Z | from model_boundd import MIPwithBounds
import networkx as nx
def get_vars_and_coefficients(elements, start=3):
    """Split a tokenized .rlv line into (variable names, float coefficients).

    From index ``start`` onward the tokens alternate:
    coefficient, variable, coefficient, variable, ...
    Returns a tuple ``(variables, coefficients)`` of two lists.
    """
    variables = list(elements[start + 1::2])
    coefficients = [float(token) for token in elements[start::2]]
    return variables, coefficients
def check_sol(filepath, value_dict, eps=1e-8, print_values=False):
    """Check solution given by input variables for feasibility.

    Re-parses the network from the .rlv file into a DAG, forward-evaluates
    it on the given input assignment, and checks input ("Assert") and
    output ("AssertOut") constraints up to tolerance ``eps``.

    Args:
        filepath: str, path to .rlv file with AssertOut for output constraints
        value_dict: dict, mapping input variable names (str) to values of the
            solution; NOTE: mutated in place — values for all intermediate
            nodes are added during evaluation
        eps: float, tolerance for checking
        print_values: bool, if True print all node values and status messages

    Returns:
        True if the solution is valid, False otherwise
    """
    graph = nx.DiGraph()
    relu_nodes = set()
    max_pool_nodes = set()
    linear_nodes = set()
    relu_in_nodes = set()
    mip = MIPwithBounds(filepath, 1e-7)
    model, vars = mip.read_file_into_graph()
    # vars is a dict of the input nodes
    output_cons = []
    input_cons = []
    input_bounds = {}
    # Parse the .rlv file line by line and build the evaluation graph.
    with open(filepath, "r") as f:
        for line in f:
            if line.startswith("#"):
                continue
            elements = line.split()
            if elements[0] == "Input":
                input_bounds[elements[1]] = {"lb": None, "ub": None}
                graph.add_node(elements[1], node_type="input")
            if elements[0] == "ReLU":
                # A ReLU neuron is split into a linear "<name>_in" node plus
                # the ReLU output node "<name>".
                bias = float(elements[2])
                variables, coeffs = get_vars_and_coefficients(elements)
                relu_nodes.add(elements[1])
                graph.add_node(elements[1] + "_in", bias=bias)
                graph.add_edge(elements[1] + "_in", elements[1])
                relu_in_nodes.add(elements[1] + "_in")
                for v, w in zip(variables, coeffs):
                    graph.add_edge(v, elements[1] + "_in", weight=w)
            if elements[0] == "Linear":
                linear_nodes.add(elements[1])
                bias = float(elements[2])
                variables, coeffs = get_vars_and_coefficients(elements)
                graph.add_node(elements[1], bias=bias)
                for v, w in zip(variables, coeffs):
                    graph.add_edge(v, elements[1], weight=w)
            if elements[0] == "MaxPool":
                max_pool_nodes.add(elements[1])
                graph.add_node(elements[1], node_type="max_pool")
                graph.add_edges_from(((v, elements[1]) for v in elements[2:]), weight=1)
            if elements[0] == "AssertOut":
                # (lhs constant, direction '<='/'>=', (variables, coefficients))
                output_cons.append((float(elements[2]), elements[1], get_vars_and_coefficients(elements)))
            if elements[0] == "Assert":
                input_cons.append((float(elements[2]), elements[1], get_vars_and_coefficients(elements)))
            # Disabled code kept for reference: would derive per-input bounds
            # from simple one-variable Assert lines.
            """if len(elements) == 5 and elements[-1] in input_bounds:
                if elements[1] == "<=":
                    new_lb = float(elements[2]) / float(elements[3])
                    if input_bounds[elements[-1]]["lb"] is None or input_bounds[elements[-1]]["lb"] < new_lb:
                        input_bounds[elements[-1]]["lb"] = new_lb
                elif elements[1] == ">=":
                    new_ub = float(elements[2]) / float(elements[3])
                    if input_bounds[elements[-1]]["ub"] is None or input_bounds[elements[-1]]["ub"] > new_ub:
                        input_bounds[elements[-1]]["ub"] = new_ub"""
    # Check the input ("Assert") constraints: lhs <= / >= sum(coeff * value).
    val = True
    for lhs, direction, (variables, coeffs) in input_cons:
        if direction == "<=":
            if lhs > sum(c * value_dict[v] for v, c in zip(variables, coeffs)) + eps:
                val = False
                print(lhs, direction, variables, coeffs)
                break
        elif direction == ">=":
            if lhs < sum(c * value_dict[v] for v, c in zip(variables, coeffs)) - eps:
                val = False
                print(lhs, direction, variables, coeffs)
                break
        else:
            raise NotImplementedError
    if not val: # input constraints do not hold
        print("input constraints not fulfilled")
        return False
    else:
        if print_values:
            print("input constraints hold")
    # Forward-evaluate the network in topological order.
    nodes_sorted = list(nx.topological_sort(graph))
    relu_phases = {x: -1 for x in relu_nodes}
    relu_phases_all = {x: 0 for x in relu_nodes}
    for node in nodes_sorted:
        if node in vars:
            continue # skip the input nodes
        new_value = 0
        if node in linear_nodes or node in relu_in_nodes:
            for n in graph.predecessors(node):
                new_value += graph.edges[n, node]["weight"] * value_dict[n]
            # NOTE(review): graph.node[...] is the networkx 1.x/2.x-era API
            # (removed in networkx 3.x, where it is graph.nodes[...]).
            new_value += graph.node[node]["bias"]
        elif node in max_pool_nodes:
            new_value = max(value_dict[n] for n in graph.predecessors(node))
        elif node in relu_nodes:
            pred = list(graph.predecessors(node))
            assert len(pred) == 1
            if value_dict[pred[0]] > 0: # apply ReLU here
                new_value = value_dict[pred[0]]
                relu_phases[node] = 1
            else:
                relu_phases[node] = 0
        value_dict[node] = new_value
    # Every ReLU must have been assigned a phase (0 inactive / 1 active).
    for relu, phase in relu_phases.items():
        assert phase >= 0
        relu_phases_all[relu] += phase
    if print_values:
        for s in value_dict.items():
            print(s)
    val = True
    # check the output constraints
    #print(output_cons)
    for lhs, direction, (variables, coeffs) in output_cons:
        if direction == "<=":
            if lhs > sum(c * value_dict[v] for v, c in zip(variables, coeffs)) + eps:
                val = False
                break
        elif direction == ">=":
            if lhs < sum(c * value_dict[v] for v, c in zip(variables, coeffs)) - eps:
                val = False
                break
        else:
            raise NotImplementedError
    return val
if __name__ == "__main__":
    # Ad-hoc driver: checks one hard-coded candidate solution against one
    # benchmark network. Paths are relative to the developer's checkout.
    directory = "../benchmarks/collisionDetection/"
    directory2 = "../../benchmarks/scip/ACAS/"
    directory3 = "../benchmarks/twinladder/"
    directory5_out = "../benchmarks/mnist/"
    filepath = directory2 + "property2/5_3.rlv"
    #filepath = directory2 + "property5/property.rlv"
    #filepath = directory2 + "property_3.rlv"
    file = "../logs/neurify_11_10_0_adv"
    # First line of the log holds whitespace-separated pixel values
    # (read but unused here — the hard-coded value_dict below wins).
    with open(file, "r") as f:
        list_of_pixels = [float(x) for x in f.readline()[:-1].split()]
    #value_dict = {"in_" + str(i): x*255 for i, x in enumerate(list_of_pixels)}
    value_dict = {'in_0': 55947.69100, 'in_1': 0.198666, 'in_2': -3.051407, 'in_3': 1145.0000, 'in_4': 50.768384}
    if check_sol(filepath, value_dict=value_dict, eps=1e-2, print_values=True):
        print("valid solution found -> SAT")
    else:
        print("the solution is not valid")
acee27c87d1708a71ea1bf5753304389b0bc48b7 | 3,508 | py | Python | pypeln/task/api/from_iterable.py | Davidnet/pypeln | 6e1295c2ac7914dadfa546a937537aa2c2a5978d | [
"MIT"
] | null | null | null | pypeln/task/api/from_iterable.py | Davidnet/pypeln | 6e1295c2ac7914dadfa546a937537aa2c2a5978d | [
"MIT"
] | null | null | null | pypeln/task/api/from_iterable.py | Davidnet/pypeln | 6e1295c2ac7914dadfa546a937537aa2c2a5978d | [
"MIT"
] | null | null | null | import asyncio
import time
import typing as tp
from pypeln import utils as pypeln_utils
from pypeln.utils import A, B, T
from .. import utils
from ..queue import IterableQueue
from ..stage import Stage
from ..worker import ProcessFn, Worker
class FromIterable(tp.NamedTuple):
    """Stage process function that feeds items from an iterable into the pipeline.

    Async iterables are consumed directly on the event loop; synchronous
    iterables are consumed on a thread-pool executor and bridged through
    an IterableQueue.
    """
    iterable: tp.Union[tp.Iterable, tp.AsyncIterable]
    async def __call__(self, worker: Worker, **kwargs):
        iterable: tp.AsyncIterable
        if isinstance(self.iterable, tp.AsyncIterable):
            iterable = self.iterable
        else:
            sync_iterable: tp.Iterable
            if isinstance(self.iterable, pypeln_utils.BaseStage):
                # Upstream stage: pull its elements (with indices) lazily.
                sync_iterable = self.iterable.to_iterable(maxsize=0, return_index=True)
            else:
                sync_iterable = self.iterable
            # Bridge queue between the executor thread and this coroutine;
            # sized to the largest downstream queue.
            queue = IterableQueue(
                maxsize=max(q.maxsize for q in worker.stage_params.output_queues)
            )
            loop = utils.get_running_loop()
            # Fire-and-forget: the executor thread fills `queue` while we drain it.
            loop.run_in_executor(
                None,
                lambda: FromIterable.consume_iterable(
                    worker=worker, iterable=sync_iterable, queue=queue, loop=loop
                ),
            )
            iterable = queue
        i = 0
        # Wrap raw values in Element (with a running index) and forward downstream.
        async for x in iterable:
            if not isinstance(x, pypeln_utils.Element):
                x = pypeln_utils.Element(index=(i,), value=x)
            await worker.stage_params.output_queues.put(x)
            i += 1
    @staticmethod
    def consume_iterable(
        worker: Worker,
        iterable: tp.Iterable,
        queue: IterableQueue,
        loop: asyncio.AbstractEventLoop,
    ):
        """Drain a synchronous iterable into `queue` from a worker thread.

        Runs off the event loop, so every queue interaction is scheduled
        via run_coroutine_threadsafe. Stops early if the worker is done;
        exceptions are converted and re-raised through the queue.
        """
        try:
            for x in iterable:
                if worker.is_done:
                    return
                # Busy-wait with a small sleep while the queue is full.
                while queue.full():
                    if worker.is_done:
                        return
                    time.sleep(pypeln_utils.TIMEOUT)
                asyncio.run_coroutine_threadsafe(queue.put(x), loop)
            asyncio.run_coroutine_threadsafe(queue.done(), loop)
        except BaseException as e:
            e = queue.get_pipeline_exception(e)
            asyncio.run_coroutine_threadsafe(queue.raise_exception(e), loop)
@tp.overload
def from_iterable(
    iterable: tp.Union[tp.Iterable[T], tp.AsyncIterable[T]], use_thread: bool = True,
) -> Stage[T]:
    ...
@tp.overload
def from_iterable(use_thread: bool = True) -> pypeln_utils.Partial[Stage[T]]:
    ...
def from_iterable(
    iterable: tp.Union[
        tp.Iterable[T], tp.AsyncIterable[T], pypeln_utils.Undefined
    ] = pypeln_utils.UNDEFINED,
    use_thread: bool = True,
) -> tp.Union[Stage[T], pypeln_utils.Partial[Stage[T]]]:
    """
    Creates a stage from an iterable. This function gives you more control of how the iterable is consumed.

    Arguments:
        iterable: A source Iterable or AsyncIterable.
        use_thread: This parameter is not used and only kept for API compatibility with the other modules.

    Returns:
        Returns a `Stage` if the `iterable` parameter is given, else it returns a `Partial`
        that builds the stage once an iterable is supplied (enables pipe-style composition).
    """
    if isinstance(iterable, pypeln_utils.Undefined):
        # Curried form: defer stage creation until the iterable arrives.
        return pypeln_utils.Partial(
            lambda iterable: from_iterable(iterable, use_thread=use_thread)
        )
    # Source stage: single worker, no dependencies, unbounded queue.
    return Stage(
        process_fn=FromIterable(iterable),
        workers=1,
        maxsize=0,
        total_sources=1,
        timeout=0,
        dependencies=[],
        on_start=None,
        on_done=None,
        f_args=[],
    )
| 28.290323 | 106 | 0.607754 |
acee27f5bc96d136f4041fe2462ac8b8fba39428 | 1,163 | py | Python | test/cts/tool/CTSConverter/src/nn/specs/V1_1/fully_connected_float_relaxed.mod.py | zhaoming0/webml-polyfill | 56cf96eff96665da0f5fd7ef86fd5748f4bd22b9 | [
"Apache-2.0"
] | 255 | 2020-05-22T07:45:29.000Z | 2022-03-29T23:58:22.000Z | test/cts/tool/CTSConverter/src/nn/specs/V1_1/fully_connected_float_relaxed.mod.py | zhaoming0/webml-polyfill | 56cf96eff96665da0f5fd7ef86fd5748f4bd22b9 | [
"Apache-2.0"
] | 5,102 | 2020-05-22T07:48:33.000Z | 2022-03-31T23:43:39.000Z | test/cts/tool/CTSConverter/src/nn/specs/V1_1/fully_connected_float_relaxed.mod.py | ibelem/webml-polyfill | aaf1ba4f5357eaf6e89bf9990f5bdfb543cd2bc2 | [
"Apache-2.0"
] | 120 | 2020-05-22T07:51:08.000Z | 2022-02-16T19:08:05.000Z | #
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Test spec for a relaxed-precision FULLY_CONNECTED op: out = in * 2 + 4
# (weights [2], bias [4], activation 0 = NONE). DSL names (Model, Input,
# Parameter, ...) are injected by the NNAPI test generator.
model = Model()
in0 = Input("op1", "TENSOR_FLOAT32", "{3, 1}")
weights = Parameter("op2", "TENSOR_FLOAT32", "{1, 1}", [2])
bias = Parameter("b0", "TENSOR_FLOAT32", "{1}", [4])
out0 = Output("op3", "TENSOR_FLOAT32", "{3, 1}")
act = Int32Scalar("act", 0)
model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
model = model.RelaxedExecution(True)
# Example 1. Input in operand 0; expected output is 2*x + 4 per element.
input0 = {in0: # input 0
          [2, 32, 16]}
output0 = {out0: # output 0
           [8, 68, 36]}
# Instantiate an example
Example((input0, output0))
| 34.205882 | 76 | 0.688736 |
acee2943298695653b9e894e9d27602018a75265 | 217 | py | Python | server/lib/python3.9/site-packages/stripe/api_resources/mandate.py | ejanicas-stripe/hotel | a0d0a7e1ae14b509a5c9d05d17603b99399cb752 | [
"MIT"
] | 1,078 | 2015-01-06T03:35:05.000Z | 2022-03-25T13:25:48.000Z | server/lib/python3.9/site-packages/stripe/api_resources/mandate.py | ejanicas-stripe/hotel | a0d0a7e1ae14b509a5c9d05d17603b99399cb752 | [
"MIT"
] | 558 | 2015-01-07T19:05:02.000Z | 2022-03-28T22:19:24.000Z | server/lib/python3.9/site-packages/stripe/api_resources/mandate.py | ejanicas-stripe/hotel | a0d0a7e1ae14b509a5c9d05d17603b99399cb752 | [
"MIT"
] | 382 | 2015-01-04T14:06:09.000Z | 2022-03-16T04:52:04.000Z | # File generated from our OpenAPI spec
from __future__ import absolute_import, division, print_function
from stripe.api_resources.abstract import APIResource
class Mandate(APIResource):
OBJECT_NAME = "mandate"
| 24.111111 | 64 | 0.815668 |
acee29e095e334b7f0a675bf59433f56dbc6953e | 5,830 | py | Python | tests/browser/test_browser.py | idealistdev/alfajor | af8a750ec38bf01150a8fe004c8336e4d0be7030 | [
"BSD-3-Clause"
] | 2 | 2015-11-01T22:29:54.000Z | 2018-02-15T15:54:30.000Z | tests/browser/test_browser.py | idealistdev/alfajor | af8a750ec38bf01150a8fe004c8336e4d0be7030 | [
"BSD-3-Clause"
] | 1 | 2015-11-04T09:59:43.000Z | 2020-04-26T06:19:00.000Z | tests/browser/test_browser.py | idealistdev/alfajor | af8a750ec38bf01150a8fe004c8336e4d0be7030 | [
"BSD-3-Clause"
] | 3 | 2015-09-16T14:31:23.000Z | 2020-09-08T06:34:12.000Z | # Copyright Action Without Borders, Inc., the Alfajor authors and contributors.
# All rights reserved. See AUTHORS.
#
# This file is part of 'alfajor' and is distributed under the BSD license.
# See LICENSE for more details.
import time
from nose.tools import raises
from . import browser, browser_test, screenshot_fails
@browser_test()
def test_simple():
    """Open the root page and check status, headers, cookies and body."""
    browser.open('/')
    if 'status' in browser.capabilities:
        assert browser.status_code == 200
        assert browser.status == '200 OK'
    if 'headers' in browser.capabilities:
        assert 'text/html' in browser.headers['Content-Type']
    assert not browser.cookies
    # This is generally not a safe assertion... the browser could (and does)
    # normalize the returned html in some fashion.
    assert browser.response == ('<html><head></head>'
                                '<body><p>hi there</p></body></html>')
    assert browser.document.cssselect('p')[0].text == 'hi there'
@browser_test()
def test_reset():
    # TODO: flesh this out when cookie querying is working and has
    # test coverage.  until then, just verify that the method doesn't
    # explode.
    browser.open('/')
@browser_test()
def test_user_agent():
    """The browser must report a recognized user agent."""
    browser.open('/')
    ua = browser.user_agent
    assert ua['browser'] != 'unknown'
@browser_test()
def test_traversal():
    """Follow links /seq/a -> /seq/b -> (redirect) /seq/d, checking title,
    location, referrer and that each request gets a fresh request id."""
    browser.open('/seq/a')
    a_id = browser.document['#request_id'].text
    assert browser.cssselect('title')[0].text == 'seq/a'
    assert browser.location.endswith('/seq/a')
    assert not browser.cssselect('p.referrer')[0].text
    browser.cssselect('a')[0].click(wait_for='page')
    b_id = browser.document['#request_id'].text
    assert a_id != b_id
    assert browser.cssselect('title')[0].text == 'seq/b'
    assert browser.location.endswith('/seq/b')
    assert '/seq/a' in browser.cssselect('p.referrer')[0].text
    # bounce through a redirect
    browser.cssselect('a')[0].click(wait_for='page')
    d_id = browser.document['#request_id'].text
    assert d_id != b_id
    assert browser.cssselect('title')[0].text == 'seq/d'
    assert browser.location.endswith('/seq/d')
    assert '/seq/b' in browser.cssselect('p.referrer')[0].text
@browser_test()
def _test_single_cookie(bounce):
    """Shared body: one cookie is set (optionally via a redirect bounce)
    and cleared again by browser.reset()."""
    browser.open('/')
    assert not browser.cookies
    if bounce:
        landing_page = browser.location
        browser.open('/assign-cookie/1?bounce=%s' % landing_page)
    else:
        browser.open('/assign-cookie/1')
    assert browser.cookies == {'cookie1': 'value1'}
    browser.reset()
    assert not browser.cookies
    browser.open('/')
    assert not browser.cookies
@browser_test()
def test_single_cookie():
    # Generator test: run the single-cookie check with and without a redirect.
    yield _test_single_cookie, False
    yield _test_single_cookie, True
@browser_test()
def _test_multiple_cookies(bounce):
    """Shared body: two cookies assigned at once, then cleared by reset()."""
    browser.open('/')
    assert not browser.cookies
    if bounce:
        landing_page = browser.location
        browser.open('/assign-cookie/2?bounce=%s' % landing_page)
    else:
        browser.open('/assign-cookie/2')
    assert browser.cookies == {'cookie1': 'value1',
                               'cookie2': 'value 2'}
    browser.reset()
    assert not browser.cookies
    browser.open('/')
    assert not browser.cookies
@browser_test()
def test_multiple_cookies():
    # Generator test: with and without a redirect bounce.
    yield _test_multiple_cookies, False
    yield _test_multiple_cookies, True
@browser_test()
def test_wait_for():
    # bare minimum no side-effects call browser.wait_for
    browser.wait_for('duration', 1)
@browser_test()
def test_wait_for_duration():
    """'duration' waits must block for at least the requested time (selenium only)."""
    if 'selenium' in browser.capabilities:
        start = time.time()
        browser.open('/waitfor', wait_for='duration', timeout=1000)
        duration = time.time() - start
        assert duration >= 1
@browser_test()
def test_wait_for_element():
    """Wait until a DOM element appears after a click (selenium only)."""
    if 'selenium' in browser.capabilities:
        browser.open('/waitfor')
        browser.cssselect('a#appender')[0].click(
            wait_for='element:css=#expected_p', timeout=3000)
        assert browser.cssselect('#expected_p')
@browser_test()
@raises(AssertionError)
def test_wait_for_element_not_found():
    """Waiting for a non-existent element must raise AssertionError."""
    if 'selenium' in browser.capabilities:
        browser.open('/waitfor')
        browser.wait_for('element:css=#unexisting', timeout=10)
    else:
        # Keep the @raises expectation satisfied on non-selenium backends.
        raise AssertionError('Ignore if not selenium')
@browser_test()
def test_wait_for_element_not_present():
    """'!element:' waits until the element disappears (selenium only)."""
    if 'selenium' in browser.capabilities:
        browser.open('/waitfor')
        assert browser.cssselect('#removeme')
        browser.cssselect('#remover')[0].click(
            wait_for='!element:css=#removeme', timeout=3000)
        assert not browser.cssselect('#removeme')
@browser_test()
def test_wait_for_ajax():
    """'ajax' waits until outstanding XHRs settle (selenium only)."""
    if 'selenium' in browser.capabilities:
        browser.open('/waitfor')
        browser.cssselect('#ajaxappender')[0].click(
            wait_for='ajax', timeout=3000)
        assert len(browser.cssselect('.ajaxAdded')) == 3
@browser_test()
def test_wait_for_js():
    """'js:' waits until the given JavaScript expression is truthy (selenium only)."""
    if 'selenium' in browser.capabilities:
        browser.open('/waitfor')
        browser.cssselect('#counter')[0].click(
            wait_for='js:window.exampleCount==100;', timeout=3000)
@browser_test()
def test_set_cookie():
    """set_cookie supports plain, host/port-scoped and persistent cookies."""
    if 'cookies' in browser.capabilities:
        browser.open('/')
        browser.set_cookie('foo', 'bar')
        browser.set_cookie('py', 'py', 'localhost.local', port='8008')
        browser.set_cookie('green', 'frog',
                           session=False, expires=time.time() + 3600)
        assert 'foo' in browser.cookies
        assert 'py' in browser.cookies
        assert 'green' in browser.cookies
@browser_test()
@screenshot_fails('test_screenshot.png')
def test_screenshot():
    """Deliberately failing test: the decorator captures a screenshot on failure."""
    if 'javascript' not in browser.capabilities:
        return
    browser.open('http://www.google.com')
    assert False
| 28.300971 | 79 | 0.659863 |
acee2ab6743c591d6436e451478c748e37575a3d | 2,867 | py | Python | Drest_auth/users/migrations/0001_initial.py | abdullah1107/DjangoAdmin | 72e902c0169a7442420425aa5b9473b2e826af48 | [
"MIT"
] | 1 | 2020-07-12T13:14:34.000Z | 2020-07-12T13:14:34.000Z | Drest_auth/users/migrations/0001_initial.py | abdullah1107/DjangoAdmin | 72e902c0169a7442420425aa5b9473b2e826af48 | [
"MIT"
] | null | null | null | Drest_auth/users/migrations/0001_initial.py | abdullah1107/DjangoAdmin | 72e902c0169a7442420425aa5b9473b2e826af48 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.2 on 2020-06-15 06:49
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the CustomUser table with the
    # standard Django AbstractUser field set. Do not edit by hand; create a
    # follow-up migration for schema changes instead.
    initial = True
    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]
    operations = [
        migrations.CreateModel(
            name='CustomUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| 63.711111 | 329 | 0.663062 |
acee2bbb85b1db3db1369c0aa87681e03d13d745 | 887 | py | Python | starter-project/py/src/agent.py | e1Ru1o/forta-agent-sdk | ae99dd91e2a253658c1658f7af1a3f850129e45b | [
"MIT"
] | 1 | 2021-10-02T12:42:14.000Z | 2021-10-02T12:42:14.000Z | starter-project/py/src/agent.py | e1Ru1o/forta-agent-sdk | ae99dd91e2a253658c1658f7af1a3f850129e45b | [
"MIT"
] | null | null | null | starter-project/py/src/agent.py | e1Ru1o/forta-agent-sdk | ae99dd91e2a253658c1658f7af1a3f850129e45b | [
"MIT"
] | null | null | null | from forta_agent import Finding, FindingType, FindingSeverity
# Gas-usage buckets for finding severity.
MEDIUM_GAS_THRESHOLD = 1000000
HIGH_GAS_THRESHOLD = 3000000
CRITICAL_GAS_THRESHOLD = 7000000


def handle_transaction(transaction_event):
    """Return a single-finding list when a transaction's gas usage is high.

    Transactions below MEDIUM_GAS_THRESHOLD produce no findings; otherwise
    one suspicious finding is emitted with a severity from get_severity().
    """
    gas_used = int(transaction_event.gas_used)
    if gas_used < MEDIUM_GAS_THRESHOLD:
        return []
    alert = Finding({
        'name': 'High Gas Used',
        'description': f'Gas Used: {gas_used}',
        'alert_id': 'FORTA-1',
        'type': FindingType.Suspicious,
        'severity': get_severity(gas_used),
        'metadata': {
            'gas_used': gas_used
        }
    })
    return [alert]
def get_severity(gas_used):
    """Bucket a gas amount into Critical / High / Medium severity."""
    if gas_used > CRITICAL_GAS_THRESHOLD:
        return FindingSeverity.Critical
    if gas_used > HIGH_GAS_THRESHOLD:
        return FindingSeverity.High
    return FindingSeverity.Medium
| 25.342857 | 61 | 0.67531 |
acee2c9f233148a3f7f9cf6b5ce0f4920ff47764 | 487 | py | Python | gen_diSpeech-F0.py | Orange-OpenSource/diSpeech | a34f29225c36316a0fb6722b75535937b4525e61 | [
"MIT"
] | 2 | 2021-09-16T11:40:09.000Z | 2022-03-03T12:21:20.000Z | gen_diSpeech-F0.py | Orange-OpenSource/diSpeech | a34f29225c36316a0fb6722b75535937b4525e61 | [
"MIT"
] | null | null | null | gen_diSpeech-F0.py | Orange-OpenSource/diSpeech | a34f29225c36316a0fb6722b75535937b4525e61 | [
"MIT"
] | null | null | null | # Software Name : diSpeech
# Version: 1.0.0
# SPDX-FileCopyrightText: Copyright (c) 2021 Orange
# SPDX-License-Identifier: MIT
# This software is distributed under the MIT,
# the text of which is available at https://opensource.org/licenses/MIT
# or see the "LICENSE" file for more details.
# Author: Olivier Zhang
from dataset import DownSampleDiSpeech
# Build the diSpeech-F0 dataset by down-sampling diSpeech-Bbig along the
# F0 factor with stride 3 (presumably keeping every 3rd F0 value —
# confirm against DownSampleDiSpeech's stride semantics).
base_dataset = 'diSpeech-Bbig'
stride_dict = {'F0': 3}
dispeech_f0 = DownSampleDiSpeech('diSpeech-F0', base_dataset, stride_dict)
| 30.4375 | 74 | 0.765914 |
acee2cae64c2742bc1f3b77b4cb2741d04abea94 | 4,302 | py | Python | zos/message.py | zosnet/zos-python | 6f005298349cad3a00906e43d2dc9cf2c36ce2d4 | [
"MIT"
] | 2 | 2020-02-28T07:40:02.000Z | 2020-02-28T07:40:05.000Z | zos/message.py | zosnet/zos-python | 6f005298349cad3a00906e43d2dc9cf2c36ce2d4 | [
"MIT"
] | null | null | null | zos/message.py | zosnet/zos-python | 6f005298349cad3a00906e43d2dc9cf2c36ce2d4 | [
"MIT"
] | null | null | null | import re
import logging
from binascii import hexlify, unhexlify
from graphenebase.ecdsa import verify_message, sign_message
from zosbase.account import PublicKey
from zos.instance import shared_transnet_instance
from zos.account import Account
from .exceptions import InvalidMessageSignature
from .storage import configStorage as config
log = logging.getLogger(__name__)
# Marker lines that delimit the four sections of an encapsulated signed
# message: header, meta section, signature section, footer. These exact
# strings are part of the wire format — do not change them.
MESSAGE_SPLIT = (
    "-----BEGIN BITSHARES SIGNED MESSAGE-----",
    "-----BEGIN META-----",
    "-----BEGIN SIGNATURE-----",
    "-----END BITSHARES SIGNED MESSAGE-----"
)
# Canonical payload that is actually signed/verified: message body plus meta.
SIGNED_MESSAGE_META = """{message}
account={meta[account]}
memokey={meta[memokey]}
block={meta[block]}
timestamp={meta[timestamp]}"""
# Full human-readable envelope returned by Message.sign().
SIGNED_MESSAGE_ENCAPSULATED = """
{MESSAGE_SPLIT[0]}
{message}
{MESSAGE_SPLIT[1]}
account={meta[account]}
memokey={meta[memokey]}
block={meta[block]}
timestamp={meta[timestamp]}
{MESSAGE_SPLIT[2]}
{signature}
{MESSAGE_SPLIT[3]}
"""
class Message():
    """Sign and verify free-form messages with an account's memo key."""
    def __init__(self, message, transnet_instance=None):
        # Blockchain connection; falls back to the shared global instance.
        self.transnet = transnet_instance or shared_transnet_instance()
        self.message = message
    def sign(self, account=None, **kwargs):
        """ Sign a message with an account's memo key

            :param str account: (optional) the account whose memo key signs
                (defaults to ``default_account`` from the config store)
            :returns: the signed message encapsulated in a known format
            :raises ValueError: if no account is given and none is configured
        """
        if not account:
            if "default_account" in config:
                account = config["default_account"]
        if not account:
            raise ValueError("You need to provide an account")
        # Data for message
        account = Account(account, transnet_instance=self.transnet)
        info = self.transnet.info()
        meta = dict(
            timestamp=info["time"],
            block=info["head_block_number"],
            memokey=account["options"]["memo_key"],
            account=account["name"])
        # wif key
        wif = self.transnet.wallet.getPrivateKeyForPublicKey(
            account["options"]["memo_key"]
        )
        # Sign the canonical payload (stripped message + meta).
        # NOTE: format(**locals()) picks up `message` and `meta` from this scope.
        message = self.message.strip()
        signature = hexlify(sign_message(
            SIGNED_MESSAGE_META.format(**locals()),
            wif
        )).decode("ascii")
        # Rebind to the original (unstripped) message for the envelope;
        # verify() strips it again before re-deriving the signed payload.
        message = self.message
        return SIGNED_MESSAGE_ENCAPSULATED.format(
            MESSAGE_SPLIT=MESSAGE_SPLIT,
            **locals()
        )
    def verify(self, **kwargs):
        """ Verify an encapsulated signed message

            :returns: True if the message is verified successfully
            :raises InvalidMessageSignature: if the recovered public key does
                not match the memo key stated in the message's meta section
        """
        # Split message into its parts
        parts = re.split("|".join(MESSAGE_SPLIT), self.message)
        parts = [x for x in parts if x.strip()]
        assert len(parts) > 2, "Incorrect number of message parts"
        message = parts[0].strip()
        signature = parts[2].strip()
        # Parse the meta data (key=value lines)
        meta = dict(re.findall(r'(\S+)=(.*)', parts[1]))
        # Ensure we have all the data in meta
        assert "account" in meta
        assert "memokey" in meta
        assert "block" in meta
        assert "timestamp" in meta
        # Load account from blockchain
        account = Account(
            meta.get("account"),
            transnet_instance=self.transnet)
        # A mismatch with the on-chain memo key is only logged, not fatal:
        # the signature is checked against the key stated in the message.
        if not account["options"]["memo_key"] == meta["memokey"]:
            log.error(
                "Memo Key of account {} on the Blockchain".format(
                    account["name"]) +
                "differs from memo key in the message: {} != {}".format(
                    account["options"]["memo_key"], meta["memokey"]
                )
            )
        # Rebuild the exact payload that was signed
        message = SIGNED_MESSAGE_META.format(**locals())
        # Recover the signing public key from the signature
        pubkey = verify_message(message, unhexlify(signature))
        # Compare recovered key (with chain prefix) against the stated memo key
        pk = PublicKey(hexlify(pubkey).decode("ascii"))
        if format(pk, self.transnet.prefix) != meta["memokey"]:
            raise InvalidMessageSignature
        return True
acee2f32cc9163886038dacb1104ecb1c4050ec2 | 2,433 | py | Python | libqtile/resources/default_config.py | CharString/qtile | 7dc1448fa866d4c3a2a5c2738c5cf3fcd10b0631 | [
"MIT"
] | null | null | null | libqtile/resources/default_config.py | CharString/qtile | 7dc1448fa866d4c3a2a5c2738c5cf3fcd10b0631 | [
"MIT"
] | null | null | null | libqtile/resources/default_config.py | CharString/qtile | 7dc1448fa866d4c3a2a5c2738c5cf3fcd10b0631 | [
"MIT"
] | null | null | null | from libqtile.config import Key, Screen, Group
from libqtile.command import lazy
from libqtile import layout, bar, widget
# Modifier key for all bindings (mod4 = Super/Windows key).
mod = "mod4"
keys = [
    # Switch between windows in current stack pane
    Key(
        [mod], "k",
        lazy.layout.down()
    ),
    Key(
        [mod], "j",
        lazy.layout.up()
    ),
    # Move windows up or down in current stack
    Key(
        [mod, "control"], "k",
        lazy.layout.shuffle_down()
    ),
    Key(
        [mod, "control"], "j",
        lazy.layout.shuffle_up()
    ),
    # Switch window focus to other pane(s) of stack
    Key(
        [mod], "space",
        lazy.layout.next()
    ),
    # Swap panes of split stack
    Key(
        [mod, "shift"], "space",
        lazy.layout.rotate()
    ),
    # Toggle between split and unsplit sides of stack.
    # Split = all windows displayed
    # Unsplit = 1 window displayed, like Max layout, but still with
    # multiple stack panes
    Key(
        [mod, "shift"], "Return",
        lazy.layout.toggle_split()
    ),
    # Launch a terminal
    Key([mod], "Return", lazy.spawn("xterm")),
    # Toggle between different layouts as defined below
    Key([mod], "Tab", lazy.nextlayout()),
    # Close the focused window
    Key([mod], "w", lazy.window.kill()),
    # Restart qtile in place
    Key([mod, "control"], "r", lazy.restart()),
    # Open the run-command prompt widget
    Key([mod], "r", lazy.spawncmd()),
]
# Workspace groups, each named after the key that activates it.
groups = [
    Group("a"),
    Group("s"),
    Group("d"),
    Group("f"),
    Group("u"),
    Group("i"),
    Group("o"),
    Group("p"),
]
# Generate per-group keybindings from the group names above.
for i in groups:
    # mod1 + letter of group = switch to group
    keys.append(
        Key([mod], i.name, lazy.group[i.name].toscreen())
    )
    # mod1 + shift + letter of group = switch to & move focused window to group
    keys.append(
        Key([mod, "shift"], i.name, lazy.window.togroup(i.name))
    )
# Dynamic-group hooks disabled for the default config.
dgroups_key_binder = None
dgroups_app_rules = []
# Available window layouts, cycled with mod+Tab.
layouts = [
    layout.Max(),
    layout.Stack(stacks=2)
]
# One screen with a bottom status bar (30 px tall).
screens = [
    Screen(
        bottom=bar.Bar(
            [
                widget.GroupBox(),
                widget.Prompt(),
                widget.WindowName(),
                widget.TextBox("default config", name="default"),
                widget.Systray(),
                widget.Clock('%Y-%m-%d %a %I:%M %p'),
            ],
            30,
        ),
    ),
]
# Miscellaneous behavior flags read by qtile at startup.
main = None
follow_mouse_focus = True
bring_front_click = False
cursor_warp = False
floating_layout = layout.Floating()
mouse = ()
auto_fullscreen = True
widget_defaults = {}
acee2fcdd202d9ce7923b117680c8983d7767a1e | 10,662 | py | Python | pybind/nos/v7_1_0/interface/fortygigabitethernet/switchport/private_vlan/host_association/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/nos/v7_1_0/interface/fortygigabitethernet/switchport/private_vlan/host_association/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/nos/v7_1_0/interface/fortygigabitethernet/switchport/private_vlan/host_association/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class host_association(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/fortygigabitethernet/switchport/private-vlan/host-association. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Host-association
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__host_pri_pvlan','__host_sec_pvlan',)
_yang_name = 'host-association'
_rest_name = 'host-association'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__host_pri_pvlan = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), is_leaf=True, yang_name="host-pri-pvlan", rest_name="host-pri-pvlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Primary vlan id', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='vlan-type', is_config=True)
self.__host_sec_pvlan = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), is_leaf=True, yang_name="host-sec-pvlan", rest_name="host-sec-pvlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Secondary vlan id', u'cli-drop-node-name': None, u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='vlan-type', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'fortygigabitethernet', u'switchport', u'private-vlan', u'host-association']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'FortyGigabitEthernet', u'switchport', u'private-vlan', u'host-association']
def _get_host_pri_pvlan(self):
    """
    Getter method for host_pri_pvlan, mapped from YANG variable /interface/fortygigabitethernet/switchport/private_vlan/host_association/host_pri_pvlan (vlan-type)
    """
    # Returns the name-mangled private attribute holding the
    # YANGDynClass-wrapped primary vlan id (restricted to 1..8191).
    return self.__host_pri_pvlan
def _set_host_pri_pvlan(self, v, load=False):
    """
    Setter method for host_pri_pvlan, mapped from YANG variable /interface/fortygigabitethernet/switchport/private_vlan/host_association/host_pri_pvlan (vlan-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_host_pri_pvlan is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_host_pri_pvlan() directly.
    """
    # Let a user-supplied wrapper type coerce itself first (pyangbind "_utype" hook).
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value in the generated restricted vlan-type; construction
        # raises if v is outside the 1..8191 range or of an incompatible type.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), is_leaf=True, yang_name="host-pri-pvlan", rest_name="host-pri-pvlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Primary vlan id', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='vlan-type', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected YANG type.
        raise ValueError({
            'error-string': """host_pri_pvlan must be of a type compatible with vlan-type""",
            'defined-type': "brocade-interface:vlan-type",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), is_leaf=True, yang_name="host-pri-pvlan", rest_name="host-pri-pvlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Primary vlan id', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='vlan-type', is_config=True)""",
        })
    self.__host_pri_pvlan = t
    # Notify the enclosing object, if it exposes a change hook.
    if hasattr(self, '_set'):
        self._set()
def _unset_host_pri_pvlan(self):
    # Reset host_pri_pvlan to a fresh (unset) instance of the generated vlan-type.
    self.__host_pri_pvlan = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), is_leaf=True, yang_name="host-pri-pvlan", rest_name="host-pri-pvlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Primary vlan id', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='vlan-type', is_config=True)
def _get_host_sec_pvlan(self):
    """
    Getter method for host_sec_pvlan, mapped from YANG variable /interface/fortygigabitethernet/switchport/private_vlan/host_association/host_sec_pvlan (vlan-type)
    """
    # Returns the name-mangled private attribute holding the
    # YANGDynClass-wrapped secondary vlan id (restricted to 1..8191).
    return self.__host_sec_pvlan
def _set_host_sec_pvlan(self, v, load=False):
    """
    Setter method for host_sec_pvlan, mapped from YANG variable /interface/fortygigabitethernet/switchport/private_vlan/host_association/host_sec_pvlan (vlan-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_host_sec_pvlan is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_host_sec_pvlan() directly.
    """
    # Let a user-supplied wrapper type coerce itself first (pyangbind "_utype" hook).
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value in the generated restricted vlan-type; construction
        # raises if v is outside the 1..8191 range or of an incompatible type.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), is_leaf=True, yang_name="host-sec-pvlan", rest_name="host-sec-pvlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Secondary vlan id', u'cli-drop-node-name': None, u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='vlan-type', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected YANG type.
        raise ValueError({
            'error-string': """host_sec_pvlan must be of a type compatible with vlan-type""",
            'defined-type': "brocade-interface:vlan-type",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), is_leaf=True, yang_name="host-sec-pvlan", rest_name="host-sec-pvlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Secondary vlan id', u'cli-drop-node-name': None, u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='vlan-type', is_config=True)""",
        })
    self.__host_sec_pvlan = t
    # Notify the enclosing object, if it exposes a change hook.
    if hasattr(self, '_set'):
        self._set()
def _unset_host_sec_pvlan(self):
    # Reset host_sec_pvlan to a fresh (unset) instance of the generated vlan-type.
    self.__host_sec_pvlan = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), is_leaf=True, yang_name="host-sec-pvlan", rest_name="host-sec-pvlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Secondary vlan id', u'cli-drop-node-name': None, u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='vlan-type', is_config=True)
# Public pyangbind accessors: reads/writes route through the generated
# getter/setter so stored values stay constrained to the vlan-type range.
# (`__builtin__.property` indicates Python 2-era generated code.)
host_pri_pvlan = __builtin__.property(_get_host_pri_pvlan, _set_host_pri_pvlan)
host_sec_pvlan = __builtin__.property(_get_host_sec_pvlan, _set_host_sec_pvlan)

# Registry of this container's YANG child elements, consumed by pyangbind's
# generic copy/compare machinery.
_pyangbind_elements = {'host_pri_pvlan': host_pri_pvlan, 'host_sec_pvlan': host_sec_pvlan, }
| 66.6375 | 637 | 0.734478 |
acee306ebb6b0d8538ab5047c9d60587a54384d8 | 461 | py | Python | tests/test_json.py | hukkinj1/mdformat-config | 63fbe2ab33940c7f93fa4803fce722efd92bb3ff | [
"MIT"
] | 1 | 2021-01-19T13:52:36.000Z | 2021-01-19T13:52:36.000Z | tests/test_json.py | hukkin/mdformat-config | e53692b39eba9e4cbe3563753b0a5f0127392eac | [
"MIT"
] | 1 | 2021-01-19T21:11:44.000Z | 2021-01-26T00:15:09.000Z | tests/test_json.py | hukkin/mdformat-config | e53692b39eba9e4cbe3563753b0a5f0127392eac | [
"MIT"
] | 1 | 2022-01-10T01:29:16.000Z | 2022-01-10T01:29:16.000Z | import mdformat
import mdformat_config
def test_format_json():
    """format_json pretty-prints a compact JSON document with 2-space indentation."""
    # NOTE(review): indentation inside the expected literal reconstructed as the
    # standard json.dumps(indent=2) layout — confirm against mdformat_config.
    compact = """{"a": 1, "b": 2}"""
    pretty = """{
  "a": 1,
  "b": 2
}
"""
    assert mdformat_config.format_json(compact, "") == pretty
unformatted_md = """~~~json
{"a": 1, "b": 2}
~~~
"""
formatted_md = """```json
{
"a": 1,
"b": 2
}
```
"""
assert mdformat.text(unformatted_md, codeformatters={"json"}) == formatted_md
| 15.896552 | 81 | 0.585683 |
acee311e1b833981176970e8b15db0a3f95153c7 | 5,207 | py | Python | src/main.py | vilelabruno/MarcowitzPortfolioOPT | 54086d5004b5b05f811732efe229931f3af0a6fa | [
"MIT"
] | 2 | 2020-03-09T14:39:50.000Z | 2020-03-10T12:48:25.000Z | src/main.py | vilelabruno/MarkowitzPortfolioOPT | 54086d5004b5b05f811732efe229931f3af0a6fa | [
"MIT"
] | null | null | null | src/main.py | vilelabruno/MarkowitzPortfolioOPT | 54086d5004b5b05f811732efe229931f3af0a6fa | [
"MIT"
] | null | null | null | from pandas_datareader import data
import datetime
import pandas as pd
import numpy as np
import more_itertools as mit
def readFile(stockbroker):
    """Load a broker portfolio file from ../tickers/<stockbroker>.txt.

    The file holds two comma-separated lines: ticker symbols, then their
    portfolio weights. Only the last element of each line carries the
    trailing newline, so only it is stripped.

    Returns:
        (shares, weight): two lists of strings.
    """
    path = "../tickers/" + stockbroker + ".txt"
    with open(path) as prtf:
        shares = prtf.readline().split(",")
        shares[-1] = shares[-1].strip()
        weight = prtf.readline().split(",")
        weight[-1] = weight[-1].strip()
    return shares, weight
def marko1():
    """Download three months of Yahoo Finance quotes for the ticker list and
    dump per-share Markowitz-style return/risk statistics to shDf2.csv.

    NOTE(review): heavy use of chained indexing (shDf[col][mask] = ...)
    depends on pandas returning a view (SettingWithCopyWarning) — verify.
    """
    shares = []
    monthReturn = []  # NOTE(review): never used
    weight = []       # NOTE(review): overwritten immediately below
    shares, weight = readFile("tickers")
    #df = pd.read_csv('../tickers/generic.csv')
    #shares = df['Ticker']
    # Drop tickers with no usable Yahoo Finance data (plus a stray empty entry).
    for x in ['IMRA', '1961.HK', '6918.HK', 'ENPH', '']:
        shares.remove(x)
    start_date = '2019-12-03'
    end_date = '2020-03-03'
    returnArr = []  # NOTE(review): never used
    df = data.DataReader(shares, 'yahoo', start_date, end_date)
    df.reset_index(inplace=True)
    #df[("Open", "TGAR11.SA")][df[("Date","")] == "2020-03-03"] = 131.80 #yahoo finance dont returning correct value here
    #df.to_csv("dr.csv", index=False)
    # One row per ticker; mNR = absolute price change over month N,
    # mNP = relative (fractional) change over month N.
    shDf = pd.DataFrame(shares,columns=["share"])
    shDf.reset_index(inplace=True)
    shDf["m1R"] = 0
    shDf["m1P"] = 0
    shDf["m2R"] = 0
    shDf["m2P"] = 0
    shDf["m3R"] = 0
    shDf["m3P"] = 0
    for share in shares:
        # Month windows are fixed calendar slices of the downloaded range.
        cl1 = df[('Close', share)][df[("Date","")] == "2020-01-03"]
        op1 = df[('Open', share)][df[("Date","")] == "2019-12-03"]
        cl2 = df[('Close', share)][df[("Date","")] == "2020-02-05"]
        op2 = df[('Open', share)][df[("Date","")] == "2020-01-03"]
        # NOTE(review): month 3 takes the Close of 2020-02-05 and the Open of
        # 2020-03-03 (and divides by the 2020-03-03 open below) — the dates
        # look swapped relative to months 1/2; confirm intent.
        cl3 = df[('Close', share)][df[("Date","")] == "2020-02-05"]
        op3 = df[('Open', share)][df[("Date","")] == "2020-03-03"]
        #print(op2.iloc[0])
        shDf["m1R"][shDf["share"] == share] = cl1.iloc[0] - op1.iloc[0]
        shDf["m1P"][shDf["share"] == share] = (cl1.iloc[0] - op1.iloc[0]) / df[('Open', share)][df[("Date","")] == "2019-12-03"].iloc[0]
        shDf["m2R"][shDf["share"] == share] = (cl2.iloc[0] - op2.iloc[0])
        shDf["m2P"][shDf["share"] == share] = (cl2.iloc[0] - op2.iloc[0]) / df[('Open', share)][df[("Date","")] == "2020-01-03"].iloc[0]
        shDf["m3R"][shDf["share"] == share] = (cl3.iloc[0] - op3.iloc[0])
        shDf["m3P"][shDf["share"] == share] = (cl3.iloc[0] - op3.iloc[0]) / df[('Open', share)][df[("Date","")] == "2020-03-03"].iloc[0]
    # Per-month deviation of each return from the 3-month total, in percent.
    # NOTE(review): subtracts the *sum* of the three monthly returns rather
    # than their mean — confirm whether a /3 is missing.
    shDf["m1E"] = (shDf["m1P"] - (shDf["m1P"]+shDf["m2P"]+shDf["m3P"])) * 100
    shDf["m2E"] = (shDf["m2P"] - (shDf["m1P"]+shDf["m2P"]+shDf["m3P"])) * 100
    shDf["m3E"] = (shDf["m3P"] - (shDf["m1P"]+shDf["m2P"]+shDf["m3P"])) * 100
    # Squared deviations, sample variance (n-1 = 2), and standard deviation.
    shDf["m1EQ"] = shDf["m1E"] **2
    shDf["m2EQ"] = shDf["m2E"] **2
    shDf["m3EQ"] = shDf["m3E"] **2
    shDf["variance"] = (shDf["m1EQ"] +shDf["m2EQ"] +shDf["m3EQ"]) * (1/(3-1))
    shDf["stdDev"] = shDf["variance"] ** (1/2)
    shDf["mi"] = (shDf["m1E"] + shDf["m2E"] + shDf["m3E"]) /3
    assets = shDf.share.values
    print(shDf)
    # Add one column per asset to hold pairwise co-movement values.
    for asset1 in assets:
        shDf[asset1] = 0
    # Pairwise products of monthly deviations, halved.
    # NOTE(review): a covariance would divide by (n-1); confirm the 1/2 factor.
    for asset1 in assets:
        for asset2 in assets:
            shDf[asset1][shDf["share"] == asset2] = (1/2) * ((shDf["m1E"][shDf["share"] == asset1].iloc[0] * shDf["m1E"][shDf["share"] == asset2].iloc[0]) +\
                (shDf["m2E"][shDf["share"] == asset1].iloc[0] * shDf["m2E"][shDf["share"] == asset2].iloc[0]) +\
                (shDf["m3E"][shDf["share"] == asset1].iloc[0] * shDf["m3E"][shDf["share"] == asset2].iloc[0]))
    print(shDf)
    #cov = (1/2) * ( (m1s1 * m1s2* m1s3* m1s4* m1s5* m1s6* m1s7* m1s8) +(m2s1 * m2s2* m2s3* m2s4* m2s5* m2s6* m2s7* m2s8) +(m3s1 * m3s2* m3s3* m3s4* m3s5* m3s6* m3s7* m3s8))
    #print('Covariance: '+str(cov))
    shDf.to_csv("shDf2.csv", index=False)
    ##print(shDf)
    #corre = cov / shDf['stdDev'].prod() #Correlation
    #print('Correlation: '+str(corre))
def marko2():
    """Print the portfolio's expected rate of return for the 'necton' broker file.

    Downloads three months of quotes, computes each ticker's monthly rate of
    return (averaged over the 3-month window), then weights the first
    len(weight) entries by the portfolio weights.
    """
    shares, weight = readFile('necton')
    start_date = '2019-12-03'
    end_date = '2020-03-03'
    df = data.DataReader(shares, 'yahoo', start_date, end_date)
    df.reset_index(inplace=True)
    # Patch a bad Yahoo Finance data point for TGAR11.SA.
    df[("Close", "TGAR11.SA")][df[("Date","")] == "2020-02-28"] = 131.80
    months = [12, 1, 2]
    ri = []
    for share in shares:
        for m in months:
            first, last = getFirstLastDaysOfMonth(m, df)
            month_open = df['Open'][share][df['Date'] == first].iloc[0]
            month_close = df['Close'][share][df['Date'] == last].iloc[0]
            rate_of_return = ((month_close - month_open) / month_open) * 100
            ri.append(rate_of_return / 3)
    eror = sum(float(weight[i]) * float(ri[i]) for i in range(len(weight)))
    print('Extimated Rate of Return: '+str(eror))
def marko3():
    """Placeholder for a Markowitz pipeline step — not implemented; always True."""
    return True
def marko4():
    """Placeholder for a Markowitz pipeline step — not implemented; always True."""
    return True
def marko5():
    """Placeholder for a Markowitz pipeline step — not implemented; always True."""
    return True
def marko6():
    """Placeholder for a Markowitz pipeline step — not implemented; always True."""
    return True
def marko7():
    """Placeholder for a Markowitz pipeline step — not implemented; always True."""
    return True
def getFirstLastDaysOfMonth(month, dataframe):
    """Return the first and last dates (in row order) for a given month.

    Filters `dataframe['Date']` to rows whose month number equals `month`
    and returns the first and last matching dates as ISO strings
    ('YYYY-MM-DD').

    Args:
        month: calendar month number (1-12) to select.
        dataframe: pandas DataFrame with a datetime-like 'Date' column.

    Returns:
        (first, last): ISO date strings of the first and last matching rows.

    Raises:
        IndexError: if no row matches `month`.
    """
    # Filter once; .iloc[0]/.iloc[-1] keep the original row-order semantics
    # of the mit.first/mit.last calls this replaces, without needing the
    # third-party more_itertools dependency.
    month_dates = dataframe['Date'][dataframe['Date'].apply(lambda x: x.month) == month]
    first = month_dates.iloc[0]
    last = month_dates.iloc[-1]
    return str(first.date()), str(last.date())
# NOTE(review): the guard below is commented out AND wrong — it compares
# against 'main' instead of '__main__'. As written, the calls run on import.
#if __name__ == 'main':
marko1()
#marko2()
#marko3()
marko4()  # placeholder step — currently just returns True
marko5()  # placeholder step — currently just returns True
marko6()  # placeholder step — currently just returns True
marko7()  # placeholder step — currently just returns True
acee3146f10e9fe8d407ff674652219edf0b28ea | 15,744 | py | Python | train_GMM_and_compute_EVE_scores.py | ice1ce/EVE | 7d4f336bc76aea54e7523f1f88590d2d95d6ffd2 | [
"MIT"
] | 50 | 2021-10-29T18:31:20.000Z | 2022-03-15T17:47:31.000Z | train_GMM_and_compute_EVE_scores.py | ice1ce/EVE | 7d4f336bc76aea54e7523f1f88590d2d95d6ffd2 | [
"MIT"
] | 6 | 2021-06-17T04:00:14.000Z | 2022-03-23T13:29:26.000Z | train_GMM_and_compute_EVE_scores.py | ice1ce/EVE | 7d4f336bc76aea54e7523f1f88590d2d95d6ffd2 | [
"MIT"
] | 13 | 2021-04-20T09:00:45.000Z | 2022-02-06T22:18:53.000Z | import os
import numpy as np
import pandas as pd
import argparse
import pickle
import tqdm
import json
from sklearn import mixture, linear_model, svm, gaussian_process
from utils import performance_helpers as ph, plot_helpers
if __name__=='__main__':
    # Command-line interface: where to read per-protein evolutionary indices,
    # where to write EVE scores, and which optional stages to run (GMM
    # training/loading, scoring, plotting).
    parser = argparse.ArgumentParser(description='GMM fit and EVE scores computation')
    parser.add_argument('--input_evol_indices_location', type=str, help='Folder where all individual files with evolutionary indices are stored')
    parser.add_argument('--input_evol_indices_filename_suffix', type=str, default='', help='Suffix that was added when generating the evol indices files')
    parser.add_argument('--protein_list', type=str, help='List of proteins to be included (one per row)')
    parser.add_argument('--output_eve_scores_location', type=str, help='Folder where all EVE scores are stored')
    parser.add_argument('--output_eve_scores_filename_suffix', default='', type=str, help='(Optional) Suffix to be added to output filename')
    parser.add_argument('--load_GMM_models', default=False, action='store_true', help='If True, load GMM model parameters. If False, train GMMs from evol indices files')
    parser.add_argument('--GMM_parameter_location', default=None, type=str, help='Folder where GMM objects are stored if loading / to be stored if we are re-training')
    parser.add_argument('--GMM_parameter_filename_suffix', default=None, type=str, help='Suffix of GMMs model files to load')
    parser.add_argument('--protein_GMM_weight', default=0.3, type=float, help='Value of global-local GMM mixing parameter')
    parser.add_argument('--compute_EVE_scores', default=False, action='store_true', help='Computes EVE scores and uncertainty metrics for all input protein mutations')
    parser.add_argument('--recompute_uncertainty_threshold', default=False, action='store_true', help='Recompute uncertainty thresholds based on all evol indices in file. Otherwise loads default threhold.')
    parser.add_argument('--default_uncertainty_threshold_file_location', default='./utils/default_uncertainty_threshold.json', type=str, help='Location of default uncertainty threholds.')
    parser.add_argument('--plot_histograms', default=False, action='store_true', help='Plots all evol indices histograms with GMM fits')
    parser.add_argument('--plot_scores_vs_labels', default=False, action='store_true', help='Plots EVE scores Vs labels at each protein position')
    parser.add_argument('--labels_file_location', default=None, type=str, help='File with ground truth labels for all proteins of interest (e.g., ClinVar)')
    parser.add_argument('--plot_location', default=None, type=str, help='Location of the different plots')
    parser.add_argument('--verbose', action='store_true', help='Print detailed information during run')
    args = parser.parse_args()

    # One protein name per row in the mapping file; deduplicated via np.unique.
    mapping_file = pd.read_csv(args.protein_list,low_memory=False)
    protein_list = np.unique(mapping_file['protein_name'])

    # Concatenate the evol-indices CSVs of every protein that has one on disk,
    # keeping only the columns needed for GMM training and scoring.
    list_variables_to_keep=['protein_name','mutations','evol_indices']
    all_evol_indices = pd.concat([pd.read_csv(args.input_evol_indices_location+os.sep+protein+args.input_evol_indices_filename_suffix+'.csv',low_memory=False)[list_variables_to_keep] \
        for protein in protein_list if os.path.exists(args.input_evol_indices_location+os.sep+protein+args.input_evol_indices_filename_suffix+'.csv')], ignore_index=True)
    all_evol_indices = all_evol_indices.drop_duplicates()

    # Training matrix for the GMMs: one scalar evolutionary index per mutation.
    X_train = np.array(all_evol_indices['evol_indices']).reshape(-1, 1)
    if args.verbose:
        print("Training data size: "+str(len(X_train)))
        print("Number of distinct proteins in protein_list: "+str(len(np.unique(all_evol_indices['protein_name']))))
if args.load_GMM_models:
    # Reload previously trained GMMs and the per-model index of the
    # pathogenic mixture component.
    dict_models = pickle.load( open( args.GMM_parameter_location+os.sep+'GMM_model_dictionary_'+args.GMM_parameter_filename_suffix, "rb" ) )
    dict_pathogenic_cluster_index = pickle.load( open( args.GMM_parameter_location+os.sep+'GMM_pathogenic_cluster_index_dictionary_'+args.GMM_parameter_filename_suffix, "rb" ) )
else:
    # Train from scratch: one global 2-component GMM over all proteins, plus
    # (optionally) one per-protein GMM initialized from the global fit.
    dict_models = {}
    dict_pathogenic_cluster_index = {}
    if not os.path.exists(args.GMM_parameter_location+os.sep+args.output_eve_scores_filename_suffix):
        os.makedirs(args.GMM_parameter_location+os.sep+args.output_eve_scores_filename_suffix)
    # CSV log of fitted component weights/means/std-devs, one row per model.
    GMM_stats_log_location=args.GMM_parameter_location+os.sep+args.output_eve_scores_filename_suffix+os.sep+'GMM_stats_'+args.output_eve_scores_filename_suffix+'.csv'
    with open(GMM_stats_log_location, "a") as logs:
        logs.write("protein_name,weight_pathogenic,mean_pathogenic,mean_benign,std_dev_pathogenic,std_dev_benign\n")
    main_GMM = mixture.GaussianMixture(n_components=2, covariance_type='full',max_iter=1000,n_init=30,tol=1e-4)
    main_GMM.fit(X_train)
    dict_models['main'] = main_GMM
    pathogenic_cluster_index = np.argmax(np.array(main_GMM.means_).flatten()) #The pathogenic cluster is the cluster with higher mean value
    dict_pathogenic_cluster_index['main'] = pathogenic_cluster_index
    if args.verbose:
        inferred_params = main_GMM.get_params()
        print("Index of mixture component with highest mean: "+str(pathogenic_cluster_index))
        print("Model parameters: "+str(inferred_params))
        print("Mixture component weights: "+str(main_GMM.weights_))
        print("Mixture component means: "+str(main_GMM.means_))
        print("Cluster component cov: "+str(main_GMM.covariances_))
    # Log the global model's component statistics.
    with open(GMM_stats_log_location, "a") as logs:
        logs.write(",".join(str(x) for x in [
            'main', np.array(main_GMM.weights_).flatten()[dict_pathogenic_cluster_index['main']], np.array(main_GMM.means_).flatten()[dict_pathogenic_cluster_index['main']],
            np.array(main_GMM.means_).flatten()[1 - dict_pathogenic_cluster_index['main']], np.sqrt(np.array(main_GMM.covariances_).flatten()[dict_pathogenic_cluster_index['main']]),
            np.sqrt(np.array(main_GMM.covariances_).flatten()[1 - dict_pathogenic_cluster_index['main']])
        ])+"\n")
    if args.protein_GMM_weight > 0.0:
        # Per-protein GMMs are warm-started from the global fit so their
        # components stay aligned with the global benign/pathogenic clusters.
        for protein in tqdm.tqdm(protein_list, "Training all protein GMMs"):
            X_train_protein = np.array(all_evol_indices['evol_indices'][all_evol_indices.protein_name==protein]).reshape(-1, 1)
            if len(X_train_protein) > 0: #We have evol indices computed for protein on file
                protein_GMM = mixture.GaussianMixture(n_components=2,covariance_type='full',max_iter=1000,tol=1e-4,weights_init=main_GMM.weights_,means_init=main_GMM.means_,precisions_init=main_GMM.precisions_)
                protein_GMM.fit(X_train_protein)
                dict_models[protein] = protein_GMM
                dict_pathogenic_cluster_index[protein] = np.argmax(np.array(protein_GMM.means_).flatten())
                with open(GMM_stats_log_location, "a") as logs:
                    logs.write(",".join(str(x) for x in [
                        protein, np.array(protein_GMM.weights_).flatten()[dict_pathogenic_cluster_index[protein]], np.array(protein_GMM.means_).flatten()[dict_pathogenic_cluster_index[protein]],
                        np.array(protein_GMM.means_).flatten()[1 - dict_pathogenic_cluster_index[protein]], np.sqrt(np.array(protein_GMM.covariances_).flatten()[dict_pathogenic_cluster_index[protein]]),
                        np.sqrt(np.array(protein_GMM.covariances_).flatten()[1 - dict_pathogenic_cluster_index[protein]])
                    ])+"\n")
            else:
                if args.verbose:
                    print("No evol indices for the protein: "+str(protein)+". Skipping.")
    # Persist the freshly trained models for later --load_GMM_models runs.
    pickle.dump(dict_models, open(args.GMM_parameter_location+os.sep+args.output_eve_scores_filename_suffix+os.sep+'GMM_model_dictionary_'+args.output_eve_scores_filename_suffix, 'wb'))
    pickle.dump(dict_pathogenic_cluster_index, open(args.GMM_parameter_location+os.sep+args.output_eve_scores_filename_suffix+os.sep+'GMM_pathogenic_cluster_index_dictionary_'+args.output_eve_scores_filename_suffix, 'wb'))
if args.plot_histograms:
    # Per-protein evol-indices histograms overlaid with the fitted GMM densities.
    if not os.path.exists(args.plot_location+os.sep+'plots_histograms'+os.sep+args.output_eve_scores_filename_suffix):
        os.makedirs(args.plot_location+os.sep+'plots_histograms'+os.sep+args.output_eve_scores_filename_suffix)
    plot_helpers.plot_histograms(all_evol_indices, dict_models, dict_pathogenic_cluster_index, args.protein_GMM_weight, args.plot_location+os.sep+'plots_histograms'+os.sep+args.output_eve_scores_filename_suffix, args.output_eve_scores_filename_suffix, protein_list)
if args.compute_EVE_scores:
    if args.protein_GMM_weight > 0.0:
        # Mixed scoring: blend the global and per-protein GMM posteriors with
        # weight protein_GMM_weight on the protein-specific model.
        all_scores = all_evol_indices.copy()
        all_scores['EVE_scores'] = np.nan
        all_scores['EVE_classes_100_pct_retained'] = ""
        for protein in tqdm.tqdm(protein_list,"Scoring all protein mutations"):
            try:
                test_data_protein = all_scores[all_scores.protein_name==protein]
                X_test_protein = np.array(test_data_protein['evol_indices']).reshape(-1, 1)
                mutation_scores_protein = ph.compute_weighted_score_two_GMMs(X_pred=X_test_protein,
                                            main_model = dict_models['main'],
                                            protein_model=dict_models[protein],
                                            cluster_index_main = dict_pathogenic_cluster_index['main'],
                                            cluster_index_protein = dict_pathogenic_cluster_index[protein],
                                            protein_weight = args.protein_GMM_weight)
                gmm_class_protein = ph.compute_weighted_class_two_GMMs(X_pred=X_test_protein,
                                            main_model = dict_models['main'],
                                            protein_model=dict_models[protein],
                                            cluster_index_main = dict_pathogenic_cluster_index['main'],
                                            cluster_index_protein = dict_pathogenic_cluster_index[protein],
                                            protein_weight = args.protein_GMM_weight)
                gmm_class_label_protein = pd.Series(gmm_class_protein).map(lambda x: 'Pathogenic' if x == 1 else 'Benign')
                all_scores.loc[all_scores.protein_name==protein, 'EVE_scores'] = np.array(mutation_scores_protein)
                all_scores.loc[all_scores.protein_name==protein, 'EVE_classes_100_pct_retained'] = np.array(gmm_class_label_protein)
            except:
                # Best-effort: a protein without a trained model (or with bad
                # data) is skipped rather than aborting the whole run.
                print("Issues with protein: "+str(protein)+". Skipping.")
    else:
        # Global-only scoring: posterior probability of the pathogenic
        # component of the main GMM.
        all_scores = all_evol_indices.copy()
        mutation_scores = dict_models['main'].predict_proba(np.array(all_scores['evol_indices']).reshape(-1, 1))
        all_scores['EVE_scores'] = mutation_scores[:,dict_pathogenic_cluster_index['main']]
        gmm_class = dict_models['main'].predict(np.array(all_scores['evol_indices']).reshape(-1, 1))
        all_scores['EVE_classes_100_pct_retained'] = np.array(pd.Series(gmm_class).map(lambda x: 'Pathogenic' if x == dict_pathogenic_cluster_index['main'] else 'Benign'))

    # Mutations that could not be scored are dropped.
    len_before_drop_na = len(all_scores)
    all_scores = all_scores.dropna(subset=['EVE_scores'])
    len_after_drop_na = len(all_scores)
    if args.verbose:
        scores_stats = ph.compute_stats(all_scores['EVE_scores'])
        print("Score stats: "+str(scores_stats))
        print("Dropped mutations due to missing EVE scores: "+str(len_after_drop_na-len_before_drop_na))

    # Predictive (binary) entropy of each score; high entropy = uncertain call.
    all_scores['uncertainty'] = ph.predictive_entropy_binary_classifier(all_scores['EVE_scores'])
    if args.recompute_uncertainty_threshold:
        uncertainty_cutoffs_deciles, _, _ = ph.compute_uncertainty_deciles(all_scores)
        uncertainty_cutoffs_quartiles, _, _ = ph.compute_uncertainty_quartiles(all_scores)
        if args.verbose:
            print("Uncertainty cutoffs deciles: "+str(uncertainty_cutoffs_deciles))
            print("Uncertainty cutoffs quartiles: "+str(uncertainty_cutoffs_quartiles))
    else:
        uncertainty_thresholds = json.load(open(args.default_uncertainty_threshold_file_location))
        uncertainty_cutoffs_deciles = uncertainty_thresholds["deciles"]
        uncertainty_cutoffs_quartiles = uncertainty_thresholds["quartiles"]

    # Classification columns at decreasing retention: mutations above a given
    # uncertainty cutoff are relabeled 'Uncertain'.
    for decile in range(1,10):
        classification_name = 'EVE_classes_'+str((decile)*10)+"_pct_retained"
        all_scores[classification_name] = all_scores['EVE_classes_100_pct_retained']
        all_scores.loc[all_scores['uncertainty'] > uncertainty_cutoffs_deciles[str(decile)], classification_name] = 'Uncertain'
        if args.verbose:
            print("Stats classification by uncertainty for decile #:"+str(decile))
            print(all_scores[classification_name].value_counts(normalize=True))
    if args.verbose:
        print("Stats classification by uncertainty for decile #:"+str(10))
        print(all_scores['EVE_classes_100_pct_retained'].value_counts(normalize=True))
    # Same scheme at quartile granularity (25% and 75% retained).
    for quartile in [1,3]:
        classification_name = 'EVE_classes_'+str((quartile)*25)+"_pct_retained"
        all_scores[classification_name] = all_scores['EVE_classes_100_pct_retained']
        all_scores.loc[all_scores['uncertainty'] > uncertainty_cutoffs_quartiles[str(quartile)], classification_name] = 'Uncertain'
        if args.verbose:
            print("Stats classification by uncertainty for quartile #:"+str(quartile))
            print(all_scores[classification_name].value_counts(normalize=True))
    all_scores.to_csv(args.output_eve_scores_location+os.sep+'all_EVE_scores_'+args.output_eve_scores_filename_suffix+'.csv', index=False)
if args.plot_scores_vs_labels:
    # Join EVE scores with ground-truth labels (e.g. ClinVar) and plot
    # scores vs labels per protein. Label 0.5 rows are excluded; the
    # remaining rows are the labeled pathogenic/benign set.
    labels_dataset=pd.read_csv(args.labels_file_location,low_memory=False)
    all_scores_mutations_with_labels = pd.merge(all_scores, labels_dataset[['protein_name','mutations','ClinVar_labels']], how='inner', on=['protein_name','mutations'])
    all_PB_scores = all_scores_mutations_with_labels[all_scores_mutations_with_labels.ClinVar_labels!=0.5].copy()
    if not os.path.exists(args.plot_location+os.sep+'plots_scores_vs_labels'+os.sep+args.output_eve_scores_filename_suffix):
        os.makedirs(args.plot_location+os.sep+'plots_scores_vs_labels'+os.sep+args.output_eve_scores_filename_suffix)
    for protein in tqdm.tqdm(protein_list,"Plot scores Vs labels"):
        plot_helpers.plot_scores_vs_labels(score_df=all_PB_scores[all_PB_scores.protein_name==protein],
                plot_location=args.plot_location+os.sep+'plots_scores_vs_labels'+os.sep+args.output_eve_scores_filename_suffix,
                output_eve_scores_filename_suffix=args.output_eve_scores_filename_suffix+'_'+protein,
                mutation_name='mutations', score_name="EVE_scores", label_name='ClinVar_labels')
acee329a9a4b9eb045c3224e9eeb1c0b72785454 | 7,890 | py | Python | contrib/runners/remote_runner/remote_runner/remote_script_runner.py | machao19902/st2 | 6768a529af1b3c12109cbfeae19d3cf7fdb71bb7 | [
"Apache-2.0"
] | null | null | null | contrib/runners/remote_runner/remote_runner/remote_script_runner.py | machao19902/st2 | 6768a529af1b3c12109cbfeae19d3cf7fdb71bb7 | [
"Apache-2.0"
] | 1 | 2020-03-19T23:45:59.000Z | 2020-03-19T23:45:59.000Z | contrib/runners/remote_runner/remote_runner/remote_script_runner.py | machao19902/st2 | 6768a529af1b3c12109cbfeae19d3cf7fdb71bb7 | [
"Apache-2.0"
] | null | null | null | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import sys
import traceback
import uuid
from oslo_config import cfg
from st2common import log as logging
from st2common.runners.paramiko_ssh_runner import RUNNER_REMOTE_DIR
from st2common.runners.paramiko_ssh_runner import BaseParallelSSHRunner
from st2common.runners.base import get_metadata as get_runner_metadata
from st2common.models.system.paramiko_script_action import ParamikoRemoteScriptAction
__all__ = [
'ParamikoRemoteScriptRunner',
'get_runner',
'get_metadata'
]
LOG = logging.getLogger(__name__)
class ParamikoRemoteScriptRunner(BaseParallelSSHRunner):
def run(self, action_parameters):
remote_action = self._get_remote_action(action_parameters)
LOG.debug('Executing remote action.', extra={'_action_params': remote_action})
result = self._run(remote_action)
LOG.debug('Executed remote action.', extra={'_result': result})
status = self._get_result_status(result, cfg.CONF.ssh_runner.allow_partial_failure)
return (status, result, None)
def _run(self, remote_action):
try:
copy_results = self._copy_artifacts(remote_action)
except:
# If for whatever reason there is a top level exception,
# we just bail here.
error = 'Failed copying content to remote boxes.'
LOG.exception(error)
_, ex, tb = sys.exc_info()
copy_results = self._generate_error_results(' '.join([error, str(ex)]), tb)
return copy_results
try:
exec_results = self._run_script_on_remote_host(remote_action)
try:
remote_dir = remote_action.get_remote_base_dir()
LOG.debug('Deleting remote execution dir.', extra={'_remote_dir': remote_dir})
delete_results = self._parallel_ssh_client.delete_dir(path=remote_dir,
force=True)
LOG.debug('Deleted remote execution dir.', extra={'_result': delete_results})
except:
LOG.exception('Failed deleting remote dir.', extra={'_remote_dir': remote_dir})
finally:
return exec_results
except:
error = 'Failed executing script on remote boxes.'
LOG.exception(error, extra={'_action_params': remote_action})
_, ex, tb = sys.exc_info()
exec_results = self._generate_error_results(' '.join([error, str(ex)]), tb)
return exec_results
def _copy_artifacts(self, remote_action):
# First create remote execution directory.
remote_dir = remote_action.get_remote_base_dir()
LOG.debug('Creating remote execution dir.', extra={'_path': remote_dir})
mkdir_result = self._parallel_ssh_client.mkdir(path=remote_action.get_remote_base_dir())
# Copy the script to remote dir in remote host.
local_script_abs_path = remote_action.get_local_script_abs_path()
remote_script_abs_path = remote_action.get_remote_script_abs_path()
file_mode = 0o744
extra = {'_local_script': local_script_abs_path, '_remote_script': remote_script_abs_path,
'mode': file_mode}
LOG.debug('Copying local script to remote box.', extra=extra)
put_result_1 = self._parallel_ssh_client.put(local_path=local_script_abs_path,
remote_path=remote_script_abs_path,
mirror_local_mode=False, mode=file_mode)
# If `lib` exist for the script, copy that to remote host.
local_libs_path = remote_action.get_local_libs_path_abs()
if os.path.exists(local_libs_path):
extra = {'_local_libs': local_libs_path, '_remote_path': remote_dir}
LOG.debug('Copying libs to remote host.', extra=extra)
put_result_2 = self._parallel_ssh_client.put(local_path=local_libs_path,
remote_path=remote_dir,
mirror_local_mode=True)
result = mkdir_result or put_result_1 or put_result_2
return result
def _run_script_on_remote_host(self, remote_action):
command = remote_action.get_full_command_string()
LOG.info('Command to run: %s', command)
results = self._parallel_ssh_client.run(command, timeout=remote_action.get_timeout())
LOG.debug('Results from script: %s', results)
return results
    def _get_remote_action(self, action_parameters):
        """Build a ParamikoRemoteScriptAction from the runner config and parameters.

        :param action_parameters: Raw action parameters supplied by the caller.
        :raises Exception: if the action has no ``entry_point`` (such actions
            should use the remote-shell-cmd runner instead).
        """
        # remote script actions without entry_point don't make sense, user probably wanted to use
        # "remote-shell-cmd" action
        if not self.entry_point:
            msg = ('Action "%s" is missing "entry_point" attribute. Perhaps wanted to use '
                   '"remote-shell-script" runner?' % (self.action_name))
            raise Exception(msg)

        script_local_path_abs = self.entry_point
        pos_args, named_args = self._get_script_args(action_parameters)
        named_args = self._transform_named_args(named_args)
        env_vars = self._get_env_vars()
        # Each liveaction gets its own sub-directory under the configured remote dir.
        remote_dir = self.runner_parameters.get(RUNNER_REMOTE_DIR,
                                                cfg.CONF.ssh_runner.remote_dir)
        remote_dir = os.path.join(remote_dir, self.liveaction_id)
        return ParamikoRemoteScriptAction(self.action_name,
                                          str(self.liveaction_id),
                                          script_local_path_abs,
                                          self.libs_dir_path,
                                          named_args=named_args,
                                          positional_args=pos_args,
                                          env_vars=env_vars,
                                          on_behalf_user=self._on_behalf_user,
                                          user=self._username,
                                          password=self._password,
                                          private_key=self._private_key,
                                          remote_dir=remote_dir,
                                          hosts=self._hosts,
                                          parallel=self._parallel,
                                          sudo=self._sudo,
                                          sudo_password=self._sudo_password,
                                          timeout=self._timeout,
                                          cwd=self._cwd)
@staticmethod
def _generate_error_results(error, tb):
error_dict = {
'error': error,
'traceback': ''.join(traceback.format_tb(tb, 20)) if tb else '',
'failed': True,
'succeeded': False,
'return_code': 255
}
return error_dict
def get_runner():
    """Runner entry point: return a new runner instance with a random id."""
    return ParamikoRemoteScriptRunner(str(uuid.uuid4()))
def get_metadata():
    """Return the metadata registered for the 'remote_script_runner' runner."""
    return get_runner_metadata('remote_script_runner')
| 46.411765 | 98 | 0.61052 |
acee3326ed67e9e805a67a203c3d7b456b8cd92b | 2,570 | py | Python | src/xm/core/ProposalRecord.py | xm-blockchain/xm-core | 2282b435a02f061424d656155756d8f50238bcfd | [
"MIT"
] | null | null | null | src/xm/core/ProposalRecord.py | xm-blockchain/xm-core | 2282b435a02f061424d656155756d8f50238bcfd | [
"MIT"
] | 1 | 2020-11-26T00:07:31.000Z | 2020-11-26T00:07:31.000Z | src/xm/core/ProposalRecord.py | xm-blockchain/xm-core | 2282b435a02f061424d656155756d8f50238bcfd | [
"MIT"
] | null | null | null | from collections import namedtuple
from xm.generated import xm_pb2
from xm.core.misc import logger
from xm.core.State import State
class ProposalRecord:
    """Wrapper around the ``xm_pb2.ProposalRecord`` protobuf message.

    Tracks per-proposal counters and provides (de)serialization plus
    state-storage helpers keyed by activation block number.
    """

    def __init__(self, protobuf_block=None):
        # Wrap an existing protobuf message, or start from an empty one.
        self._data = protobuf_block
        if protobuf_block is None:
            self._data = xm_pb2.ProposalRecord()

        # Maps a counter name to its (getter, updater) pair.
        counter_mapping = namedtuple("counter_mapping", ["get", "update"])
        self._counter_by_name = {
            b'p_proposal_tx_hash': counter_mapping(self.number_of_tx_hashes,
                                                   self.update_number_of_tx_hashes),
        }

    def pbdata(self):
        """Return the underlying protobuf message."""
        return self._data

    def get_counter_by_name(self, name: bytes):
        """Return the current value of the named counter."""
        return self._counter_by_name[name].get()

    def update_counter_by_name(self, name, value=1, subtract=False):
        """Add (or subtract, when ``subtract`` is True) ``value`` to a counter."""
        self._counter_by_name[name].update(value, subtract)

    def number_of_tx_hashes(self):
        return self._data.number_of_tx_hashes

    def update_number_of_tx_hashes(self, value=1, subtract=False):
        if subtract:
            self._data.number_of_tx_hashes -= value
        else:
            self._data.number_of_tx_hashes += value

    def serialize(self):
        """Serialize the wrapped protobuf message to bytes."""
        return self._data.SerializeToString()

    @staticmethod
    def deserialize(data):
        """Parse ``data`` bytes into a new ProposalRecord."""
        pbdata = xm_pb2.ProposalRecord()
        pbdata.ParseFromString(bytes(data))
        return ProposalRecord(pbdata)

    @staticmethod
    def put_state(state: State, key, proposal_record, batch):
        """Persist ``proposal_record`` under ``key`` in the state database."""
        try:
            state._db.put_raw(key, proposal_record.serialize(), batch)
        except Exception as e:
            # BUG FIX: the original passed the message and the exception as two
            # positional arguments, so the Exception carried an uninterpolated
            # tuple instead of a formatted message. Format it and chain the cause.
            raise Exception("[ProposalRecord] Exception in put_state %s" % e) from e

    @staticmethod
    def get_state(state: State, key: bytes):
        """Load the ProposalRecord stored under ``key``.

        Returns an empty ProposalRecord when the key is missing or the
        read/deserialization fails.
        """
        try:
            data = state._db.get_raw(key)
            return ProposalRecord.deserialize(data)
        except KeyError:
            logger.debug('[get_state] ProposalRecord not found')
        except Exception as e:
            logger.error('[get_state] %s', e)
        return ProposalRecord()

    @staticmethod
    def get_key(block_number, activation_delay):
        """Return the storage key for a proposal activated after the delay.

        activation_block_number is the block number after which config will be activated
        so an activation block number 10 means the config will be activated only after adding
        block number 10 into the block chain
        """
        activation_block_number = block_number + activation_delay
        return b'proposal_record_block_number_' + activation_block_number.to_bytes(8, byteorder='big', signed=False)
| 33.815789 | 116 | 0.657977 |
acee34246d1b641323d4fdf53591a5d24fd6df2d | 7,031 | py | Python | examples/pwr_run/checkpointing/socket_short/random2/job1.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/socket_short/random2/job1.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/socket_short/random2/job1.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
# --- Command line arguments for this training job -------------------------
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()

# Pin the job to the GPU chosen by the scheduler.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num

# Training parameters
batch_size = 256
args_lr = 0.001
args_model = 'vgg16'

epoch_begin_time = 0  # wall-clock start of the current epoch (0 = none started)

# Job name is derived from the script file name; checkpoints are glob-matched by it.
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_random2/' + job_name + '*'

total_epochs = 6
starting_epoch = 0

# first step is to update the PID
# The scheduler communicates through pid_lock.json -> pid.json rename.
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
    pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
    fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')

if args.resume:
    # Resume epoch index is encoded in the checkpoint filename suffix.
    save_file = glob.glob(save_files)[0]
    # epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
    starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])

data_augmentation = True
num_classes = 10

# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True

n = 3

# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'

# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255

# If subtract pixel mean is enabled
if subtract_pixel_mean:
    x_train_mean = np.mean(x_train, axis=0)
    x_train -= x_train_mean
    x_test -= x_train_mean

print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)

# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

if args.resume:
    print('resume from checkpoint')
    model = keras.models.load_model(save_file)
else:
    print('train from start')
    model = models.Sequential()

    if '16' in args_model:
        base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
    elif '19' in args_model:
        base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)

    #base_model.summary()
    #pdb.set_trace()

    model.add(base_model)
    model.add(layers.Flatten())
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))
    #model.add(layers.Dropout(0.2))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))
    #model.add(layers.Dropout(0.2))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))

    # NOTE(review): compile only on a fresh build — a model restored via
    # load_model() keeps its compiled state. Confirm against sibling job scripts.
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=args_lr),
                  metrics=['accuracy'])

#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0  # updated by the PrintEpoch callback; used when checkpointing
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
    """SIGTERM handler: record wasted epoch time, checkpoint the model and exit."""
    # first record the wasted epoch time
    global epoch_begin_time
    if epoch_begin_time == 0:
        epoch_waste_time = 0
    else:
        epoch_waste_time = int(time.time() - epoch_begin_time)

    epoch_waste_dict = {}
    with open('epoch_waste.json', 'r') as fp:
        epoch_waste_dict = json.load(fp)
    epoch_waste_dict[job_name] += epoch_waste_time
    json_file3 = json.dumps(epoch_waste_dict)
    with open('epoch_waste.json', 'w') as fp:
        fp.write(json_file3)

    print('checkpointing the model triggered by kill -15 signal')
    # delete whatever checkpoint that already exists
    for f in glob.glob(save_files):
        os.remove(f)
    # Checkpoint filename encodes the epoch index so --resume can pick it up.
    model.save('/scratch/li.baol/checkpoint_random2/' + job_name + '_' + str(current_epoch) + '.h5')
    print ('(SIGTERM) terminating the process')

    # Mark this job as checkpointed for the scheduler.
    checkpoint_dict = {}
    with open('checkpoint.json', 'r') as fp:
        checkpoint_dict = json.load(fp)
    checkpoint_dict[job_name] = 1
    json_file3 = json.dumps(checkpoint_dict)
    with open('checkpoint.json', 'w') as fp:
        fp.write(json_file3)

    sys.exit()

signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################

# Per-job TensorBoard log directory.
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
    """Keras callback recording the current epoch index and its start time.

    Both values are read by the SIGTERM handler when checkpointing.
    """
    def on_epoch_begin(self, epoch, logs=None):
        global current_epoch
        #remaining_epochs = epochs - epoch
        current_epoch = epoch
        print('current epoch ' + str(current_epoch))
        global epoch_begin_time
        epoch_begin_time = time.time()
my_callback = PrintEpoch()

callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]

# Run training

# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)

# Train only half of total_epochs in this phase; resume picks up from checkpoint.
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=round(total_epochs/2),
          validation_data=(x_test, y_test),
          shuffle=True,
          callbacks=callbacks,
          initial_epoch=starting_epoch,
          verbose=1
          )

# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])

# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| 31.248889 | 118 | 0.703598 |
acee34397c33b9d45ee9fe22bb6d790a816da23c | 5,111 | py | Python | examples/common/callback-server.py | i3thuan5/pymodbus | 5124afcb65181e5f800a05f42a932da15800189f | [
"W3C"
] | null | null | null | examples/common/callback-server.py | i3thuan5/pymodbus | 5124afcb65181e5f800a05f42a932da15800189f | [
"W3C"
] | null | null | null | examples/common/callback-server.py | i3thuan5/pymodbus | 5124afcb65181e5f800a05f42a932da15800189f | [
"W3C"
] | null | null | null | #!/usr/bin/env python
'''
Pymodbus Server With Callbacks
--------------------------------------------------------------------------
This is an example of adding callbacks to a running modbus server
when a value is written to it. In order for this to work, it needs
a device-mapping file.
'''
#---------------------------------------------------------------------------#
# import the modbus libraries we need
#---------------------------------------------------------------------------#
from pymodbus.server.async import StartTcpServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.datastore import ModbusSparseDataBlock
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from pymodbus.transaction import ModbusRtuFramer, ModbusAsciiFramer
from pymodbus.compat import iterkeys
#---------------------------------------------------------------------------#
# import the python libraries we need
#---------------------------------------------------------------------------#
from multiprocessing import Queue, Process
#---------------------------------------------------------------------------#
# configure the service logging
#---------------------------------------------------------------------------#
import logging
logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.DEBUG)
#---------------------------------------------------------------------------#
# create your custom data block with callbacks
#---------------------------------------------------------------------------#
class CallbackDataBlock(ModbusSparseDataBlock):
    ''' A datablock that stores the new value in memory
    and passes the operation to a message queue for further
    processing.
    '''

    def __init__(self, devices, queue):
        '''
        :param devices: Mapping of modbus address -> device path
        :param queue: Queue receiving (device, value) write notifications
        '''
        self.devices = devices
        self.queue = queue

        values = {k:0 for k in iterkeys(devices)}
        values[0xbeef] = len(values) # the number of devices
        super(CallbackDataBlock, self).__init__(values)

    def setValues(self, address, value):
        ''' Sets the requested values of the datastore

        :param address: The starting address
        :param value: The new values to be set
        '''
        super(CallbackDataBlock, self).setValues(address, value)
        # Notify the worker process; device is None when the address is unmapped.
        self.queue.put((self.devices.get(address, None), value))
#---------------------------------------------------------------------------#
# define your callback process
#---------------------------------------------------------------------------#
def rescale_value(value):
    ''' Rescale the input value from the range
    of 0..100 to -3200..3200.

    :param value: The input value to scale
    :returns: The rescaled value
    '''
    # Values below the 50 midpoint map to the negative half of the range.
    if value >= 50:
        return (value - 50) * 64
    return -(value * 64)
def device_writer(queue):
    ''' A worker process that processes new messages
    from a queue to write to device outputs

    :param queue: The queue to get new messages from
    '''
    while True:
        # Blocks until the datablock's setValues pushes a notification.
        device, value = queue.get()
        # value is the list of written registers; rescale the first for output.
        scaled = rescale_value(value[0])
        log.debug("Write(%s) = %s" % (device, value))
        if not device: continue
        # do any logic here to update your devices
#---------------------------------------------------------------------------#
# initialize your device map
#---------------------------------------------------------------------------#
def read_device_map(path):
    ''' A helper method to read the device
    path to address mapping from file::

       0x0001,/dev/device1
       0x0002,/dev/device2

    :param path: The path to the input file
    :returns: Dict mapping integer modbus address -> device path
    '''
    devices = {}
    with open(path, 'r') as stream:
        for line in stream:
            line = line.strip()
            # ROBUSTNESS FIX: skip blank or comma-less lines; previously they
            # raised IndexError on piece[1].
            if not line or ',' not in line:
                continue
            piece = line.split(',')
            devices[int(piece[0], 16)] = piece[1]
    return devices
#---------------------------------------------------------------------------#
# initialize your data store
#---------------------------------------------------------------------------#
queue = Queue()
devices = read_device_map("device-mapping")
# One shared callback datablock backs all four register tables.
block = CallbackDataBlock(devices, queue)
store = ModbusSlaveContext(di=block, co=block, hr=block, ir=block)
context = ModbusServerContext(slaves=store, single=True)

#---------------------------------------------------------------------------#
# initialize the server information
#---------------------------------------------------------------------------#
identity = ModbusDeviceIdentification()
identity.VendorName = 'pymodbus'
identity.ProductCode = 'PM'
identity.VendorUrl = 'http://github.com/bashwork/pymodbus/'
identity.ProductName = 'pymodbus Server'
identity.ModelName = 'pymodbus Server'
identity.MajorMinorRevision = '1.0'

#---------------------------------------------------------------------------#
# run the server you want
#---------------------------------------------------------------------------#
# Writer runs as a separate process consuming the notification queue.
p = Process(target=device_writer, args=(queue,))
p.start()
StartTcpServer(context, identity=identity, address=("localhost", 5020))
| 38.141791 | 78 | 0.494619 |
acee3460d0381e5cc976f5ca858c5161a7853db3 | 111 | py | Python | mosaic/features/__init__.py | joshloyal/Mosaic | 7960e6f3ff958082f273dff6b05e1378d1b7a4b6 | [
"MIT"
] | null | null | null | mosaic/features/__init__.py | joshloyal/Mosaic | 7960e6f3ff958082f273dff6b05e1378d1b7a4b6 | [
"MIT"
] | null | null | null | mosaic/features/__init__.py | joshloyal/Mosaic | 7960e6f3ff958082f273dff6b05e1378d1b7a4b6 | [
"MIT"
] | null | null | null | from mosaic.features.hsv import *
from mosaic.features.color import *
from mosaic.features.processing import *
| 27.75 | 40 | 0.810811 |
acee349b5c3d500c9a821926ec57eff9d83593f6 | 5,328 | py | Python | usr/examples/video.py | SmartArduino/openmv | 9398ae0248b5f4dabb622f8890ad351e163b80c4 | [
"MIT"
] | null | null | null | usr/examples/video.py | SmartArduino/openmv | 9398ae0248b5f4dabb622f8890ad351e163b80c4 | [
"MIT"
] | null | null | null | usr/examples/video.py | SmartArduino/openmv | 9398ae0248b5f4dabb622f8890ad351e163b80c4 | [
"MIT"
] | 1 | 2020-11-28T14:41:49.000Z | 2020-11-28T14:41:49.000Z | # Copy this module to storage and import it if you want
# to use it in your own scripts. See example usage below.
import ustruct as struct
class AVI:
    """Minimal MJPEG AVI writer for OpenMV cameras.

    NOTE(review): this targets MicroPython — the file is opened in text mode
    ("w") and several struct fields pack str literals ("vids", the codec);
    on CPython 3 this would require "wb" and bytes throughout. Confirm the
    target runtime before changing.
    """
    def __init__(self, path, w, h, codec="MJPG"):
        self.w = w
        self.h = h
        self.codec = codec
        self.size = 0      # total bytes of frame data written
        self.frames = 0    # number of frames written
        self.fp = open(path, "w")
        self.fp.seek(224) #skip headers

    def avi_hdr(self):
        """Build the 'avih' main AVI header payload (AVIMAINHEADER)."""
        hdr = struct.pack("I", int(1000/self.fps)) # Time delay between frames
        hdr += struct.pack("I", 0)          # Data rate of AVI data
        hdr += struct.pack("I", 1)          # Size of single unit of padding
        hdr += struct.pack("I", 0)          # Flags
        hdr += struct.pack("I", self.frames)# Number of video frame stored
        hdr += struct.pack("I", 0)          # Number of intial frames
        hdr += struct.pack("I", 1)          # Number of data streams in chunk
        hdr += struct.pack("I", 0)          # Minimum playback buffer size
        hdr += struct.pack("I", self.w)     # Width of video frame in pixels
        hdr += struct.pack("I", self.h)     # Height of video frame in pixels
        hdr += struct.pack("I", 1)          # Time scale
        hdr += struct.pack("I", self.fps)   # Data rate of playback
        hdr += struct.pack("I", 0)          # Starting time of AVI data
        hdr += struct.pack("I", 0)          # Size of AVI data chunk
        return hdr;

    def str_hdr(self):
        """Build the 'strh' stream header payload (AVISTREAMHEADER)."""
        hdr = struct.pack("4s", "vids")     # Stream type
        hdr += struct.pack("4s", self.codec)# Stream codec
        hdr += struct.pack("I", 0)          # Flags
        hdr += struct.pack("I", 0)          # Priority
        hdr += struct.pack("I", 0)          # Number of first frame
        hdr += struct.pack("I", 1)          # Time scale
        hdr += struct.pack("I", self.fps)   # Data rate of playback
        hdr += struct.pack("I", 0)          # Starting time of AVI data
        hdr += struct.pack("I", 0)          # Data length
        hdr += struct.pack("I", 0)          # Buffer size
        hdr += struct.pack("I", 0)          # Sample quailty factor
        hdr += struct.pack("I", 0)          # Size of the sample in bytes
        hdr += struct.pack("II",0,0)        # Rect
        return hdr;

    def str_fmt(self):
        """Build the 'strf' stream format payload."""
        #BITMAPINFOHEADER
        hdr = struct.pack("I", 40)          # Size in bytes
        hdr += struct.pack("I", self.w)     # Width
        hdr += struct.pack("I", self.h)     # Height
        hdr += struct.pack("H", 1)          # Planes
        hdr += struct.pack("H", 16)         # Bits per pixel
        hdr += struct.pack("4s", self.codec) # This should be BI_JPEG, but ffmpeg writes "MJPG"
        hdr += struct.pack("I", 0)          # Image size (which one?)
        hdr += struct.pack("I", 0)          # X pixels-per-meter
        hdr += struct.pack("I", 0)          # Y pixels-per-meter
        hdr += struct.pack("I", 0)          # color indexes in the color table
        hdr += struct.pack("I", 0)          # required color indexes in the color table
        return hdr;

    def new_chunk(self, c_id, c_data):
        """RIFF chunk: 4cc id + u32 length + data."""
        return c_id +\
               struct.pack("I", len(c_data)) +\
               c_data

    def new_list(self, l_id, l_4cc, l_size, l_data):
        """RIFF list: id + u32 total size + 4cc + data."""
        return l_id +\
               struct.pack("I", l_size+len(l_data)+4) +\
               struct.pack("4s", l_4cc) +\
               l_data

    def add_frame(self, img):
        """Append one JPEG frame as a '00dc' chunk."""
        self.frames +=1
        self.size += img.size()
        self.fp.write(struct.pack("4sI", "00dc", img.size()))
        self.fp.write(img)

    def flush(self, fps):
        """Write the RIFF/AVI headers at the file start and close the file.

        fps must be supplied here because frame timing is only known after
        recording finishes.
        """
        self.fps = fps
        self.fp.seek(0)
        self.fp.write(
            self.new_list(b"RIFF", b"AVI ", self.size,
                self.new_list(b"LIST", b"hdrl", 0,
                    self.new_chunk(b"avih", self.avi_hdr())
                    + self.new_list(b"LIST", b"strl", 0,
                        self.new_chunk(b"strh", self.str_hdr())
                        + self.new_chunk(b"strf", self.str_fmt())
                    )
                    + self.new_list(b"LIST", b"movi", self.size, b"")
                )
            )
        )
        self.fp.close()
if __name__ == "__main__":
    # Example: record a 10 second MJPEG clip on an OpenMV camera.
    import sensor, time, pyb
    #from avi import AVI

    # Recording length in seconds
    REC_LENGTH = 10

    # Reset sensor
    sensor.reset()

    # Set sensor settings
    sensor.set_contrast(2)
    sensor.set_framesize(sensor.VGA)

    # Enable JPEG and set quality
    sensor.set_pixformat(sensor.JPEG)
    sensor.set_quality(95)

    # Skip a few frames to allow the sensor settle down
    # Note: This takes more time when exec from the IDE.
    for i in range(0, 30):
        sensor.snapshot()

    # Create red LED object
    led = pyb.LED(1)

    # Create video file
    video = AVI("%d.mjpeg"%pyb.rng(), 640, 480)

    # Recording clocks
    led.on()
    clock = time.clock()
    start = time.ticks()

    # Start recording
    while ((time.ticks()-start) < (REC_LENGTH*1000)):
        clock.tick()
        img = sensor.snapshot()
        video.add_frame(img)
    led.off()

    # Flush video file
    video.flush(int(clock.fps()))

    # Done, flash blue LED
    led = pyb.LED(3)
    while (True):
        led.on()
        time.sleep(500)
        led.off()
        time.sleep(500)
| 36.244898 | 95 | 0.51952 |
acee36010f8e03da04f83e4a6417f663212bf3c4 | 698 | py | Python | docs/conf.py | aarroisi/django-postgres-copy | 125eb361a5f0cc46bac2e2a8917554dfdcf99316 | [
"MIT"
] | null | null | null | docs/conf.py | aarroisi/django-postgres-copy | 125eb361a5f0cc46bac2e2a8917554dfdcf99316 | [
"MIT"
] | null | null | null | docs/conf.py | aarroisi/django-postgres-copy | 125eb361a5f0cc46bac2e2a8917554dfdcf99316 | [
"MIT"
] | null | null | null | from datetime import datetime
# Sphinx configuration for the django-postgres-copy documentation.
extensions = []
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
project = u'django-postgres-copy'
year = datetime.now().year
copyright = f'{year} Ben Welsh'
exclude_patterns = ["_build"]
html_theme = "alabaster"
html_sidebars = {
    '**': [
        # 'about.html',
        # 'navigation.html',
        'relations.html',
        'searchbox.html',
        'donate.html',
    ]
}
html_theme_options = {
    # NOTE(review): canonical_url is a Read the Docs theme option; alabaster
    # understands github_user/github_repo/donate_url but may ignore it — confirm.
    "canonical_url": f"https://palewi.re/docs/{project}/",
    "github_user": "palewire",
    "github_repo": project,
    "donate_url": "https://github.com/sponsors/palewire",
    "show_powered_by": False,
}
pygments_style = 'sphinx'
| 21.151515 | 58 | 0.636103 |
acee366d2da6d363c2236df42c587340c3bb9bd2 | 395 | py | Python | parisplus/wsgi.py | projetfulle/Paris-Sportif | 267d3db0f777b57b8f0e72c1112744efd7a1520e | [
"MIT"
] | 1 | 2019-08-06T22:05:31.000Z | 2019-08-06T22:05:31.000Z | parisplus/wsgi.py | projetfulle/Paris-Sportif | 267d3db0f777b57b8f0e72c1112744efd7a1520e | [
"MIT"
] | null | null | null | parisplus/wsgi.py | projetfulle/Paris-Sportif | 267d3db0f777b57b8f0e72c1112744efd7a1520e | [
"MIT"
] | null | null | null | """
WSGI config for parisplus project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before building the WSGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'parisplus.settings')

# WSGI callable used by application servers (gunicorn, uwsgi, mod_wsgi).
application = get_wsgi_application()
| 23.235294 | 78 | 0.787342 |
acee370e931602bd3972eb631109e71c74c97ce8 | 2,023 | py | Python | youtube_dl/extractor/mangomolo.py | hackarada/youtube-dl | 2ba46715a41fe074eab2221170b2ac78fab93fad | [
"Unlicense"
] | 66,635 | 2019-03-10T21:34:18.000Z | 2022-03-31T23:50:31.000Z | youtube_dl/extractor/mangomolo.py | hackarada/youtube-dl | 2ba46715a41fe074eab2221170b2ac78fab93fad | [
"Unlicense"
] | 10,936 | 2019-03-10T21:35:47.000Z | 2022-03-31T23:46:52.000Z | youtube_dl/extractor/mangomolo.py | hackarada/youtube-dl | 2ba46715a41fe074eab2221170b2ac78fab93fad | [
"Unlicense"
] | 15,194 | 2019-03-10T21:09:27.000Z | 2022-03-31T22:13:49.000Z | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_b64decode,
compat_urllib_parse_unquote,
)
from ..utils import int_or_none
class MangomoloBaseIE(InfoExtractor):
    """Shared extraction logic for Mangomolo embedded players (video + live)."""
    _BASE_REGEX = r'https?://(?:admin\.mangomolo\.com/analytics/index\.php/customers/embed/|player\.mangomolo\.com/v1/)'

    def _get_real_id(self, page_id):
        # Subclasses may decode/transform the raw id extracted from the URL.
        return page_id

    def _real_extract(self, url):
        page_id = self._get_real_id(self._match_id(url))
        # Re-request the player page, forwarding the original query string.
        webpage = self._download_webpage(
            'https://player.mangomolo.com/v1/%s?%s' % (self._TYPE, url.split('?')[1]), page_id)
        hidden_inputs = self._hidden_inputs(webpage)
        m3u8_entry_protocol = 'm3u8' if self._IS_LIVE else 'm3u8_native'

        # Stream URL appears either as a JS file/src assignment or an rtsp link.
        format_url = self._html_search_regex(
            [
                r'(?:file|src)\s*:\s*"(https?://[^"]+?/playlist\.m3u8)',
                r'<a[^>]+href="(rtsp://[^"]+)"'
            ], webpage, 'format url')
        formats = self._extract_wowza_formats(
            format_url, page_id, m3u8_entry_protocol, ['smil'])
        self._sort_formats(formats)

        return {
            'id': page_id,
            'title': self._live_title(page_id) if self._IS_LIVE else page_id,
            'uploader_id': hidden_inputs.get('userid'),
            'duration': int_or_none(hidden_inputs.get('duration')),
            'is_live': self._IS_LIVE,
            'formats': formats,
        }
class MangomoloVideoIE(MangomoloBaseIE):
    # VOD embeds; the numeric id from the URL is used verbatim.
    _TYPE = 'video'
    IE_NAME = 'mangomolo:' + _TYPE
    _VALID_URL = MangomoloBaseIE._BASE_REGEX + r'video\?.*?\bid=(?P<id>\d+)'
    _IS_LIVE = False
class MangomoloLiveIE(MangomoloBaseIE):
    # Live streams; channelid in the URL is percent-encoded base64.
    _TYPE = 'live'
    IE_NAME = 'mangomolo:' + _TYPE
    _VALID_URL = MangomoloBaseIE._BASE_REGEX + r'(live|index)\?.*?\bchannelid=(?P<id>(?:[A-Za-z0-9+/=]|%2B|%2F|%3D)+)'
    _IS_LIVE = True

    def _get_real_id(self, page_id):
        # Undo URL-quoting, then base64-decode to recover the real channel id.
        return compat_b64decode(compat_urllib_parse_unquote(page_id)).decode()
acee381591c7aea5b076a726f48f986201a5b821 | 6,247 | py | Python | homeassistant/components/sonarr/config_flow.py | anaisbetts/core | 9879b7becfa03e8ff5b256276cb3fc5177b52a20 | [
"Apache-2.0"
] | 1 | 2021-07-31T15:19:30.000Z | 2021-07-31T15:19:30.000Z | homeassistant/components/sonarr/config_flow.py | anaisbetts/core | 9879b7becfa03e8ff5b256276cb3fc5177b52a20 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/sonarr/config_flow.py | anaisbetts/core | 9879b7becfa03e8ff5b256276cb3fc5177b52a20 | [
"Apache-2.0"
] | null | null | null | """Config flow for Sonarr."""
from __future__ import annotations
import logging
from typing import Any
from sonarr import Sonarr, SonarrAccessRestricted, SonarrError
import voluptuous as vol
from homeassistant.config_entries import CONN_CLASS_LOCAL_POLL, ConfigFlow, OptionsFlow
from homeassistant.const import (
CONF_API_KEY,
CONF_HOST,
CONF_PORT,
CONF_SSL,
CONF_VERIFY_SSL,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.typing import ConfigType
from .const import (
CONF_BASE_PATH,
CONF_UPCOMING_DAYS,
CONF_WANTED_MAX_ITEMS,
DEFAULT_BASE_PATH,
DEFAULT_PORT,
DEFAULT_SSL,
DEFAULT_UPCOMING_DAYS,
DEFAULT_VERIFY_SSL,
DEFAULT_WANTED_MAX_ITEMS,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
async def validate_input(hass: HomeAssistant, data: dict) -> bool:
    """Validate the user input allows us to connect.

    Data has the keys from DATA_SCHEMA with values provided by the user.

    Raises SonarrAccessRestricted/SonarrError on failure; returns True on
    success. (Annotation corrected: the function returns a bool, not a dict.)
    """
    session = async_get_clientsession(hass)

    sonarr = Sonarr(
        host=data[CONF_HOST],
        port=data[CONF_PORT],
        api_key=data[CONF_API_KEY],
        base_path=data[CONF_BASE_PATH],
        tls=data[CONF_SSL],
        verify_ssl=data[CONF_VERIFY_SSL],
        session=session,
    )

    # Any connection/auth problem surfaces here as a sonarr exception.
    await sonarr.update()

    return True
class SonarrConfigFlow(ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Sonarr."""

    VERSION = 1
    CONNECTION_CLASS = CONN_CLASS_LOCAL_POLL

    def __init__(self):
        """Initialize the flow."""
        self._reauth = False      # True while running a re-authentication flow
        self._entry_id = None     # id of the entry being re-authenticated
        self._entry_data = {}     # stored entry data used to pre-fill reauth

    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return SonarrOptionsFlowHandler(config_entry)

    async def async_step_reauth(self, data: ConfigType | None = None) -> dict[str, Any]:
        """Handle configuration by re-auth."""
        self._reauth = True
        self._entry_data = dict(data)

        # Resolve the existing entry so it can be updated once reauth succeeds.
        entry = await self.async_set_unique_id(self.unique_id)
        self._entry_id = entry.entry_id

        return await self.async_step_reauth_confirm()

    async def async_step_reauth_confirm(
        self, user_input: ConfigType | None = None
    ) -> dict[str, Any]:
        """Confirm reauth dialog."""
        if user_input is None:
            # Show a confirmation form first; credentials are collected by
            # the user step afterwards.
            return self.async_show_form(
                step_id="reauth_confirm",
                description_placeholders={"host": self._entry_data[CONF_HOST]},
                data_schema=vol.Schema({}),
                errors={},
            )

        return await self.async_step_user()

    async def async_step_user(
        self, user_input: ConfigType | None = None
    ) -> dict[str, Any]:
        """Handle a flow initiated by the user."""
        errors = {}

        if user_input is not None:
            if self._reauth:
                # Merge the freshly supplied fields (e.g. API key) over the
                # stored entry data.
                user_input = {**self._entry_data, **user_input}

            if CONF_VERIFY_SSL not in user_input:
                user_input[CONF_VERIFY_SSL] = DEFAULT_VERIFY_SSL

            try:
                await validate_input(self.hass, user_input)
            except SonarrAccessRestricted:
                errors = {"base": "invalid_auth"}
            except SonarrError:
                errors = {"base": "cannot_connect"}
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Unexpected exception")
                return self.async_abort(reason="unknown")
            else:
                if self._reauth:
                    return await self._async_reauth_update_entry(
                        self._entry_id, user_input
                    )

                return self.async_create_entry(
                    title=user_input[CONF_HOST], data=user_input
                )

        data_schema = self._get_user_data_schema()

        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema(data_schema),
            errors=errors,
        )

    async def _async_reauth_update_entry(
        self, entry_id: str, data: dict
    ) -> dict[str, Any]:
        """Update existing config entry."""
        entry = self.hass.config_entries.async_get_entry(entry_id)
        self.hass.config_entries.async_update_entry(entry, data=data)
        # Reload so the integration picks up the new credentials immediately.
        await self.hass.config_entries.async_reload(entry.entry_id)

        return self.async_abort(reason="reauth_successful")

    def _get_user_data_schema(self) -> dict[str, Any]:
        """Get the data schema to display user form."""
        if self._reauth:
            # Reauth only needs a fresh API key.
            return {vol.Required(CONF_API_KEY): str}

        data_schema = {
            vol.Required(CONF_HOST): str,
            vol.Required(CONF_API_KEY): str,
            vol.Optional(CONF_BASE_PATH, default=DEFAULT_BASE_PATH): str,
            vol.Optional(CONF_PORT, default=DEFAULT_PORT): int,
            vol.Optional(CONF_SSL, default=DEFAULT_SSL): bool,
        }

        if self.show_advanced_options:
            data_schema[
                vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL)
            ] = bool

        return data_schema
class SonarrOptionsFlowHandler(OptionsFlow):
    """Handle Sonarr client options."""

    def __init__(self, config_entry):
        """Initialize options flow."""
        self.config_entry = config_entry

    async def async_step_init(self, user_input: ConfigType | None = None):
        """Manage Sonarr options."""
        if user_input is not None:
            return self.async_create_entry(title="", data=user_input)

        # Pre-fill the form with current option values, falling back to defaults.
        options = {
            vol.Optional(
                CONF_UPCOMING_DAYS,
                default=self.config_entry.options.get(
                    CONF_UPCOMING_DAYS, DEFAULT_UPCOMING_DAYS
                ),
            ): int,
            vol.Optional(
                CONF_WANTED_MAX_ITEMS,
                default=self.config_entry.options.get(
                    CONF_WANTED_MAX_ITEMS, DEFAULT_WANTED_MAX_ITEMS
                ),
            ): int,
        }

        return self.async_show_form(step_id="init", data_schema=vol.Schema(options))
| 31.39196 | 88 | 0.62542 |
acee3967e67a5dc45b49936bfda50a1d99c1bca6 | 3,263 | py | Python | c7n/cache.py | al3pht/cloud-custodian | ce6613d1b716f336384c5e308eee300389e6bf50 | [
"Apache-2.0"
] | 1 | 2021-04-27T10:27:35.000Z | 2021-04-27T10:27:35.000Z | c7n/cache.py | al3pht/cloud-custodian | ce6613d1b716f336384c5e308eee300389e6bf50 | [
"Apache-2.0"
] | 28 | 2020-09-23T03:56:48.000Z | 2021-04-21T19:08:55.000Z | c7n/cache.py | lfranchini31/cloud-custodian | 1830fe4b9a59ff6afb675985c9ea531571616a76 | [
"Apache-2.0"
] | 9 | 2019-11-18T07:46:44.000Z | 2020-04-15T11:20:20.000Z | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
"""Provide basic caching services to avoid extraneous queries over
multiple policies on the same resource type.
"""
import pickle
import os
import logging
import time
# Module-level logger shared by all cache implementations below.
log = logging.getLogger('custodian.cache')

# One-shot flag: flipped to True by factory() after the first
# "cache enabled/disabled" log line so repeated calls stay quiet.
CACHE_NOTIFY = False
def factory(config):
    """Select a cache backend for *config*.

    Returns a NullCache when caching is disabled, the process-wide
    InMemoryCache for 'memory', and a FileCacheManager otherwise.
    """

    def _announce_once(message):
        # Log the chosen cache mode only on the first factory() call.
        global CACHE_NOTIFY
        if not CACHE_NOTIFY:
            log.debug(message)
            CACHE_NOTIFY = True

    if not config:
        return NullCache(None)

    if not config.cache or not config.cache_period:
        _announce_once("Disabling cache")
        return NullCache(config)

    if config.cache == 'memory':
        _announce_once("Using in-memory cache")
        return InMemoryCache()

    return FileCacheManager(config)
class NullCache:
    """No-op cache: stores nothing, every lookup is a miss."""

    def __init__(self, config):
        # Kept for interface parity with the real cache backends.
        self.config = config

    def load(self):
        """Report that no cached data is available."""
        return False

    def get(self, key):
        """Always a miss."""
        return None

    def save(self, key, data):
        """Discard the value."""
        return None

    def size(self):
        """An empty cache has zero size."""
        return 0
class InMemoryCache:
    """Process-wide cache backed by a dict shared across instances."""

    # Borg-style shared state: every instance aliases this one dict,
    # so entries survive instance churn for the life of the process.
    __shared_state = {}

    def __init__(self):
        self.data = self.__shared_state

    def load(self):
        """In-memory data is always immediately available."""
        return True

    def get(self, key):
        # Keys are pickled so unhashable keys (lists, dicts) still work.
        return self.data.get(pickle.dumps(key))

    def save(self, key, data):
        self.data[pickle.dumps(key)] = data

    def size(self):
        """Sum of the lengths of all stored values."""
        total = 0
        for value in self.data.values():
            total += len(value)
        return total
class FileCacheManager:
    """Cache persisted as a pickle file on disk.

    Entries live in ``self.data`` (pickled key -> value); every ``save``
    rewrites the whole file. ``load`` honours ``cache_period`` (minutes)
    as a freshness window on the file's mtime.
    """

    def __init__(self, config):
        self.config = config
        self.cache_period = config.cache_period
        # Normalize ~, $VARS and relative segments once, up front.
        self.cache_path = os.path.abspath(
            os.path.expanduser(
                os.path.expandvars(
                    config.cache)))
        self.data = {}

    def get(self, key):
        """Return the cached value for *key*, or None on a miss."""
        k = pickle.dumps(key)
        return self.data.get(k)

    def load(self):
        """Load the cache file if present and fresh.

        Returns True when in-memory data is usable, False when the file
        is missing, stale, or truncated.
        """
        if self.data:
            return True
        if os.path.isfile(self.cache_path):
            # Stale file: older than cache_period minutes.
            if (time.time() - os.stat(self.cache_path).st_mtime >
                    self.config.cache_period * 60):
                return False
            with open(self.cache_path, 'rb') as fh:
                try:
                    self.data = pickle.load(fh)
                except EOFError:
                    # Truncated/empty file - treat as no cache.
                    return False
            log.debug("Using cache file %s" % self.cache_path)
            return True
        # No cache file yet. (Explicit False; the original fell off the
        # end and returned None, which callers treated as falsy anyway.)
        return False

    def save(self, key, data):
        """Store *key* -> *data* and rewrite the cache file.

        Failures are logged, never raised - caching is best-effort.
        """
        # Create the cache directory *before* writing. The original code
        # wrote first and only created the directory after the write
        # failed, silently losing the data of the very first save.
        directory = os.path.dirname(self.cache_path)
        if directory and not os.path.isdir(directory):
            log.info('Generating Cache directory: %s.' % directory)
            try:
                os.makedirs(directory)
            except Exception as e:
                log.warning("Could not create directory: %s err: %s" % (
                    directory, e))
        try:
            with open(self.cache_path, 'wb') as fh:
                self.data[pickle.dumps(key)] = data
                # Protocol 2 kept for compatibility with existing files.
                pickle.dump(self.data, fh, protocol=2)
        except Exception as e:
            log.warning("Could not save cache %s err: %s" % (
                self.cache_path, e))

    def size(self):
        """Size in bytes of the cache file, or 0 if it does not exist."""
        return os.path.exists(self.cache_path) and os.path.getsize(self.cache_path) or 0
| 25.896825 | 88 | 0.570334 |
acee39a3aa63a0de841329898df6fe9d59472921 | 1,577 | py | Python | share/rpcauth/rpcauth.py | maximcoin-project/maximcoin | 55f387477f2f579ef4146090ad48ab0fa29935dd | [
"MIT"
] | null | null | null | share/rpcauth/rpcauth.py | maximcoin-project/maximcoin | 55f387477f2f579ef4146090ad48ab0fa29935dd | [
"MIT"
] | null | null | null | share/rpcauth/rpcauth.py | maximcoin-project/maximcoin | 55f387477f2f579ef4146090ad48ab0fa29935dd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from argparse import ArgumentParser
from base64 import urlsafe_b64encode
from binascii import hexlify
from getpass import getpass
from os import urandom
import hmac
def generate_salt(size):
    """Create size byte hex salt"""
    # bytes.hex() is the modern spelling of hexlify(...).decode().
    return urandom(size).hex()
def generate_password():
    """Create 32 byte b64 password"""
    # 32 random bytes -> 44 URL-safe base64 characters (incl. padding).
    raw = urandom(32)
    return urlsafe_b64encode(raw).decode('utf-8')
def password_to_hmac(salt, password):
    """Return the hex HMAC-SHA256 of *password* keyed by *salt*."""
    digest = hmac.new(salt.encode('utf-8'), password.encode('utf-8'), 'SHA256')
    return digest.hexdigest()
def main():
    """Parse CLI args and print an rpcauth credential line."""
    parser = ArgumentParser(description='Create login credentials for a JSON-RPC user')
    parser.add_argument('username', help='the username for authentication')
    parser.add_argument('password', help='leave empty to generate a random password or specify "-" to prompt for password', nargs='?')
    args = parser.parse_args()

    # Resolve the password: omitted -> random, '-' -> interactive prompt.
    password = args.password
    if not password:
        password = generate_password()
    elif password == '-':
        password = getpass()

    # 16 random bytes -> 32 hex characters of salt.
    hex_salt = generate_salt(16)
    digest = password_to_hmac(hex_salt, password)

    print('String to be appended to maximcoin.conf:')
    print('rpcauth={0}:{1}${2}'.format(args.username, hex_salt, digest))
    print('Your password:\n{0}'.format(password))
# Run only when executed as a script; importing the module has no side effects.
if __name__ == '__main__':
    main()
| 33.553191 | 134 | 0.714648 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.