text
stringlengths 4
1.02M
| meta
dict |
|---|---|
__version__=''' $Id$ '''
import os, sys, glob, shutil
def specialOption(n):
    """Consume command-line flag *n* and report whether it was present.

    Every occurrence of *n* is removed from sys.argv so that the later
    distutils/setuptools option parsing never sees it.
    """
    seen = False
    while n in sys.argv:
        seen = True
        sys.argv.remove(n)
    return seen
#defaults for these options may be configured in local-setup.cfg
#[OPTIONS]
#no-download-t1-files=yes
#ignore-system-libart=yes
# if used on command line the config values are not used
# NB: flags are consumed (removed from sys.argv) before setup() parses argv
dlt1 = not specialOption('--no-download-t1-files')  # download T1 font curves unless suppressed
isla = specialOption('--ignore-system-libart')      # prefer the bundled libart over the system one
try:
    import configparser
except ImportError:
    import ConfigParser as configparser  # Python 2 fallback
isPy3 = sys.version_info[0]==3
platform = sys.platform
# short aliases used throughout this script
pjoin = os.path.join
abspath = os.path.abspath
isfile = os.path.isfile
isdir = os.path.isdir
dirname = os.path.dirname
# locate the directory containing this setup script
if __name__=='__main__':
    pkgDir=dirname(sys.argv[0])
else:
    pkgDir=dirname(__file__)
if not pkgDir:
    pkgDir=os.getcwd()
elif not os.path.isabs(pkgDir):
    pkgDir=os.path.abspath(pkgDir)
try:
    os.chdir(pkgDir)
except:
    print('!!!!! warning could not change directory to %r' % pkgDir)
# RL_EXE_DAILY=1 makes get_version() report 'daily' instead of a real version
daily=int(os.environ.get('RL_EXE_DAILY','0'))
import distutils
try:
    from setuptools import setup, Extension
except ImportError:
    from distutils.core import setup, Extension  # plain distutils fallback
from distutils import sysconfig
# from Zope - App.Common.package_home
def package_home(globals_dict):
    """Return the directory that contains the package whose globals are given.

    The module is looked up in sys.modules via its ``__name__`` and the last
    component of its package ``__path__`` is stripped off.
    """
    mod = sys.modules[globals_dict['__name__']]
    return os.path.split(mod.__path__[0])[0]
package_path = pjoin(package_home(distutils.__dict__), 'site-packages', 'reportlab')
def get_version():
    """Return the ReportLab version string.

    Tries, in order: the RL_EXE_DAILY override, parsing the ``Version`` line
    out of src/reportlab/__init__.py, then importing a compiled reportlab
    module (.pyc/.pyo).  Raises ValueError if all of these fail.
    """
    if daily: return 'daily'
    #determine Version
    HERE = pkgDir
    if os.getcwd()!=HERE:
        # not running from the package directory; re-derive the source location
        if __name__=='__main__':
            HERE=os.path.dirname(sys.argv[0])
        else:
            HERE=os.path.dirname(__file__)
    #first try source
    FN = pjoin(HERE,'src','reportlab','__init__')
    try:
        for l in open(pjoin(FN+'.py'),'r').readlines():
            if l.startswith('Version'):
                # the line looks like: Version = "x.y.z"
                D = {}
                exec(l.strip(),D)
                return D['Version']
    except:
        pass
    #don't have source, try import
    import imp  # NOTE(review): `imp` is deprecated; kept for Py2/old-Py3 compatibility
    for desc in ('.pyc', 'rb', 2), ('.pyo', 'rb', 2):
        try:
            fn = FN+desc[0]
            f = open(fn,desc[1])
            m = imp.load_module('reportlab',f,fn,desc)
            return m.Version
        except:
            pass
    raise ValueError('Cannot determine ReportLab Version')
class config:
    """Best-effort lookup of values from setup.cfg / local-setup.cfg.

    Calling the instance returns the named option or *default* when the
    parser could not be created, or the section/option is absent.
    """
    def __init__(self):
        try:
            self.parser = configparser.RawConfigParser()
            self.parser.read([
                pjoin(pkgDir, 'setup.cfg'),
                pjoin(pkgDir, 'local-setup.cfg'),
                ])
        except:
            self.parser = None

    def __call__(self, sect, name, default=None):
        try:
            return self.parser.get(sect, name)
        except:
            return default
config = config()  # replace the class with a ready-to-use instance
if dlt1:
    #not set on command line so try for config value
    dlt1 = not config('OPTIONS','no-download-t1-files','0').lower() in ('1','true','yes')
if not isla:
    #not set on command line so try for config value
    isla = config('OPTIONS','ignore-system-libart','0').lower() in ('1','true','yes')
#this code from /FBot's PIL setup.py
def aDir(P, d, x=None):
    """Append directory *d* to list *P* (or insert at index *x*).

    Skipped when *d* is empty, is not an existing directory, or is already
    listed -- so P never accumulates duplicates or bogus paths.
    """
    if not d or not os.path.isdir(d) or d in P:
        return
    if x is None:
        P.append(d)
    else:
        P.insert(x, d)
class inc_lib_dirs:
    """Lazily computed, cached (include_dirs, library_dirs) search paths."""
    # class attributes double as the cache; filled on first call
    L = None  # library directories
    I = None  # include directories
    def __call__(self):
        if self.L is None:
            L = []
            I = []
            if platform == "cygwin":
                aDir(L, os.path.join("/usr/lib", "python%s" % sys.version[:3], "config"))
            elif platform == "darwin":
                # attempt to make sure we pick freetype2 over other versions
                aDir(I, "/sw/include/freetype2")
                aDir(I, "/sw/lib/freetype2/include")
                # fink installation directories
                aDir(L, "/sw/lib")
                aDir(I, "/sw/include")
                # darwin ports installation directories
                aDir(L, "/opt/local/lib")
                aDir(I, "/opt/local/include")
            # common POSIX locations; NOTE(review): nesting reconstructed, these
            # appear to apply on all non-cygwin platforms -- confirm against VCS
            aDir(I, "/usr/local/include")
            aDir(L, "/usr/local/lib")
            aDir(I, "/usr/include")
            aDir(L, "/usr/lib")
            aDir(I, "/usr/include/freetype2")
            prefix = sysconfig.get_config_var("prefix")
            if prefix:
                aDir(L, pjoin(prefix, "lib"))
                aDir(I, pjoin(prefix, "include"))
            self.L=L
            self.I=I
        return self.I,self.L
inc_lib_dirs=inc_lib_dirs()
def getVersionFromCCode(fn):
    """Return the value of the C ``#define VERSION "..."`` in file *fn*.

    Returns '' when no such define is present.
    """
    import re
    # use a context manager: the original leaked the open file handle
    with open(fn, 'r') as f:
        tag = re.search(r'^#define\s+VERSION\s+"([^"]*)"', f.read(), re.M)
    return tag and tag.group(1) or ''
class _rl_dir_info:
    """Callable giving (version, mtime) for file *cn* inside a directory.

    Returns None (falsy) when the file cannot be read, so instances work
    directly as a ``filter`` predicate.
    """
    def __init__(self, cn):
        self.cn = cn

    def __call__(self, dir):
        import stat
        target = pjoin(dir, self.cn)
        try:
            return getVersionFromCCode(target), os.stat(target)[stat.ST_MTIME]
        except:
            return None
def _find_rl_ccode(dn='rl_accel',cn='_rl_accel.c'):
    '''locate where the accelerator code lives

    Searches a fixed set of locations relative to pkgDir (plus glob matches
    for versioned checkouts such as rl_accel-1.0/rl_accel) for a directory
    containing *cn*.  Returns the absolute path of the chosen directory or
    None when nothing usable is found.
    '''
    candidates = []
    for x in [
            pjoin('src','rl_addons',dn),
            pjoin('rl_addons',dn),
            pjoin('..','rl_addons',dn),
            pjoin('..','..','rl_addons',dn),
            dn,
            pjoin('..',dn),
            pjoin('..','..',dn),
            ] \
            + glob.glob(pjoin(dn+'-*',dn)) \
            + glob.glob(pjoin('..',dn+'-*',dn)) \
            + glob.glob(pjoin('..','..',dn+'-*',dn)):
        fn = pjoin(pkgDir,x,cn)
        if isfile(fn):
            candidates.append(x)
    if candidates:
        info = _rl_dir_info(cn)
        # drop directories whose version/mtime cannot be read
        candidates = list(filter(info, candidates))
        if candidates:
            # BUG FIX: the original used `key=_rl_dir_info` (the class itself),
            # which made sort compare _rl_dir_info instances -- a TypeError on
            # Python 3 whenever more than one candidate was found.  Sort by the
            # (version, mtime) tuples instead; ascending order is preserved from
            # the original's selection of element [0].
            candidates.sort(key=info)
            return abspath(candidates[0])
    return None
def BIGENDIAN(macname, value=None):
    'define a macro if bigendian'
    if sys.byteorder == 'big':
        return [(macname, value)]
    return []
def pfxJoin(pfx, *N):
    """Return the list of paths obtained by joining each name in N onto pfx."""
    return [os.path.join(pfx, n) for n in N]
INFOLINES = []  # messages replayed as the summary at the end of setup

def infoline(t):
    """Echo *t* immediately and remember it for the final summary block."""
    print(t)
    INFOLINES.append(t)
# data files shipped inside the reportlab package (fed to package_data);
# the .pfb font curve files are fetched by get_fonts() when missing
reportlab_files= [
        'fonts/00readme.txt',
        'fonts/bitstream-vera-license.txt',
        'fonts/DarkGarden-copying-gpl.txt',
        'fonts/DarkGarden-copying.txt',
        'fonts/DarkGarden-readme.txt',
        'fonts/DarkGarden.sfd',
        'fonts/DarkGardenMK.afm',
        'fonts/DarkGardenMK.pfb',
        'fonts/Vera.ttf',
        'fonts/VeraBd.ttf',
        'fonts/VeraBI.ttf',
        'fonts/VeraIt.ttf',
        'fonts/_abi____.pfb',
        'fonts/_ab_____.pfb',
        'fonts/_ai_____.pfb',
        'fonts/_a______.pfb',
        'fonts/cobo____.pfb',
        'fonts/cob_____.pfb',
        'fonts/com_____.pfb',
        'fonts/coo_____.pfb',
        'fonts/_ebi____.pfb',
        'fonts/_eb_____.pfb',
        'fonts/_ei_____.pfb',
        'fonts/_er_____.pfb',
        'fonts/sy______.pfb',
        'fonts/zd______.pfb',
        'fonts/zx______.pfb',
        'fonts/zy______.pfb',
        ]
def get_fonts(PACKAGE_DIR, reportlab_files):
    """Ensure the standard Type 1 font curve files exist under reportlab/fonts.

    Downloads and unpacks the zip from reportlab.com unless every listed file
    already exists, or downloading was disabled (--no-download-t1-files or the
    config).  On download failure the still-missing names are dropped from the
    local list so setup() does not reference absent files.
    """
    import sys, os, os.path, zipfile, io
    if isPy3:
        import urllib.request as ureq
    else:
        import urllib2 as ureq
    rl_dir = PACKAGE_DIR['reportlab']
    if not [x for x in reportlab_files if not os.path.isfile(pjoin(rl_dir,x))]:
        # nothing is missing
        infoline("Standard T1 font curves already downloaded")
        return
    elif not dlt1:
        infoline('not downloading T1 font curve files')
        return
    try:
        infoline("Downloading standard T1 font curves")
        remotehandle = ureq.urlopen("http://www.reportlab.com/ftp/pfbfer-20070710.zip")
        zipdata = io.BytesIO(remotehandle.read())
        remotehandle.close()
        archive = zipfile.ZipFile(zipdata)
        dst = pjoin(rl_dir, 'fonts')
        for name in archive.namelist():
            if not name.endswith('/'):
                outfile = open(os.path.join(dst, name), 'wb')
                outfile.write(archive.read(name))
                outfile.close()
        xitmsg = "Finished download of standard T1 font curves"
    except:
        xitmsg = "Failed to download standard T1 font curves"
        # NOTE(review): this rebinds the local name only -- the caller's list
        # is NOT filtered; looks like a latent bug, confirm intent
        reportlab_files = [x for x in reportlab_files if os.path.isfile(pjoin(rl_dir,x))]
    infoline(xitmsg)
def main():
    """Build/installation entry point for the ReportLab toolkit.

    Handles the special 'tests'/'tests-preinstall' commands, discovers the
    optional C accelerator (_rl_accel/pyHnj) and renderPM sources, configures
    libart and freetype, then runs setup().  NOTE(review): the original
    indentation was lost in this copy; the nesting below is reconstructed.
    """
    #test to see if we've a special command
    if 'tests' in sys.argv or 'tests-preinstall' in sys.argv:
        if len(sys.argv)!=2:
            raise ValueError('tests commands may only be used alone')
        cmd = sys.argv[-1]
        PYTHONPATH=[pkgDir]
        if cmd=='tests-preinstall':
            # run the suite against the not-yet-installed source tree
            PYTHONPATH.insert(0,pjoin(pkgDir,'src'))
        os.environ['PYTHONPATH']=os.pathsep.join(PYTHONPATH)
        os.chdir(pjoin(pkgDir,'tests'))
        os.system("%s runAll.py" % sys.executable)
        return
    # debug build flags, controlled by the RL_DEBUG environment variable
    debug_compile_args = []
    debug_link_args = []
    debug_macros = []
    debug = int(os.environ.get('RL_DEBUG','0'))
    if debug:
        if sys.platform == 'win32':
            debug_compile_args=['/Zi']
            debug_link_args=['/DEBUG']
        if debug>1:
            debug_macros.extend([('RL_DEBUG',debug), ('ROBIN_DEBUG',None)])
    SPECIAL_PACKAGE_DATA = {}
    RL_ACCEL = _find_rl_ccode('rl_accel','_rl_accel.c')
    LIBRARIES=[]
    EXT_MODULES = []
    if not RL_ACCEL:
        infoline( '***************************************************')
        infoline( '*No rl_accel code found, you can obtain it at *')
        infoline( '*http://www.reportlab.org/downloads.html#_rl_accel*')
        infoline( '***************************************************')
    else:
        infoline( '################################################')
        infoline( '#Attempting install of _rl_accel & pyHnj')
        infoline( '#extensions from %r'%RL_ACCEL)
        infoline( '################################################')
        # hyphen.mashed is copied into the package for the duration of setup()
        fn = pjoin(RL_ACCEL,'hyphen.mashed')
        SPECIAL_PACKAGE_DATA = {fn: pjoin('lib','hyphen.mashed')}
        EXT_MODULES += [
                Extension( 'reportlab.lib._rl_accel',
                        [pjoin(RL_ACCEL,'_rl_accel.c')],
                        include_dirs=[],
                        define_macros=[]+debug_macros,
                        library_dirs=[],
                        libraries=[], # libraries to link against
                        extra_compile_args=debug_compile_args,
                        extra_link_args=debug_link_args,
                        ),
                ]
        if not isPy3:
            # pyHnj hyphenation module is Python 2 only
            EXT_MODULES += [
                    Extension( 'reportlab.lib.pyHnj',
                            [pjoin(RL_ACCEL,'pyHnjmodule.c'),
                             pjoin(RL_ACCEL,'hyphen.c'),
                             pjoin(RL_ACCEL,'hnjalloc.c')],
                            include_dirs=[],
                            define_macros=[]+debug_macros,
                            library_dirs=[],
                            libraries=[], # libraries to link against
                            extra_compile_args=debug_compile_args,
                            extra_link_args=debug_link_args,
                            ),
                    ]
    RENDERPM = _find_rl_ccode('renderPM','_renderPM.c')
    if not RENDERPM:
        # NOTE(review): banner text says "rl_accel" but this branch concerns
        # renderPM -- looks like a copy/paste slip in the message
        infoline( '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
        infoline( '!No rl_accel code found, you can obtain it at !')
        infoline( '!http://www.reportlab.org/downloads.html !')
        infoline( '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
    else:
        infoline( '################################################')
        infoline( '#Attempting install of _renderPM')
        infoline( '#extensions from %r'%RENDERPM)
        GT1_DIR=pjoin(RENDERPM,'gt1')
        #check for an installed libart
        if isla:
            LIBART_INC=None
        else:
            LIBART_INC = list(sorted(glob.glob('/usr/include/libart-*/libart_lgpl/libart-features.h')))
        if LIBART_INC:
            # use the system libart; read its version out of libart-features.h
            def installed_libart_version(fn):
                for l in open(fn, 'r').readlines():
                    if l.startswith('#define LIBART_VERSION'):
                        v = l[:-1].split(' ')[-1]
                        return v
                return '"0.0.0"'
            LIBART_INC = LIBART_INC[-1]
            LIBART_VERSION = installed_libart_version(LIBART_INC)
            LIBART_INC = os.path.dirname(LIBART_INC)
            LIBART_SOURCES=[]
            LIBART_LIB = ['art_lgpl_2']
            infoline('will use installed libart %s' % LIBART_VERSION.replace('"',''))
        else:
            # fall back to compiling the bundled libart sources
            LIBART_DIR = LIBART_INC = pjoin(RENDERPM,'libart_lgpl')
            LIBART_LIB = []
            LIBART_SOURCES=[
                    pjoin(LIBART_DIR,'art_vpath_bpath.c'),
                    pjoin(LIBART_DIR,'art_rgb_pixbuf_affine.c'),
                    pjoin(LIBART_DIR,'art_rgb_svp.c'),
                    pjoin(LIBART_DIR,'art_svp.c'),
                    pjoin(LIBART_DIR,'art_svp_vpath.c'),
                    pjoin(LIBART_DIR,'art_svp_vpath_stroke.c'),
                    pjoin(LIBART_DIR,'art_svp_ops.c'),
                    pjoin(LIBART_DIR,'art_vpath.c'),
                    pjoin(LIBART_DIR,'art_vpath_dash.c'),
                    pjoin(LIBART_DIR,'art_affine.c'),
                    pjoin(LIBART_DIR,'art_rect.c'),
                    pjoin(LIBART_DIR,'art_rgb_affine.c'),
                    pjoin(LIBART_DIR,'art_rgb_affine_private.c'),
                    pjoin(LIBART_DIR,'art_rgb.c'),
                    pjoin(LIBART_DIR,'art_rgb_rgba_affine.c'),
                    pjoin(LIBART_DIR,'art_svp_intersect.c'),
                    pjoin(LIBART_DIR,'art_svp_render_aa.c'),
                    pjoin(LIBART_DIR,'art_misc.c'),
                    ]
            def libart_version():
                # scrape MAJOR/MINOR/MICRO from the bundled configure.in
                K = ('LIBART_MAJOR_VERSION','LIBART_MINOR_VERSION','LIBART_MICRO_VERSION')
                D = {}
                for l in open(pjoin(LIBART_DIR,'configure.in'),'r').readlines():
                    l = l.strip().split('=')
                    if len(l)>1 and l[0].strip() in K:
                        D[l[0].strip()] = l[1].strip()
                        if len(D)==3: break
                # windows needs the quotes escaped for the preprocessor define
                return (sys.platform == 'win32' and '\\"%s\\"' or '"%s"') % '.'.join(map(lambda k,D=D: D.get(k,'?'),K))
            LIBART_VERSION = libart_version()
            infoline('will use package libart %s' % LIBART_VERSION.replace('"',''))
        SOURCES=[pjoin(RENDERPM,'_renderPM.c'),
                pjoin(GT1_DIR,'gt1-parset1.c'),
                pjoin(GT1_DIR,'gt1-dict.c'),
                pjoin(GT1_DIR,'gt1-namecontext.c'),
                pjoin(GT1_DIR,'gt1-region.c'),
                ]+LIBART_SOURCES
        if platform=='win32':
            # freetype location comes from FT_LIB/FT_INC env vars or setup.cfg
            from distutils.util import get_platform
            secname = 'FREETYPE_PATHS_%s' % get_platform().split('-')[-1].upper()
            FT_LIB=os.environ.get('FT_LIB','')
            if not FT_LIB: FT_LIB=config(secname,'lib','')
            if FT_LIB and not os.path.isfile(FT_LIB):
                infoline('# freetype lib %r not found' % FT_LIB)
                FT_LIB=[]
            if FT_LIB:
                FT_INC_DIR=os.environ.get('FT_INC','')
                if not FT_INC_DIR: FT_INC_DIR=config(secname,'inc')
                FT_MACROS = [('RENDERPM_FT',None)]
                FT_LIB_DIR = [dirname(FT_LIB)]
                FT_INC_DIR = [FT_INC_DIR or pjoin(dirname(FT_LIB_DIR[0]),'include')]
                FT_LIB_PATH = FT_LIB
                FT_LIB = [os.path.splitext(os.path.basename(FT_LIB))[0]]
                if isdir(FT_INC_DIR[0]):
                    infoline('# installing with freetype %r' % FT_LIB_PATH)
                else:
                    infoline('# freetype2 include folder %r not found' % FT_INC_DIR[0])
                    FT_LIB=FT_LIB_DIR=FT_INC_DIR=FT_MACROS=[]
            else:
                FT_LIB=FT_LIB_DIR=FT_INC_DIR=FT_MACROS=[]
        else:
            # posix: seed from the conventional location or config, then scan
            if os.path.isdir('/usr/include/freetype2'):
                FT_LIB_DIR = []
                FT_INC_DIR = ['/usr/include/freetype2']
            else:
                FT_LIB_DIR=config('FREETYPE_PATHS','lib')
                FT_LIB_DIR=[FT_LIB_DIR] if FT_LIB_DIR else []
                FT_INC_DIR=config('FREETYPE_PATHS','inc')
                FT_INC_DIR=[FT_INC_DIR] if FT_INC_DIR else []
            I,L=inc_lib_dirs()
            ftv = None
            for d in I:
                # freetype >= 2.1 keeps ft2build.h at the top level...
                if isfile(pjoin(d, "ft2build.h")):
                    ftv = 21
                    FT_INC_DIR=[d,pjoin(d, "freetype2")]
                    break
                # ...or inside a freetype2 subdirectory
                d = pjoin(d, "freetype2")
                if isfile(pjoin(d, "ft2build.h")):
                    ftv = 21
                    FT_INC_DIR=[d]
                    break
                # freetype 2.0 layout
                if isdir(pjoin(d, "freetype")):
                    ftv = 20
                    FT_INC_DIR=[d]
                    break
            if ftv:
                FT_LIB=['freetype']
                FT_LIB_DIR=L
                FT_MACROS = [('RENDERPM_FT',None)]
                infoline('# installing with freetype version %d' % ftv)
            else:
                FT_LIB=FT_LIB_DIR=FT_INC_DIR=FT_MACROS=[]
        if not FT_LIB:
            infoline('# installing without freetype no ttf, sorry!')
            infoline('# You need to install a static library version of the freetype2 software')
            infoline('# If you need truetype support in renderPM')
            infoline('# You may need to edit setup.cfg (win32)')
            infoline('# or edit this file to access the library if it is installed')
        EXT_MODULES += [Extension( 'reportlab.graphics._renderPM',
                        SOURCES,
                        include_dirs=[RENDERPM,LIBART_INC,GT1_DIR]+FT_INC_DIR,
                        define_macros=FT_MACROS+[('LIBART_COMPILATION',None)]+debug_macros+[('LIBART_VERSION',LIBART_VERSION)],
                        library_dirs=[]+FT_LIB_DIR,
                        # libraries to link against
                        libraries=FT_LIB+LIBART_LIB,
                        extra_compile_args=debug_compile_args,
                        extra_link_args=debug_link_args,
                        ),
                ]
        infoline('################################################')
    #copy some special case files into place so package_data will treat them properly
    PACKAGE_DIR = {'':'src','reportlab': pjoin('src','reportlab')}
    for fn,dst in SPECIAL_PACKAGE_DATA.items():
        shutil.copyfile(fn,pjoin(PACKAGE_DIR['reportlab'],dst))
        reportlab_files.append(dst)
    get_fonts(PACKAGE_DIR, reportlab_files)
    try:
        setup(
            name="reportlab",
            version=get_version(),
            license="BSD license (see license.txt for details), Copyright (c) 2000-2015, ReportLab Inc.",
            description="The Reportlab Toolkit",
            long_description="""The ReportLab Toolkit. An Open Source Python library for generating PDFs and graphics.""",
            author="Andy Robinson, Robin Becker, the ReportLab team and the community",
            author_email="reportlab-users@lists2.reportlab.com",
            url="http://www.reportlab.com/",
            packages=[
                    'reportlab',
                    'reportlab.graphics.charts',
                    'reportlab.graphics.samples',
                    'reportlab.graphics.widgets',
                    'reportlab.graphics.barcode',
                    'reportlab.graphics',
                    'reportlab.lib',
                    'reportlab.pdfbase',
                    'reportlab.pdfgen',
                    'reportlab.platypus',
                    ],
            package_dir = PACKAGE_DIR,
            package_data = {'reportlab': reportlab_files},
            ext_modules = EXT_MODULES,
            classifiers = [
                    'Development Status :: 5 - Production/Stable',
                    'Intended Audience :: Developers',
                    'License :: OSI Approved :: BSD License',
                    'Topic :: Printing',
                    'Topic :: Text Processing :: Markup',
                    'Programming Language :: Python :: 2',
                    'Programming Language :: Python :: 2.7',
                    'Programming Language :: Python :: 3',
                    'Programming Language :: Python :: 3.3',
                    'Programming Language :: Python :: 3.4',
                    ],
            #this probably only works for setuptools, but distutils seems to ignore it
            install_requires=['pillow>=2.4.0','pip>=1.4.1', 'setuptools>=2.2'],
            )
        print()
        print('########## SUMMARY INFO #########')
        print('\n'.join(INFOLINES))
    finally:
        # the SPECIAL_PACKAGE_DATA copies are only needed while setup() runs
        for dst in SPECIAL_PACKAGE_DATA.values():
            os.remove(pjoin(PACKAGE_DIR['reportlab'],dst))
            reportlab_files.remove(dst)
if __name__=='__main__':
    main()
|
{
"content_hash": "fa48b1da04743c09459b8b77ebd556ce",
"timestamp": "",
"source": "github",
"line_count": 550,
"max_line_length": 143,
"avg_line_length": 38.35636363636364,
"alnum_prop": 0.4932688661357603,
"repo_name": "Distrotech/reportlab",
"id": "913e93b08081323ae8289a9667e9c10048787fe0",
"size": "21176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "721758"
},
{
"name": "C++",
"bytes": "668"
},
{
"name": "Java",
"bytes": "6333"
},
{
"name": "Python",
"bytes": "2988317"
},
{
"name": "Shell",
"bytes": "2506"
}
],
"symlink_target": ""
}
|
"""Util functions to parse yaml config data."""
from src.common.utils import common_utils
class Error(Exception):
    """Base exception class for all errors in this module."""
class ConfigPathNotFoundError(Error):
    """Error thrown when config data file is missing."""
class ConfigEmptyError(Error):
    """Error thrown when control config data is missing."""
class ConfigValueError(Error):
    """Error thrown when a requested config value cannot be resolved."""
    pass  # pylint:disable=unnecessary-pass
# pylint:disable=too-few-public-methods
class _Config:
"""Object to hold yaml config information."""
def __init__(self, data):
"""Initialize the config object.
Args:
data (dict): config data as dict object.
"""
self._data = data
def value(self, path, default=None):
"""Return value from yaml data for a given path str.
Args:
path: str, ex: export.pubsub.topic
default: obj, value to be returned if no path does not exist.
Returns:
value at the given path in the yaml file.
"""
parts = path.split('.')
current = self._data
val = default
try:
for part in parts:
val = current.get(part)
current = val
except AttributeError as err:
if default:
val = default
else:
raise ConfigValueError('%s not found in config.yaml' %
path) from err
return val
# pylint:enable=too-few-public-methods
def config(config_filepath):
    """Process yaml at config_filepath and return an object.

    Args:
        config_filepath(str): file path for the config file.

    Returns:
        object, _Config object.

    Raises:
        ConfigEmptyError: If the config does not contain control config.
    """
    parsed = common_utils.process_yaml(config_filepath)
    if not parsed:
        raise ConfigEmptyError('Config: is empty')
    return _Config(parsed)
|
{
"content_hash": "70767c8f7b46dbd1908eae8e6cf3e2b3",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 73,
"avg_line_length": 26.91358024691358,
"alnum_prop": 0.6087155963302753,
"repo_name": "GoogleCloudPlatform/professional-services",
"id": "692a98859128e001e43714b757c25d78877598bf",
"size": "2775",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tools/quota-monitoring-alerting/python/src/common/utils/config_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "117994"
},
{
"name": "C++",
"bytes": "174"
},
{
"name": "CSS",
"bytes": "13405"
},
{
"name": "Component Pascal",
"bytes": "798"
},
{
"name": "Dockerfile",
"bytes": "15093"
},
{
"name": "Go",
"bytes": "352968"
},
{
"name": "HCL",
"bytes": "204776"
},
{
"name": "HTML",
"bytes": "1229668"
},
{
"name": "Java",
"bytes": "338810"
},
{
"name": "JavaScript",
"bytes": "59905"
},
{
"name": "Jinja",
"bytes": "60083"
},
{
"name": "Makefile",
"bytes": "14129"
},
{
"name": "Python",
"bytes": "2250081"
},
{
"name": "Scala",
"bytes": "978327"
},
{
"name": "Shell",
"bytes": "109299"
},
{
"name": "Smarty",
"bytes": "19839"
},
{
"name": "TypeScript",
"bytes": "147194"
}
],
"symlink_target": ""
}
|
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from msrest import Deserializer, Serializer
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models
from ._configuration import ServiceBusManagementClientConfiguration
from .operations import DisasterRecoveryConfigsOperations, EventHubsOperations, MigrationConfigsOperations, NamespacesOperations, Operations, PremiumMessagingRegionsOperations, QueuesOperations, RegionsOperations, RulesOperations, SubscriptionsOperations, TopicsOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ServiceBusManagementClient:    # pylint: disable=too-many-instance-attributes
    """Azure Service Bus client.

    :ivar namespaces: NamespacesOperations operations
    :vartype namespaces: azure.mgmt.servicebus.v2017_04_01.aio.operations.NamespacesOperations
    :ivar queues: QueuesOperations operations
    :vartype queues: azure.mgmt.servicebus.v2017_04_01.aio.operations.QueuesOperations
    :ivar topics: TopicsOperations operations
    :vartype topics: azure.mgmt.servicebus.v2017_04_01.aio.operations.TopicsOperations
    :ivar disaster_recovery_configs: DisasterRecoveryConfigsOperations operations
    :vartype disaster_recovery_configs:
     azure.mgmt.servicebus.v2017_04_01.aio.operations.DisasterRecoveryConfigsOperations
    :ivar event_hubs: EventHubsOperations operations
    :vartype event_hubs: azure.mgmt.servicebus.v2017_04_01.aio.operations.EventHubsOperations
    :ivar migration_configs: MigrationConfigsOperations operations
    :vartype migration_configs:
     azure.mgmt.servicebus.v2017_04_01.aio.operations.MigrationConfigsOperations
    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.servicebus.v2017_04_01.aio.operations.Operations
    :ivar premium_messaging_regions: PremiumMessagingRegionsOperations operations
    :vartype premium_messaging_regions:
     azure.mgmt.servicebus.v2017_04_01.aio.operations.PremiumMessagingRegionsOperations
    :ivar rules: RulesOperations operations
    :vartype rules: azure.mgmt.servicebus.v2017_04_01.aio.operations.RulesOperations
    :ivar regions: RegionsOperations operations
    :vartype regions: azure.mgmt.servicebus.v2017_04_01.aio.operations.RegionsOperations
    :ivar subscriptions: SubscriptionsOperations operations
    :vartype subscriptions:
     azure.mgmt.servicebus.v2017_04_01.aio.operations.SubscriptionsOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Subscription credentials that uniquely identify a Microsoft Azure
     subscription. The subscription ID forms part of the URI for every service call.
    :type subscription_id: str
    :param base_url: Service URL. Default value is "https://management.azure.com".
    :type base_url: str
    :keyword api_version: Api Version. Default value is "2017-04-01". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        self._config = ServiceBusManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # build the serializer/deserializer over every model type in the package
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        self._serialize.client_side_validation = False
        # every operation group shares the same pipeline client and (de)serializers
        self.namespaces = NamespacesOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.queues = QueuesOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.topics = TopicsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.disaster_recovery_configs = DisasterRecoveryConfigsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.event_hubs = EventHubsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.migration_configs = MigrationConfigsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.operations = Operations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.premium_messaging_regions = PremiumMessagingRegionsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.rules = RulesOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.regions = RegionsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.subscriptions = SubscriptionsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )

    def _send_request(
        self,
        request: HttpRequest,
        **kwargs: Any
    ) -> Awaitable[AsyncHttpResponse]:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = await client._send_request(request)
        <AsyncHttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.AsyncHttpResponse
        """
        # copy so the caller's request object is never mutated
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)

    async def close(self) -> None:
        await self._client.close()

    async def __aenter__(self) -> "ServiceBusManagementClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
|
{
"content_hash": "1d0586f644eb14e13555a243ffe3c93e",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 272,
"avg_line_length": 48.84615384615385,
"alnum_prop": 0.7152469577666428,
"repo_name": "Azure/azure-sdk-for-python",
"id": "749cedf823ea9bb4cc0d212938cdc12618447a9f",
"size": "7453",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/servicebus/azure-mgmt-servicebus/azure/mgmt/servicebus/v2017_04_01/aio/_service_bus_management_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from distutils.core import setup

# Packaging metadata for the genomespaceio library.
setup(
    name='genomespaceio',
    packages=['genomespaceio'],  # this must be the same as the name above
    version='0.13',
    description='A library for reading and writing files to GenomeSpace (http://www.genomespace.org)',
    author='Ted Liefeld',
    author_email='jliefeld@cloud.ucsd.edu',
    url='https://github.com/GenomeSpace/genomespace_io',
    # BUG FIX: keep the tarball tag in step with `version` (it still pointed
    # at 0.12 while version said 0.13)
    download_url='https://github.com/GenomeSpace/genomespace_io/tarball/0.13',
    keywords=['testing', 'logging', 'example'],  # arbitrary keywords
    classifiers=[],
    install_requires=['pandas'],
    package_data={'genomespaceio': ['static/index.js']}
)
|
{
"content_hash": "b9235e8d27b91c0f57af87e4e3d86bdf",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 110,
"avg_line_length": 35.89473684210526,
"alnum_prop": 0.6964809384164223,
"repo_name": "GenomeSpace/genomespace_io",
"id": "8d66e425baf80346433ce260af66315f34494964",
"size": "682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "31"
},
{
"name": "Python",
"bytes": "7226"
}
],
"symlink_target": ""
}
|
import sys
from lib import unigraph
from lib import search
def usage():
    """Print the command-line usage message for this script."""
    lines = [
        "usage: {} graph.txt vertex".format(sys.argv[0]),
        "",
        "list vertices reachable from the source vertex",
    ]
    print("\n".join(lines))
if "__main__" == __name__:
if 3 > len(sys.argv):
usage()
else:
vertex = int(sys.argv[2])
graph = unigraph.load(sys.argv[1])
srch = search.DepthFirstSearch(graph, vertex)
# get a list of connected vertices
vertices = []
for vertex in range(graph.vertices()):
if srch.marked(vertex):
vertices.append(vertex)
print(*(str(x) for x in vertices))
if len(vertices) != graph.vertices():
print("NOT", end=' ')
print('connected')
|
{
"content_hash": "ce89b5fc28326b98b3e59df0500fbd5d",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 59,
"avg_line_length": 25.433333333333334,
"alnum_prop": 0.5583224115334207,
"repo_name": "skhal/algorithms_old",
"id": "058b52e86326ad69e703fc9677b14fec30919c63",
"size": "987",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ch4/python/unigraph_dfs.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "524"
},
{
"name": "C++",
"bytes": "8777"
},
{
"name": "Python",
"bytes": "38797"
},
{
"name": "Shell",
"bytes": "364"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``external_url`` URL field to MemberInfo."""

    dependencies = [
        ('seednetwork', '0003_add_memberinfo_usda'),
    ]

    operations = [
        migrations.AddField(
            model_name='memberinfo',
            name='external_url',
            field=models.URLField(blank=True),
        ),
    ]
|
{
"content_hash": "7fa0bdebe7906e379ae6823ebbb64a80",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 52,
"avg_line_length": 21.5,
"alnum_prop": 0.599483204134367,
"repo_name": "RockinRobin/seednetwork",
"id": "99cdafee21db6fb4bdde44fcb60719244918d3ef",
"size": "459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seednetwork/migrations/0004_memberinfo_external_url.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1960"
},
{
"name": "HTML",
"bytes": "30666"
},
{
"name": "JavaScript",
"bytes": "2744"
},
{
"name": "Python",
"bytes": "65109"
}
],
"symlink_target": ""
}
|
def test_pass():
    """Trivial smoke test: always succeeds."""
    assert True
|
{
"content_hash": "866ab50b3c3367b96cb98529ec790232",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 16,
"avg_line_length": 16,
"alnum_prop": 0.65625,
"repo_name": "bergren2/plunder",
"id": "d793007cad19395f867b2ffda7bba9599f94ffaf",
"size": "32",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2041"
}
],
"symlink_target": ""
}
|
"""
Point segment module
"""
from copy import deepcopy
import sys
import traceback
import numpy as np
from .point import Point
from .utils import pairwise
from .smooth import with_no_strategy, with_extrapolation, with_inverse
from .smooth import NO_STRATEGY, INVERSE_STRATEGY, EXTRAPOLATE_STRATEGY
from .location import infer_location
from .similarity import sort_segment_points, closest_point
from .compression import spt, drp
from .transportation_mode import speed_clustering
from .spatiotemporal_segmentation import spatiotemporal_segmentation
def remove_liers(points):
    """ Removes obvious noise points

    Checks time consistency, removing points that appear out of order

    Args:
        points (:obj:`list` of :obj:`Point`)
    Returns:
        :obj:`list` of :obj:`Point`
    """
    # guard: with fewer than two points there is nothing to compare
    # (the original duplicated a single point as both first and last)
    if len(points) < 2:
        return list(points)
    result = [points[0]]
    # BUG FIX: the original iterated range(1, len(points) - 2), which never
    # examined the second-to-last point -- it was silently dropped from the
    # output; iterate every interior point instead.
    for i in range(1, len(points) - 1):
        prv = points[i-1]
        crr = points[i]
        nxt = points[i+1]
        # keep a point only if its timestamp is consistent with both neighbours
        if prv.time <= crr.time and crr.time <= nxt.time:
            result.append(crr)
    result.append(points[-1])
    return result
class Segment(object):
    """Holds the points and semantic information about them

    Attributes:
        points (:obj:`list` of :obj:`Point`): points of the segment
        transportation_modes: array of transportation modes of the segment.
            Each transportation mode represents a span of points.
            Each span is a map in the following format:
                label: string with the type of transportation mode
                from: start of the span
                to: end of the span
        location_from: TrackToTrip.Location or None, the semantic location of
            the start of the segment
        location_to: TrackToTrip.Location or None, the semantic location of
            the end of the segment
    """
    def __init__(self, points):
        # Ordered list of Point instances that make up the segment.
        self.points = points
        # Populated by infer_transportation_mode().
        self.transportation_modes = []
        # Populated by infer_location().
        self.location_from = None
        self.location_to = None
    def bounds(self, thr=0, lower_index=0, upper_index=-1):
        """ Computes the bounds of the segment, or part of it

        Args:
            thr (float, optional): Padding, in degrees, applied to every side
                of the bounding box. Defaults to 0
            lower_index (int, optional): Start index. Defaults to 0
            upper_index (int, optional): End index. Defaults to -1
        Returns:
            :obj:`tuple` of :obj:`float`: Bounds of the (sub)segment, such that
                (min_lat, min_lon, max_lat, max_lon)
        """
        # NOTE(review): with the default upper_index=-1 this slice excludes
        # the segment's final point from the bounds -- confirm intended.
        points = self.points[lower_index:upper_index]
        min_lat = float("inf")
        min_lon = float("inf")
        max_lat = -float("inf")
        max_lon = -float("inf")
        for point in points:
            min_lat = min(min_lat, point.lat)
            min_lon = min(min_lon, point.lon)
            max_lat = max(max_lat, point.lat)
            max_lon = max(max_lon, point.lon)
        return (min_lat - thr, min_lon - thr, max_lat + thr, max_lon + thr)
    def remove_noise(self):
        """In-place removal of noise points

        See `remove_liers` function
        Returns:
            :obj:`Segment`: self
        """
        self.points = remove_liers(self.points)
        return self
    def smooth(self, noise, strategy=INVERSE_STRATEGY):
        """ In-place smoothing

        See smooth_segment function
        Args:
            noise (float): Noise expected
            strategy (int): Strategy to use. Either smooth.INVERSE_STRATEGY
                or smooth.EXTRAPOLATE_STRATEGY
        Returns:
            :obj:`Segment`: self
        """
        # An unrecognized strategy silently leaves the points untouched.
        if strategy is INVERSE_STRATEGY:
            self.points = with_inverse(self.points, noise)
        elif strategy is EXTRAPOLATE_STRATEGY:
            self.points = with_extrapolation(self.points, noise, 30)
        elif strategy is NO_STRATEGY:
            self.points = with_no_strategy(self.points, noise)
        return self
    def segment(self, eps, min_time):
        """Spatio-temporal segmentation

        See spatiotemporal_segmentation function
        Args:
            eps (float): Maximum distance between two samples
            min_time (float): Minimum time between two segments
        Returns:
            :obj:`list` of :obj:`Point`
        """
        return spatiotemporal_segmentation(self.points, eps, min_time)
    def simplify(self, eps, max_dist_error, max_speed_error, topology_only=False):
        """ In-place segment simplification

        See `drp` and `compression` modules
        Args:
            eps (float): Distance threshold for the `drp` function
            max_dist_error (float): Max distance error, in meters
            max_speed_error (float): Max speed error, in km/h
            topology_only (bool, optional): True to only keep topology, not considering
                times when simplifying. Defaults to False.
        Returns:
            :obj:`Segment`: self
        """
        if topology_only:
            self.points = drp(self.points, eps)
        else:
            self.points = spt(self.points, max_dist_error, max_speed_error)
        return self
    def compute_metrics(self):
        """ Computes metrics for each point

        Each point is computed against its predecessor.
        Returns:
            :obj:`Segment`: self
        """
        for prev, point in pairwise(self.points):
            point.compute_metrics(prev)
        return self
    def infer_location(
            self,
            location_query,
            max_distance,
            google_key,
            foursquare_client_id,
            foursquare_client_secret,
            limit
        ):
        """In-place location inferring

        Infers the semantic location of the first and last points of the
        segment; see the infer_location function for argument semantics.
        Returns:
            :obj:`Segment`: self
        """
        self.location_from = infer_location(
            self.points[0],
            location_query,
            max_distance,
            google_key,
            foursquare_client_id,
            foursquare_client_secret,
            limit
        )
        self.location_to = infer_location(
            self.points[-1],
            location_query,
            max_distance,
            google_key,
            foursquare_client_id,
            foursquare_client_secret,
            limit
        )
        return self
    def infer_transportation_mode(self, clf, min_time):
        """In-place transportation mode inferring

        See speed_clustering function
        Args:
            clf: classifier used by speed_clustering
            min_time: minimum span duration, forwarded to speed_clustering
        Returns:
            :obj:`Segment`: self
        """
        self.transportation_modes = speed_clustering(clf, self.points, min_time)
        return self
    def merge_and_fit(self, segment):
        """ Merges another segment with this one, ordering the points based on a
        distance heuristic

        Args:
            segment (:obj:`Segment`): Segment to merge with
        Returns:
            :obj:`Segment`: self
        """
        self.points = sort_segment_points(self.points, segment.points)
        return self
    def closest_point_to(self, point, thr=20.0):
        """ Finds the closest point in the segment to a given point

        Args:
            point (:obj:`Point`)
            thr (float, optional): Distance threshold, in meters, to be considered
                the same point. Defaults to 20.0
        Returns:
            (int, Point): Index of the point. -1 if doesn't exist. A point is given if it's along the segment
        """
        # i counts consecutive point pairs; on a hit it indexes the pair's
        # first point in self.points.
        i = 0
        point_arr = point.gen2arr()
        def closest_in_line(pointA, pointB):
            # NOTE(review): temp's components are swapped when rebuilding the
            # Point -- presumably closest_point returns (lon, lat); confirm.
            temp = closest_point(pointA.gen2arr(), pointB.gen2arr(), point_arr)
            return Point(temp[1], temp[0], None)
        for (p_a, p_b) in pairwise(self.points):
            candidate = closest_in_line(p_a, p_b)
            if candidate.distance(point) <= thr:
                # Prefer snapping to an existing endpoint when close enough.
                if p_a.distance(point) <= thr:
                    return i, p_a
                elif p_b.distance(point) <= thr:
                    return i + 1, p_b
                else:
                    return i, candidate
            i = i + 1
        return -1, None
    def slice(self, start, end):
        """ Creates a copy of the current segment between indexes. If start > end,
        the points of the resulting segment are reversed

        Args:
            start (int): Start index
            end (int): End index
        Returns:
            :obj:`Segment`
        """
        reverse = False
        if start > end:
            temp = start
            start = end
            end = temp
            reverse = True
        seg = self.copy()
        # end is inclusive.
        seg.points = seg.points[start:end+1]
        if reverse:
            seg.points = list(reversed(seg.points))
        return seg
    def copy(self):
        """ Creates a deep copy of this instance

        Returns:
            :obj:`Segment`
        """
        return deepcopy(self)
    def to_json(self):
        """ Converts segment to a JSON serializable format

        Returns:
            :obj:`dict`
        """
        points = [point.to_json() for point in self.points]
        return {
            'points': points,
            'transportationModes': self.transportation_modes,
            'locationFrom': self.location_from.to_json() if self.location_from != None else None,
            'locationTo': self.location_to.to_json() if self.location_to != None else None
        }
    @staticmethod
    def from_gpx(gpx_segment):
        """ Creates a segment from a GPX format.

        No preprocessing is done.
        Arguments:
            gpx_segment (:obj:`gpxpy.GPXTrackSegment`)
        Return:
            :obj:`Segment`
        """
        points = []
        for point in gpx_segment.points:
            points.append(Point.from_gpx(point))
        return Segment(points)
    @staticmethod
    def from_json(json):
        """ Creates a segment from a JSON file.

        No preprocessing is done.
        Arguments:
            json (:obj:`dict`): JSON representation. See to_json.
        Return:
            :obj:`Segment`
        """
        points = []
        for point in json['points']:
            points.append(Point.from_json(point))
        return Segment(points)
|
{
"content_hash": "57783655580768546b56657c30173699",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 109,
"avg_line_length": 30.047904191616766,
"alnum_prop": 0.5628736548425668,
"repo_name": "ruipgil/TrackToTrip",
"id": "eccd2b0079e17e9d7c48d5cafe48d3388aa18a7f",
"size": "10036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tracktotrip/segment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "121973"
}
],
"symlink_target": ""
}
|
import io
from lxml import etree
import six
import webtest
from keystone.auth import controllers as auth_controllers
from keystone.common import serializer
from keystone.openstack.common import jsonutils
from keystone import tests
from keystone.tests import default_fixtures
class RestfulTestCase(tests.TestCase):
    """Performs restful tests against the WSGI app over HTTP.

    This class launches public & admin WSGI servers for every test, which can
    be accessed by calling ``public_request()`` or ``admin_request()``,
    respectively.

    ``restful_request()`` and ``request()`` methods are also exposed if you
    need to bypass restful conventions or access HTTP details in your test
    implementation.

    Three new asserts are provided:

    * ``assertResponseSuccessful``: called automatically for every request
      unless an ``expected_status`` is provided
    * ``assertResponseStatus``: called instead of ``assertResponseSuccessful``,
      if an ``expected_status`` is provided
    * ``assertValidResponseHeaders``: validates that the response headers
      appear as expected

    Requests are automatically serialized according to the defined
    ``content_type``. Responses are automatically deserialized as well, and
    available in the ``response.body`` attribute. The original body content is
    available in the ``response.raw`` attribute.
    """
    # default content type to test; subclasses may override with 'xml'
    content_type = 'json'
    def setUp(self, app_conf='keystone'):
        super(RestfulTestCase, self).setUp()
        # Will need to reset the plug-ins
        self.addCleanup(setattr, auth_controllers, 'AUTH_METHODS', {})
        self.load_backends()
        self.load_fixtures(default_fixtures)
        # Separate apps so tests can exercise public vs admin pipelines.
        self.public_app = webtest.TestApp(
            self.loadapp(app_conf, name='main'))
        self.addCleanup(delattr, self, 'public_app')
        self.admin_app = webtest.TestApp(
            self.loadapp(app_conf, name='admin'))
        self.addCleanup(delattr, self, 'admin_app')
    def request(self, app, path, body=None, headers=None, token=None,
                expected_status=None, **kwargs):
        """Issue a raw HTTP request against *app* and return the response."""
        if headers:
            # webtest requires native-str header names/values.
            headers = dict([(str(k), str(v)) for k, v
                            in six.iteritems(headers)])
        else:
            headers = {}
        if token:
            headers['X-Auth-Token'] = str(token)
        # setting body this way because of:
        # https://github.com/Pylons/webtest/issues/71
        if body:
            kwargs['body_file'] = io.BytesIO(body)
        # sets environ['REMOTE_ADDR']
        kwargs.setdefault('remote_addr', 'localhost')
        # webtest itself asserts the status when expected_status is given.
        response = app.request(path, headers=headers,
                               status=expected_status, **kwargs)
        return response
    def assertResponseSuccessful(self, response):
        """Asserts that a status code lies inside the 2xx range.

        :param response: :py:class:`httplib.HTTPResponse` to be
          verified to have a status code between 200 and 299.

        example::

             self.assertResponseSuccessful(response)
        """
        # NOTE(review): the failure message formats response.status while the
        # check uses response.status_code -- confirm both attributes exist.
        self.assertTrue(
            response.status_code >= 200 and response.status_code <= 299,
            'Status code %d is outside of the expected range (2xx)\n\n%s' %
            (response.status, response.body))
    def assertResponseStatus(self, response, expected_status):
        """Asserts a specific status code on the response.

        :param response: :py:class:`httplib.HTTPResponse`
        :param expected_status: The specific ``status`` result expected

        example::

            self.assertResponseStatus(response, 204)
        """
        self.assertEqual(
            response.status_code,
            expected_status,
            'Status code %s is not %s, as expected)\n\n%s' %
            (response.status_code, expected_status, response.body))
    def assertValidResponseHeaders(self, response):
        """Ensures that response headers appear as expected."""
        self.assertIn('X-Auth-Token', response.headers.get('Vary'))
    def assertValidErrorResponse(self, response, expected_status=400):
        """Verify that the error response is valid.

        Subclasses can override this function based on the expected response.
        """
        self.assertEqual(response.status_code, expected_status)
        error = response.result['error']
        self.assertEqual(error['code'], response.status_code)
        self.assertIsNotNone(error.get('title'))
    def _to_content_type(self, body, headers, content_type=None):
        """Attempt to encode JSON and XML automatically."""
        # NOTE(review): implicitly returns None when body is falsy or
        # content_type is neither 'json' nor 'xml' -- confirm intended.
        content_type = content_type or self.content_type
        if content_type == 'json':
            headers['Accept'] = 'application/json'
            if body:
                headers['Content-Type'] = 'application/json'
                return jsonutils.dumps(body)
        elif content_type == 'xml':
            headers['Accept'] = 'application/xml'
            if body:
                headers['Content-Type'] = 'application/xml'
                return serializer.to_xml(body)
    def _from_content_type(self, response, content_type=None):
        """Attempt to decode JSON and XML automatically, if detected."""
        content_type = content_type or self.content_type
        if response.body is not None and response.body.strip():
            # if a body is provided, a Content-Type is also expected
            header = response.headers.get('Content-Type')
            self.assertIn(content_type, header)
            if content_type == 'json':
                response.result = jsonutils.loads(response.body)
            elif content_type == 'xml':
                response.result = etree.fromstring(response.body)
            else:
                # Unknown content type: expose the raw body unchanged.
                response.result = response.body
    def restful_request(self, method='GET', headers=None, body=None,
                        content_type=None, response_content_type=None,
                        **kwargs):
        """Serializes/deserializes json/xml as request/response body.

        .. WARNING::

            * Existing Accept header will be overwritten.
            * Existing Content-Type header will be overwritten.
        """
        # Initialize headers dictionary
        headers = {} if not headers else headers
        body = self._to_content_type(body, headers, content_type)
        # Perform the HTTP request/response
        response = self.request(method=method, headers=headers, body=body,
                                **kwargs)
        response_content_type = response_content_type or content_type
        self._from_content_type(response, content_type=response_content_type)
        # we can save some code & improve coverage by always doing this
        if method != 'HEAD' and response.status_code >= 400:
            self.assertValidErrorResponse(response)
        # Contains the decoded response.body
        return response
    def _request(self, convert=True, **kwargs):
        """Dispatch to restful_request or raw request, then validate headers."""
        if convert:
            response = self.restful_request(**kwargs)
        else:
            response = self.request(**kwargs)
        self.assertValidResponseHeaders(response)
        return response
    def public_request(self, **kwargs):
        """Issue a request against the public (main) application."""
        return self._request(app=self.public_app, **kwargs)
    def admin_request(self, **kwargs):
        """Issue a request against the admin application."""
        return self._request(app=self.admin_app, **kwargs)
    def _get_token(self, body):
        """Convenience method so that we can test authenticated requests."""
        r = self.public_request(method='POST', path='/v2.0/tokens', body=body)
        return self._get_token_id(r)
    def get_unscoped_token(self):
        """Convenience method so that we can test authenticated requests."""
        return self._get_token({
            'auth': {
                'passwordCredentials': {
                    'username': self.user_foo['name'],
                    'password': self.user_foo['password'],
                },
            },
        })
    def get_scoped_token(self, tenant_id=None):
        """Convenience method so that we can test authenticated requests."""
        if not tenant_id:
            tenant_id = self.tenant_bar['id']
        return self._get_token({
            'auth': {
                'passwordCredentials': {
                    'username': self.user_foo['name'],
                    'password': self.user_foo['password'],
                },
                'tenantId': tenant_id,
            },
        })
    def _get_token_id(self, r):
        """Helper method to return a token ID from a response.

        This needs to be overridden by child classes for on their content type.
        """
        raise NotImplementedError()
|
{
"content_hash": "174c4c7f3bfd8c70b74258908e69f048",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 79,
"avg_line_length": 36.049586776859506,
"alnum_prop": 0.6114167812929848,
"repo_name": "rodrigods/keystone",
"id": "f181b975f67f1013b315e1a584d8a0ee7d22dc2c",
"size": "9310",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "keystone/tests/rest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2971055"
},
{
"name": "Shell",
"bytes": "10635"
}
],
"symlink_target": ""
}
|
class Solution(object):
    def findRadius(self, houses, heaters):
        """Return the minimum heater radius so every house is covered.

        For each house, finds the nearest heater via binary search and
        takes the maximum of those nearest distances.

        :type houses: List[int]
        :type heaters: List[int]
        :rtype: int
        """
        # Local import: this file has no top-level imports, so the original
        # references to `bisect` (and `sys`) raised NameError at runtime.
        import bisect
        res = -1
        heaters.sort()
        for h in houses:
            i = bisect.bisect_left(heaters, h)
            # Distance to the nearest heater on each side; infinity when no
            # heater exists on that side (replaces the unimported sys.maxsize).
            left_dist = h - heaters[i - 1] if i > 0 else float('inf')
            right_dist = heaters[i] - h if i < len(heaters) else float('inf')
            res = max(res, min(left_dist, right_dist))
        return res
|
{
"content_hash": "f089f4a7fcb0ea96ce0eb6d8106d4dcd",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 76,
"avg_line_length": 33.8,
"alnum_prop": 0.5187376725838264,
"repo_name": "Mlieou/oj_solutions",
"id": "71266808684edda99a18e6b3ff19e0a116592ef3",
"size": "507",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "leetcode/python/ex_475.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "46346"
},
{
"name": "Java",
"bytes": "57728"
},
{
"name": "Python",
"bytes": "335504"
},
{
"name": "Shell",
"bytes": "127"
}
],
"symlink_target": ""
}
|
class StateBase(object):
    """
    Base class of all possible States within AWS State Language

    Supported types: "Pass", "Task", "Choice", "Wait", "Succeed", "Fail", "Parallel", "Ext"
    """
    # Types accepted by the AWS Step Functions state language (plus "Ext").
    _ALLOWED_TYPES = ("Pass", "Task", "Choice", "Wait", "Succeed", "Fail", "Parallel", "Ext")
    def __init__(self, Name=None, Type=None, Comment=""):
        """Validate and store the state's name, type and optional comment.

        Raises:
            Exception: if Name/Type are missing, not strings, or Type is
                not one of the supported state types.
        """
        if not Name:
            raise Exception("Name must be specified")
        if not isinstance(Name, str):
            raise Exception("Name must be a string value")
        # Fixed: the original messages were missing the closing ')' after
        # "(step '{}'".
        if not Type:
            raise Exception("Type must be specified (step '{}')".format(Name))
        if not isinstance(Type, str):
            raise Exception("Type must be a string value (step '{}')".format(Name))
        if Type not in self._ALLOWED_TYPES:
            raise Exception(
                "Type must be one of the allowed types for AWS Step Functions (step '{}')".format(Name))
        self._type = Type
        self._name = Name
        self._comment = ""
        self.set_comment(Comment)
    def validate(self):
        """Hook for subclasses; the base state has nothing to validate."""
        pass
    def to_json(self):
        """Return the JSON-serializable representation of this state."""
        return {
            "Type" : self.get_type(),
            "Comment" : self.get_comment()
        }
    def get_name(self):
        """Return the state's name."""
        return self._name
    def get_type(self):
        """Return the state's type string."""
        return self._type
    def get_comment(self):
        """Return the state's comment ("" when unset)."""
        return self._comment
    def set_comment(self, Comment=""):
        """Set the comment; a falsy Comment resets it to the empty string.

        Raises:
            Exception: if Comment is given but is not a string.
        """
        comment = ""
        if Comment:
            if not isinstance(Comment, str):
                raise Exception("Comment must be a string value if specified, for step ({})".format(self.get_name()))
            comment = Comment
        self._comment = comment
    def get_child_states(self):
        """Return the states nested under this one (just itself here)."""
        return [self]
|
{
"content_hash": "982da95ad6b2e6d436cff829a90c85fa",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 106,
"avg_line_length": 26.867924528301888,
"alnum_prop": 0.6573033707865169,
"repo_name": "gford1000/awssl",
"id": "53c1a4b5e1b750e96dba465ab52ca2feab11c8cc",
"size": "1424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "awssl/state_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "165950"
}
],
"symlink_target": ""
}
|
import numpy as np
from PySide import QtGui, QtCore
import sharppy.sharptab as tab
from sharppy.viz.barbs import drawBarb
from sharppy.sharptab.constants import *
import platform
## routine written by Kelton Halbert
## keltonhalbert@ou.edu
__all__ = ['backgroundKinematics', 'plotKinematics']
class backgroundKinematics(QtGui.QFrame):
    '''
    Handles drawing the background frame.
    '''
    def __init__(self):
        super(backgroundKinematics, self).__init__()
        self.initUI()
    def initUI(self):
        ## initialize frame variables such as size,
        ## padding, etc.
        self.setStyleSheet("QFrame {"
            "  background-color: rgb(0, 0, 0);"
            "  border-width: 1px;"
            "  border-style: solid;"
            "  border-color: #3399CC;}")
        self.lpad = 5; self.rpad = 5
        self.tpad = 5; self.bpad = 5
        self.wid = self.size().width()
        self.hgt = self.size().height()
        self.tlx = self.rpad; self.tly = self.tpad
        self.brx = self.wid; self.bry = self.hgt
        ## font size and vertical padding scale with the frame height
        fsize = np.floor(.06 * self.hgt)
        self.tpad = np.floor(.03 * self.hgt)
        self.label_font = QtGui.QFont('Helvetica')
        self.label_font.setPixelSize(fsize)
        self.label_metrics = QtGui.QFontMetrics( self.label_font )
        self.label_height = self.label_metrics.xHeight() + self.tpad
        ## ylast tracks the running vertical cursor while drawing rows
        self.ylast = self.label_height
        self.barby = 0
        self.plotBitMap = QtGui.QPixmap(self.width()-2, self.height()-2)
        self.plotBitMap.fill(QtCore.Qt.black)
        self.plotBackground()
    def draw_frame(self, qp):
        '''
        Draws the background frame and the text headers for indices.
        '''
        ## initialize a white pen with thickness 1 and a solid line
        pen = QtGui.QPen(QtCore.Qt.white, 1, QtCore.Qt.SolidLine)
        qp.setPen(pen)
        qp.setFont(self.label_font)
        ## make the initial x value relative to the width of the frame
        x1 = self.brx / 10
        y1 = self.ylast + self.tpad
        ## draw the header
        rect1 = QtCore.QRect(x1*2.5, 3, x1, self.label_height)
        rect2 = QtCore.QRect(x1*5, 3, x1, self.label_height)
        rect3 = QtCore.QRect(x1*7, 3, x1, self.label_height)
        rect4 = QtCore.QRect(x1*9-self.rpad, 3, x1, self.label_height)
        qp.drawText(rect1, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, 'SRH (m2/s2)')
        qp.drawText(rect2, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, 'Shear (kt)')
        qp.drawText(rect3, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, 'MnWind')
        qp.drawText(rect4, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, 'SRW')
        ## left column
        ## first block
        texts = ['SFC-1km', 'SFC-3km', 'Eff Inflow Layer',]
        for text in texts:
            rect = QtCore.QRect(self.lpad, y1, x1, self.label_height)
            qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft, text)
            ## on Windows the font descent must be added to avoid row overlap
            vspace = self.label_height
            if platform.system() == "Windows":
                vspace += self.label_metrics.descent()
            y1 += vspace
        self.ylast = y1
        ## second block
        texts = ['SFC-6km', 'SFC-8km','LCL-EL (Cloud Layer)', 'Eff Shear (EBWD)']
        y1 = self.ylast + self.tpad
        for text in texts:
            rect = QtCore.QRect(self.lpad, y1, x1, self.label_height)
            qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft, text)
            vspace = self.label_height
            if platform.system() == "Windows":
                vspace += self.label_metrics.descent()
            y1 += vspace
        self.ylast = y1
        ## third block
        texts = ['BRN Shear = ', '4-6km SR Wind = ']
        y1 = self.ylast + self.tpad
        for text in texts:
            rect = QtCore.QRect(self.lpad, y1, x1, self.label_height)
            qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft, text)
            vspace = self.label_height
            if platform.system() == "Windows":
                vspace += self.label_metrics.descent()
            y1 += vspace
        self.ylast = y1
        ## fourth block
        texts = ['...Storm Motion Vectors...', 'Bunkers Right = ', 'Bunkers Left = ', 'Corfidi Downshear = ', 'Corfidi Upshear = ']
        y1 = self.ylast + self.tpad
        ## remember where the wind barbs should be drawn later
        self.barby = y1 + self.tpad
        for text in texts:
            rect = QtCore.QRect(self.lpad, y1, x1, self.label_height)
            qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft, text)
            vspace = self.label_height
            if platform.system() == "Windows":
                vspace += self.label_metrics.descent()
            y1 += vspace
        # NOTE(review): the other blocks set self.ylast = y1 here; assigning
        # vspace (a single row height) looks like a bug -- confirm intended.
        self.ylast = vspace
        ## draw lines seperating the indices
        qp.drawLine( 0, self.ylast+3, self.brx, self.ylast+3 )
    def resizeEvent(self, e):
        '''
        Handles when the window gets resized.
        '''
        self.initUI()
    def plotBackground(self):
        '''
        Handles drawing the text background.
        '''
        ## initialize a QPainter objext
        qp = QtGui.QPainter()
        qp.begin(self.plotBitMap)
        ## draw the frame
        self.draw_frame(qp)
        qp.end()
class plotKinematics(backgroundKinematics):
    '''
    Handles plotting the indices in the frame.
    '''
    def __init__(self):
        ## get the surfce based, most unstable, and mixed layer
        ## parcels to use for indices, as well as the sounding
        ## profile itself.
        super(plotKinematics, self).__init__()
        self.prof = None;
    def setProf(self, prof):
        '''
        Caches the kinematic fields from a new sounding profile and
        triggers a full redraw of the frame.
        '''
        self.ylast = self.label_height
        self.prof = prof;
        self.srh1km = prof.srh1km
        self.srh3km = prof.srh3km
        self.esrh = prof.right_esrh
        self.mean_1km = prof.mean_1km
        self.mean_3km = prof.mean_3km
        self.mean_6km = prof.mean_6km
        self.mean_8km = prof.mean_8km
        self.mean_lcl_el = prof.mean_lcl_el
        mean_eff = prof.mean_eff
        mean_ebw = prof.mean_ebw
        self.srw_1km = prof.srw_1km
        self.srw_3km = prof.srw_3km
        self.srw_6km = prof.srw_6km
        self.srw_8km = prof.srw_8km
        self.srw_lcl_el = prof.srw_lcl_el
        self.srw_4_5km = prof.srw_4_5km
        srw_eff = prof.srw_eff
        srw_ebw = prof.srw_ebw
        self.sfc_1km_shear = prof.sfc_1km_shear
        self.sfc_3km_shear = prof.sfc_3km_shear
        self.sfc_6km_shear = prof.sfc_6km_shear
        self.sfc_8km_shear = prof.sfc_8km_shear
        self.lcl_el_shear = prof.lcl_el_shear
        self.eff_shear = prof.eff_shear
        self.ebwd = prof.ebwd
        ## when there is no effective inflow layer, mask the derived fields
        if prof.etop is np.ma.masked or prof.ebottom is np.ma.masked:
            self.mean_eff = [np.ma.masked, np.ma.masked]
            self.mean_ebw = [np.ma.masked, np.ma.masked]
            self.srw_eff = [np.ma.masked, np.ma.masked]
            self.srw_ebw = [np.ma.masked, np.ma.masked]
        else:
            ## convert u/v components to direction/speed pairs
            self.mean_eff = tab.utils.comp2vec(mean_eff[0], mean_eff[1])
            self.mean_ebw = tab.utils.comp2vec(mean_ebw[0], mean_ebw[1])
            self.srw_eff = tab.utils.comp2vec(srw_eff[0], srw_eff[1])
            self.srw_ebw = tab.utils.comp2vec(srw_ebw[0], srw_ebw[1])
        self.brn_shear = prof.mupcl.brnshear
        self.bunkers_right_vec = tab.utils.comp2vec(prof.srwind[0], prof.srwind[1])
        self.bunkers_left_vec = tab.utils.comp2vec(prof.srwind[2], prof.srwind[3])
        self.upshear = tab.utils.comp2vec(prof.upshear_downshear[0],prof.upshear_downshear[1])
        self.downshear = tab.utils.comp2vec(prof.upshear_downshear[2],prof.upshear_downshear[3])
        self.clearData()
        self.plotBackground()
        self.plotData()
        self.update()
    def resizeEvent(self, e):
        '''
        Handles when the window is resized.
        '''
        super(plotKinematics, self).resizeEvent(e)
        self.plotData()
    def paintEvent(self, e):
        ## blit the off-screen pixmap onto the widget
        super(plotKinematics, self).paintEvent(e)
        qp = QtGui.QPainter()
        qp.begin(self)
        qp.drawPixmap(1, 1, self.plotBitMap)
        qp.end()
    def clearData(self):
        '''
        Handles the clearing of the pixmap
        in the frame.
        '''
        self.plotBitMap = QtGui.QPixmap(self.width(), self.height())
        self.plotBitMap.fill(QtCore.Qt.black)
    def plotData(self):
        '''
        Handles the drawing of the text on the frame.
        '''
        if self.prof is None:
            return
        x1 = self.brx / 10
        y1 = self.bry / 19
        # NOTE(review): origin_x/origin_y are computed but never used here.
        origin_x = x1*8.5
        origin_y = y1*15
        ## initialize a QPainter object
        qp = QtGui.QPainter()
        qp.begin(self.plotBitMap)
        qp.setRenderHint(qp.Antialiasing)
        qp.setRenderHint(qp.TextAntialiasing)
        ## draw the indices
        self.drawKinematics(qp)
        self.drawBarbs(qp)
        qp.end()
    def drawBarbs(self, qp):
        '''
        Draws the 1 km (red) and 6 km (blue) AGL wind barbs with a label.
        ---------
        qp: QtGui.QPainter object
        '''
        x1 = self.brx / 10
        y1 = self.bry / 19
        origin_x = x1*8.
        pen = QtGui.QPen(QtGui.QColor('#0A74C6'), 1, QtCore.Qt.SolidLine)
        qp.setPen(pen)
        qp.setFont(self.label_font)
        rect0 = QtCore.QRect(x1*7, self.ylast + self.tpad, x1*2, self.label_height)
        qp.drawText(rect0, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, '1km & 6km AGL\nWind Barbs' )
        drawBarb( qp, origin_x, self.barby, self.prof.wind1km[0], self.prof.wind1km[1], color='#AA0000')
        drawBarb( qp, origin_x, self.barby, self.prof.wind6km[0], self.prof.wind6km[1], color='#0A74C6')
    def drawKinematics(self, qp):
        '''
        Draws the kinematic indices (SRH, shear, mean wind, SR wind) and
        the storm motion vectors as rows of right-aligned text.
        ---------
        qp: QtGui.QPainter object
        '''
        ## initialize a pen to draw with.
        pen = QtGui.QPen(QtCore.Qt.white, 1, QtCore.Qt.SolidLine)
        qp.setPen(pen)
        qp.setFont(self.label_font)
        x1 = self.brx / 10
        y1 = self.ylast + self.tpad
        ## format the text
        srh1km = tab.utils.INT2STR(self.srh1km[0])
        srh3km = tab.utils.INT2STR(self.srh3km[0])
        sfc1km = tab.utils.INT2STR(tab.utils.mag(self.sfc_1km_shear[0], self.sfc_1km_shear[1]))
        sfc3km = tab.utils.INT2STR(tab.utils.mag(self.sfc_3km_shear[0], self.sfc_3km_shear[1]))
        sfc6km = tab.utils.INT2STR(tab.utils.mag(self.sfc_6km_shear[0], self.sfc_6km_shear[1]))
        sfc8km = tab.utils.INT2STR(tab.utils.mag(self.sfc_8km_shear[0], self.sfc_8km_shear[1]))
        lcl_el = tab.utils.INT2STR(tab.utils.mag(self.lcl_el_shear[0], self.lcl_el_shear[1]))
        mean_1km = tab.utils.INT2STR(self.mean_1km[0]) + '/' + tab.utils.INT2STR(self.mean_1km[1])
        mean_3km = tab.utils.INT2STR(self.mean_3km[0]) + '/' + tab.utils.INT2STR(self.mean_3km[1])
        mean_6km = tab.utils.INT2STR(self.mean_6km[0]) + '/' + tab.utils.INT2STR(self.mean_6km[1])
        mean_8km = tab.utils.INT2STR(self.mean_8km[0]) + '/' + tab.utils.INT2STR(self.mean_8km[1])
        mean_lcl_el = tab.utils.INT2STR(self.mean_lcl_el[0]) + '/' + tab.utils.INT2STR(self.mean_lcl_el[1])
        srw_1km = tab.utils.INT2STR(self.srw_1km[0]) + '/' + tab.utils.INT2STR(self.srw_1km[1])
        srw_3km = tab.utils.INT2STR(self.srw_3km[0]) + '/' + tab.utils.INT2STR(self.srw_3km[1])
        srw_6km = tab.utils.INT2STR(self.srw_6km[0]) + '/' + tab.utils.INT2STR(self.srw_6km[1])
        srw_8km = tab.utils.INT2STR(self.srw_8km[0]) + '/' + tab.utils.INT2STR(self.srw_8km[1])
        srw_lcl_el = tab.utils.INT2STR(self.srw_lcl_el[0]) + '/' + tab.utils.INT2STR(self.srw_lcl_el[1])
        srw_4_5km = tab.utils.INT2STR(self.srw_4_5km[0]) + '/' + tab.utils.INT2STR(self.srw_4_5km[1]) + ' kt'
        esrh = tab.utils.INT2STR(self.esrh[0])
        eff_lr = tab.utils.INT2STR(tab.utils.mag(self.eff_shear[0], self.eff_shear[1]))
        efbwd = tab.utils.INT2STR(tab.utils.mag(self.ebwd[0], self.ebwd[1]))
        mean_eff = tab.utils.INT2STR(self.mean_eff[0]) + '/' + tab.utils.INT2STR(self.mean_eff[1])
        mean_ebw = tab.utils.INT2STR(self.mean_ebw[0]) + '/' + tab.utils.INT2STR(self.mean_ebw[1])
        srw_eff = tab.utils.INT2STR(self.srw_eff[0]) + '/' + tab.utils.INT2STR(self.srw_eff[1])
        srw_ebw = tab.utils.INT2STR(self.srw_ebw[0]) + '/' + tab.utils.INT2STR(self.srw_ebw[1])
        brn_shear = tab.utils.INT2STR(self.brn_shear) + ' m2/s2'
        bunkers_left = tab.utils.INT2STR(self.bunkers_left_vec[0]) + '/' + tab.utils.INT2STR(self.bunkers_left_vec[1]) + ' kt'
        bunkers_right = tab.utils.INT2STR(self.bunkers_right_vec[0]) + '/' + tab.utils.INT2STR(self.bunkers_right_vec[1]) + ' kt'
        upshear = tab.utils.INT2STR(self.upshear[0]) + '/' + tab.utils.INT2STR(self.upshear[1]) + ' kt'
        downshear = tab.utils.INT2STR(self.downshear[0]) + '/' + tab.utils.INT2STR(self.downshear[1]) + ' kt'
        ## sfc-1km
        ## each row places its values in alternating columns (count += 2)
        texts = [srh1km, sfc1km, mean_1km, srw_1km]
        count = 3
        for text in texts:
            rect = QtCore.QRect(x1*count, y1, x1, self.label_height)
            qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignRight, text)
            count += 2
        vspace = self.label_height
        if platform.system() == "Windows":
            vspace += self.label_metrics.descent()
        y1 += vspace
        self.ylast = y1
        ## sfc-3km
        texts = [srh3km, sfc3km, mean_3km, srw_3km]
        count = 3
        for text in texts:
            rect = QtCore.QRect(x1*count, y1, x1, self.label_height)
            qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignRight, text)
            count += 2
        vspace = self.label_height
        if platform.system() == "Windows":
            vspace += self.label_metrics.descent()
        y1 += vspace
        self.ylast = y1
        ## Effective Inflow Layer
        texts = [esrh, eff_lr, mean_eff, srw_eff]
        count = 3
        for text in texts:
            rect = QtCore.QRect(x1*count, y1, x1, self.label_height)
            qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignRight, text)
            count += 2
        ## extra tpad separates this group from the next
        vspace = self.label_height + self.tpad
        if platform.system() == "Windows":
            vspace += self.label_metrics.descent()
        y1 += vspace
        self.ylast = y1
        ## sfc-6km
        texts = [sfc6km, mean_6km, srw_6km]
        count = 5
        for text in texts:
            rect = QtCore.QRect(x1*count, y1, x1, self.label_height)
            qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignRight, text)
            count += 2
        vspace = self.label_height
        if platform.system() == "Windows":
            vspace += self.label_metrics.descent()
        y1 += vspace
        self.ylast = y1
        ## sfc-8km
        texts = [sfc8km, mean_8km, srw_8km]
        count = 5
        for text in texts:
            rect = QtCore.QRect(x1*count, y1, x1, self.label_height)
            qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignRight, text)
            count += 2
        vspace = self.label_height
        if platform.system() == "Windows":
            vspace += self.label_metrics.descent()
        y1 += vspace
        self.ylast = y1
        ## LCL-EL
        texts = [lcl_el, mean_lcl_el, srw_lcl_el]
        count = 5
        for text in texts:
            rect = QtCore.QRect(x1*count, y1, x1, self.label_height)
            qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignRight, text)
            count += 2
        vspace = self.label_height
        if platform.system() == "Windows":
            vspace += self.label_metrics.descent()
        y1 += vspace
        self.ylast = y1
        ## Effective Shear
        texts = [efbwd, mean_ebw, srw_ebw]
        count = 5
        for text in texts:
            rect = QtCore.QRect(x1*count, y1, x1, self.label_height)
            qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignRight, text)
            count += 2
        vspace = self.label_height + self.tpad
        if platform.system() == "Windows":
            vspace += self.label_metrics.descent()
        y1 += vspace
        self.ylast = y1
        ## BRN Shear and 4-6km SR Wind
        texts = [brn_shear, srw_4_5km]
        for text in texts:
            rect = QtCore.QRect(x1*5, y1, x1, self.label_height)
            qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignRight, text)
            vspace = self.label_height
            if platform.system() == "Windows":
                vspace += self.label_metrics.descent()
            y1 += vspace
            self.ylast = y1
        y1 += self.label_height + self.tpad # Not entirely sure why this doesn't
                                            # need the extra pixels on Windows.
        ## bunkers motion
        texts = [bunkers_right, bunkers_left]
        colors =[QtGui.QColor('#0099CC'), QtGui.QColor('#FF6666')]
        for text, color in zip(texts, colors):
            rect = QtCore.QRect(x1*5, y1, x1, self.label_height)
            pen = QtGui.QPen(color, 1, QtCore.Qt.SolidLine)
            qp.setPen(pen)
            qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignRight, text)
            vspace = self.label_height
            if platform.system() == "Windows":
                vspace += self.label_metrics.descent()
            y1 += vspace
            self.ylast = y1
        pen = QtGui.QPen(QtCore.Qt.white, 1, QtCore.Qt.SolidLine)
        qp.setPen(pen)
        ## upshear and downshear vectors
        # NOTE(review): unlike the other loops, vspace is never reset to
        # self.label_height here, so it carries over from the previous loop
        # and, on Windows, accumulates an extra descent per iteration --
        # likely a missing "vspace = self.label_height"; confirm.
        texts = [downshear, upshear]
        for text in texts:
            rect = QtCore.QRect(x1*5, y1, x1, self.label_height)
            qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignRight, text)
            if platform.system() == "Windows":
                vspace += self.label_metrics.descent()
            y1 += vspace
|
{
"content_hash": "a2bea4f3a76fdda87cf5c2c23d5cbb13",
"timestamp": "",
"source": "github",
"line_count": 430,
"max_line_length": 131,
"avg_line_length": 41.509302325581395,
"alnum_prop": 0.574429940052664,
"repo_name": "blizzardwarriorwx/SHARPpy",
"id": "ce8215c0d22391eba0fd74f7a2ef76de894feccf",
"size": "17849",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "sharppy/viz/kinematics.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Ada",
"bytes": "3216"
},
{
"name": "Augeas",
"bytes": "2237"
},
{
"name": "BlitzMax",
"bytes": "158542"
},
{
"name": "Bro",
"bytes": "12784"
},
{
"name": "C++",
"bytes": "33333"
},
{
"name": "Coq",
"bytes": "3154"
},
{
"name": "Elm",
"bytes": "7066"
},
{
"name": "Erlang",
"bytes": "2217"
},
{
"name": "F#",
"bytes": "7633"
},
{
"name": "Haskell",
"bytes": "83907"
},
{
"name": "Jupyter Notebook",
"bytes": "420406"
},
{
"name": "KiCad Layout",
"bytes": "2242"
},
{
"name": "LOLCODE",
"bytes": "2229"
},
{
"name": "Lex",
"bytes": "5456"
},
{
"name": "Mathematica",
"bytes": "3223"
},
{
"name": "PHP",
"bytes": "2231"
},
{
"name": "Parrot",
"bytes": "9236"
},
{
"name": "PureBasic",
"bytes": "18590"
},
{
"name": "Python",
"bytes": "750357"
},
{
"name": "Scheme",
"bytes": "23902"
},
{
"name": "Shell",
"bytes": "2240"
},
{
"name": "Tcl",
"bytes": "6172"
},
{
"name": "TeX",
"bytes": "35505"
},
{
"name": "Visual Basic",
"bytes": "7854"
}
],
"symlink_target": ""
}
|
from math import asin
from math import degrees
class TestCase:
    """One projectile test case: launch velocity and target distance."""

    def __init__(self, v, d):
        self.velocity = v
        self.distance = d
def initialize_test_cases(lines):
    """Parse "<velocity> <distance>" input lines into the module-level
    ``test_cases`` list, skipping the header line (the case count)."""
    global test_cases
    for index, item in enumerate(lines):
        if index == 0:
            continue  # first line is the number of cases, not a case
        items = item.split(' ')
        if len(items) > 1:
            test_cases[index - 1] = TestCase(int(items[0]), int(items[1]))
def compute_angle(v, d):
    """Return the launch angle (radians) for range ``d`` at speed ``v``.

    Derived from the projectile range formula d = v**2 * sin(2*theta) / g;
    the ratio is clamped to 1 so unreachable targets yield 45 degrees.
    """
    g = 9.8  # gravitational acceleration, m/s^2
    ratio = min(g * d / v ** 2, 1)
    return asin(ratio) * 0.5
def is_almost_equal(f1, f2, digit):
    """Return True when ``f1`` and ``f2`` differ by less than 10**-digit."""
    tolerance = 0.1 ** digit
    return abs(f1 - f2) < tolerance
def print_all_results():
    """Print one "Case #i: angle" line per global test case to stdout."""
    for i, case in enumerate(test_cases):
        angle = round(degrees(compute_angle(case.velocity, case.distance)), 6)
        if is_almost_equal(angle, int(angle), 7):
            angle = int(angle)  # print whole-degree answers without ".0"
        print('Case #' + str(i + 1) + ': ' + str(angle))
def write_all_results():
    """Write one "Case #i: angle" line per global test case to
    CaptainHammer.txt.

    Fix: the file handle was opened without a context manager, so it
    leaked if a case raised; ``with`` guarantees it is closed.
    """
    with open("CaptainHammer.txt", "w") as test_result_file:
        for x in range(len(test_cases)):
            case = test_cases[x]
            angle = round(degrees(compute_angle(case.velocity, case.distance)), 6)
            if is_almost_equal(angle, int(angle), 7):
                angle = int(angle)  # write whole-degree answers without ".0"
            test_result_file.write('Case #' + str(x + 1) + ': ' + str(angle) + '\n')
# Script entry point: read the input file, parse the cases, write results.
# Fix: the input file handle was never closed; a context manager closes it
# as soon as the contents are read.
with open("CaptainHammer_B-small-practice.in", "r") as test_case_file:
    lines = test_case_file.read().split('\n')
n = int(lines[0])  # first line holds the number of test cases
test_cases = [0 for x in range(n)]  # placeholders, filled by initialize_test_cases
initialize_test_cases(lines)
write_all_results()
|
{
"content_hash": "3d2590e19b03a32486823fbac6642845",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 96,
"avg_line_length": 34.51111111111111,
"alnum_prop": 0.5660012878300065,
"repo_name": "laichunpongben/CodeJam",
"id": "a183ba88732b17155278aa2fd7e8b48915f8f142",
"size": "1688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2016/practice_round_apac/captain_hammer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "173566"
}
],
"symlink_target": ""
}
|
from importlib import import_module
from django.conf.urls import url, include
class PrefixedUrlconf(object):
    """URLconf wrapper that re-mounts the 'opendebates.prefixed_urls'
    include under ``/<prefix>/`` while leaving all other patterns intact."""

    def __init__(self, prefix):
        self.prefix = prefix

    @property
    def urlpatterns(self):
        """Return opendebates.urls patterns, swapping any include of
        'opendebates.prefixed_urls' for one rooted at ``self.prefix``."""
        url_module = import_module('opendebates.urls')
        patterns = []
        for pattern in url_module.urlpatterns:
            target = getattr(pattern, 'urlconf_name', None)
            if getattr(target, '__name__', None) == 'opendebates.prefixed_urls':
                pattern = url(r'^{}/'.format(self.prefix),
                              include('opendebates.prefixed_urls'))
            patterns.append(pattern)
        return patterns
|
{
"content_hash": "bad7a4373b72ca065c1aed7ab17e39b4",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 94,
"avg_line_length": 29,
"alnum_prop": 0.5846394984326019,
"repo_name": "caktus/django-opendebates",
"id": "7c5c2b067734d2a393f7c8df035f7771d7943587",
"size": "638",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "opendebates/resolvers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36323"
},
{
"name": "Dockerfile",
"bytes": "3213"
},
{
"name": "HTML",
"bytes": "62852"
},
{
"name": "JavaScript",
"bytes": "19013"
},
{
"name": "Python",
"bytes": "305156"
},
{
"name": "Shell",
"bytes": "2757"
}
],
"symlink_target": ""
}
|
import random
import time
import functools
import math
import os
import stat
import string
import logging
import threading
import io
from collections import defaultdict
from s3transfer.compat import rename_file
from s3transfer.compat import seekable
# Hard S3 limit on the number of parts in a multipart upload.
MAX_PARTS = 10000
# The maximum file size you can upload via S3 per request.
# See: http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
# and: http://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html
MAX_SINGLE_UPLOAD_SIZE = 5 * (1024 ** 3)
# Smallest chunk size used when splitting an upload into parts (5 MiB).
MIN_UPLOAD_CHUNKSIZE = 5 * (1024 ** 2)
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
def random_file_extension(num_digits=8):
    """Return ``num_digits`` random hexadecimal characters as a string."""
    chars = [random.choice(string.hexdigits) for _ in range(num_digits)]
    return ''.join(chars)
def signal_not_transferring(request, operation_name, **kwargs):
    """Tell an upload request body to stop signaling transfer progress.

    Only applies to PutObject/UploadPart bodies that expose the
    ``signal_not_transferring`` hook; all other requests are ignored.
    """
    is_upload = operation_name in ['PutObject', 'UploadPart']
    if is_upload and hasattr(request.body, 'signal_not_transferring'):
        request.body.signal_not_transferring()
def signal_transferring(request, operation_name, **kwargs):
    """Tell an upload request body to resume signaling transfer progress.

    Only applies to PutObject/UploadPart bodies that expose the
    ``signal_transferring`` hook; all other requests are ignored.
    """
    is_upload = operation_name in ['PutObject', 'UploadPart']
    if is_upload and hasattr(request.body, 'signal_transferring'):
        request.body.signal_transferring()
def calculate_range_parameter(part_size, part_index, num_parts,
                              total_size=None):
    """Build the byte-range string for one part of a ranged download/copy.

    :type part_size: int
    :param part_size: The size of each part.

    :type part_index: int
    :param part_index: Zero-based index of the part being transferred.

    :type num_parts: int
    :param num_parts: The total number of parts in the transfer.

    :type total_size: int or None
    :param total_size: Full object size, used to close the final range.

    :returns: A string such as ``'bytes=0-5242879'`` for the Range
        parameter on downloads or CopySourceRange on copies.
    """
    first_byte = part_index * part_size
    is_last_part = part_index == num_parts - 1
    if not is_last_part:
        last_byte = first_byte + part_size - 1
    elif total_size is not None:
        last_byte = str(total_size - 1)
    else:
        # Open-ended: the last part runs to the end of the object.
        last_byte = ''
    return 'bytes=%s-%s' % (first_byte, last_byte)
def get_callbacks(transfer_future, callback_type):
    """Collect subscriber callbacks of a given type for a transfer future.

    :type transfer_future: s3transfer.futures.TransferFuture
    :param transfer_future: The transfer future the subscribers belong to.

    :type callback_type: str
    :param callback_type: Which callback to look up on each subscriber;
        valid types include 'queued', 'progress' and 'done'.

    :returns: A list of callbacks, each pre-bound to the transfer future
        via ``functools.partial``.
    """
    method_name = 'on_' + callback_type
    collected = []
    for subscriber in transfer_future.meta.call_args.subscribers:
        if not hasattr(subscriber, method_name):
            continue
        bound = getattr(subscriber, method_name)
        collected.append(functools.partial(bound, future=transfer_future))
    return collected
def invoke_progress_callbacks(callbacks, bytes_transferred):
    """Call every progress callback with the number of bytes transferred.

    :param callbacks: Progress callbacks to invoke.
    :param bytes_transferred: Bytes transferred, forwarded to each callback.
        When zero (no progress) the callbacks are skipped entirely; the
        value may be negative when a transfer request is retried.
    """
    # A falsy byte count means no progress was made — nothing to report.
    if not bytes_transferred:
        return
    for callback in callbacks:
        callback(bytes_transferred=bytes_transferred)
def get_filtered_dict(original_dict, whitelisted_keys):
    """Return a copy of ``original_dict`` containing only whitelisted keys.

    :param original_dict: The dictionary to source keys and values from.
    :param whitelisted_keys: The keys allowed in the returned dictionary.
    :returns: A new dictionary with only the key/value pairs whose key
        appears in ``whitelisted_keys``.
    """
    return {key: value for key, value in original_dict.items()
            if key in whitelisted_keys}
class CallArgs(object):
    """Record keyword call arguments as instance attributes.

    Every keyword argument passed to the constructor becomes an attribute
    of the object with the same name and value.
    """

    def __init__(self, **kwargs):
        for name in kwargs:
            setattr(self, name, kwargs[name])
class FunctionContainer(object):
    """Bundle a callable with its args and kwargs for later invocation.

    Calling the container invokes the stored function with the stored
    positional and keyword arguments.
    """

    def __init__(self, func, *args, **kwargs):
        self._func = func
        self._args = args
        self._kwargs = kwargs

    def __repr__(self):
        return 'Function: {} with args {} and kwargs {}'.format(
            self._func, self._args, self._kwargs)

    def __call__(self):
        return self._func(*self._args, **self._kwargs)
class CountCallbackInvoker(object):
    """Fire a callback once a shared count, after finalization, hits zero.

    :param callback: Invoked when the counter is finalized and the count
        reaches zero.
    """

    def __init__(self, callback):
        self._callback = callback
        self._count = 0
        self._is_finalized = False
        self._lock = threading.Lock()

    @property
    def current_count(self):
        """The current value of the shared count."""
        with self._lock:
            return self._count

    def increment(self):
        """Increment the count by one.

        :raises RuntimeError: If the invoker was already finalized.
        """
        with self._lock:
            if self._is_finalized:
                raise RuntimeError(
                    'Counter has been finalized it can no longer be '
                    'incremented.'
                )
            self._count += 1

    def decrement(self):
        """Decrement the count by one, firing the callback if finalized
        and the count drains to zero.

        :raises RuntimeError: If the count is already at zero.
        """
        with self._lock:
            if not self._count:
                raise RuntimeError(
                    'Counter is at zero. It cannot dip below zero')
            self._count -= 1
            if self._is_finalized and not self._count:
                self._callback()

    def finalize(self):
        """Freeze the counter: no more increments; fire the callback
        immediately if the count is already zero."""
        with self._lock:
            self._is_finalized = True
            if not self._count:
                self._callback()
class OSUtils(object):
    """Thin wrapper around filesystem operations used during transfers.

    Centralizing these calls on one object makes them easy to stub out
    in tests.
    """

    def get_file_size(self, filename):
        """Return the size of ``filename`` in bytes."""
        return os.path.getsize(filename)

    def open_file_chunk_reader(self, filename, start_byte, size, callbacks):
        """Return a ``ReadFileChunk`` over a slice of ``filename``."""
        return ReadFileChunk.from_filename(filename, start_byte,
                                           size, callbacks,
                                           enable_callbacks=False)

    def open_file_chunk_reader_from_fileobj(self, fileobj, chunk_size,
                                            full_file_size, callbacks,
                                            close_callbacks=None):
        """Return a ``ReadFileChunk`` over an already-open file object."""
        return ReadFileChunk(
            fileobj, chunk_size, full_file_size,
            callbacks=callbacks, enable_callbacks=False,
            close_callbacks=close_callbacks)

    def open(self, filename, mode):
        """Open ``filename``; exists so tests can substitute a fake."""
        return open(filename, mode)

    def remove_file(self, filename):
        """Remove a file, noop if file does not exist."""
        # Unlike os.remove, if the file does not exist,
        # then this method does nothing.
        try:
            os.remove(filename)
        except OSError:
            pass

    def rename_file(self, current_filename, new_filename):
        """Rename/move a file (delegates to the compat helper)."""
        rename_file(current_filename, new_filename)

    # FIX: the first parameter was previously named ``cls`` even though this
    # is an ordinary instance method (PEP 8: use ``self``). Positional
    # callers are unaffected.
    def is_special_file(self, filename):
        """Checks to see if a file is a special UNIX file.

        It checks if the file is a character special device, block special
        device, FIFO, or socket.

        :param filename: Name of the file

        :returns: True if the file is a special file. False, if is not.
        """
        # If it does not exist, it must be a new file so it cannot be
        # a special file.
        if not os.path.exists(filename):
            return False
        mode = os.stat(filename).st_mode
        # Character special device.
        if stat.S_ISCHR(mode):
            return True
        # Block special device
        if stat.S_ISBLK(mode):
            return True
        # Named pipe / FIFO
        if stat.S_ISFIFO(mode):
            return True
        # Socket.
        if stat.S_ISSOCK(mode):
            return True
        return False
class DeferredOpenFile(object):
    def __init__(self, filename, start_byte=0, mode='rb', open_function=open):
        """A file-like object that postpones opening the file until needed.

        Useful when many of these objects are created up front (e.g. one
        per queued transfer), since operating systems cap the number of
        files a single process may hold open. The underlying file is opened
        by ``read()``, ``write()``, ``seek()`` and ``__enter__()``.

        :type filename: str
        :param filename: The name of the file to open

        :type start_byte: int
        :param start_byte: The byte to seek to when the file is opened.

        :type mode: str
        :param mode: The mode to use to open the file

        :type open_function: function
        :param open_function: The function to use to open the file
        """
        self._filename = filename
        self._fileobj = None
        self._start_byte = start_byte
        self._mode = mode
        self._open_function = open_function

    def _open_if_needed(self):
        # Open lazily on first use and position at the requested offset.
        if self._fileobj is not None:
            return
        self._fileobj = self._open_function(self._filename, self._mode)
        if self._start_byte:
            self._fileobj.seek(self._start_byte)

    @property
    def name(self):
        return self._filename

    def read(self, amount=None):
        self._open_if_needed()
        return self._fileobj.read(amount)

    def write(self, data):
        self._open_if_needed()
        self._fileobj.write(data)

    def seek(self, where):
        self._open_if_needed()
        self._fileobj.seek(where)

    def tell(self):
        # Before the file is opened, report where the first open will land.
        if self._fileobj is None:
            return self._start_byte
        return self._fileobj.tell()

    def close(self):
        if self._fileobj:
            self._fileobj.close()

    def __enter__(self):
        self._open_if_needed()
        return self

    def __exit__(self, *args, **kwargs):
        self.close()
class ReadFileChunk(object):
    def __init__(self, fileobj, chunk_size, full_file_size,
                 callbacks=None, enable_callbacks=True, close_callbacks=None):
        """
        Given a file object shown below::

            |___________________________________________________|
            0          |                 |                 full_file_size
                       |----chunk_size---|
                    f.tell()

        :type fileobj: file
        :param fileobj: File like object

        :type chunk_size: int
        :param chunk_size: The max chunk size to read. Trying to read
            pass the end of the chunk size will behave like you've
            reached the end of the file.

        :type full_file_size: int
        :param full_file_size: The entire content length associated
            with ``fileobj``.

        :type callbacks: A list of function(amount_read)
        :param callbacks: Called whenever data is read from this object in the
            order provided.

        :type enable_callbacks: boolean
        :param enable_callbacks: True if to run callbacks. Otherwise, do not
            run callbacks

        :type close_callbacks: A list of function()
        :param close_callbacks: Called when close is called. The function
            should take no arguments.
        """
        self._fileobj = fileobj
        # The chunk starts wherever the file object currently points.
        self._start_byte = self._fileobj.tell()
        self._size = self._calculate_file_size(
            self._fileobj, requested_size=chunk_size,
            start_byte=self._start_byte, actual_file_size=full_file_size)
        self._amount_read = 0
        self._callbacks = callbacks
        if callbacks is None:
            self._callbacks = []
        self._callbacks_enabled = enable_callbacks
        # FIX: the original re-assigned ``close_callbacks`` to itself when it
        # was None (a no-op), leaving the attribute as None. Normalize to an
        # empty list, mirroring the handling of ``callbacks`` above; close()
        # then iterates an empty list with no behavior change.
        self._close_callbacks = close_callbacks
        if close_callbacks is None:
            self._close_callbacks = []

    @classmethod
    def from_filename(cls, filename, start_byte, chunk_size, callbacks=None,
                      enable_callbacks=True):
        """Convenience factory function to create from a filename.

        :type start_byte: int
        :param start_byte: The first byte from which to start reading.

        :type chunk_size: int
        :param chunk_size: The max chunk size to read. Trying to read
            pass the end of the chunk size will behave like you've
            reached the end of the file.

        :type callbacks: function(amount_read)
        :param callbacks: Called whenever data is read from this object.

        :type enable_callbacks: bool
        :param enable_callbacks: Indicate whether to invoke callback
            during read() calls.

        :rtype: ``ReadFileChunk``
        :return: A new instance of ``ReadFileChunk``
        """
        f = open(filename, 'rb')
        f.seek(start_byte)
        file_size = os.fstat(f.fileno()).st_size
        return cls(f, chunk_size, file_size, callbacks, enable_callbacks)

    def _calculate_file_size(self, fileobj, requested_size, start_byte,
                             actual_file_size):
        # Never report more bytes than remain between start_byte and EOF.
        max_chunk_size = actual_file_size - start_byte
        return min(max_chunk_size, requested_size)

    def read(self, amount=None):
        """Read up to ``amount`` bytes, capped at the chunk boundary."""
        if amount is None:
            amount_to_read = self._size - self._amount_read
        else:
            amount_to_read = min(self._size - self._amount_read, amount)
        data = self._fileobj.read(amount_to_read)
        self._amount_read += len(data)
        if self._callbacks is not None and self._callbacks_enabled:
            invoke_progress_callbacks(self._callbacks, len(data))
        return data

    def signal_transferring(self):
        """Re-enable progress callbacks and forward the signal downstream."""
        self.enable_callback()
        if hasattr(self._fileobj, 'signal_transferring'):
            self._fileobj.signal_transferring()

    def signal_not_transferring(self):
        """Disable progress callbacks and forward the signal downstream."""
        self.disable_callback()
        if hasattr(self._fileobj, 'signal_not_transferring'):
            self._fileobj.signal_not_transferring()

    def enable_callback(self):
        self._callbacks_enabled = True

    def disable_callback(self):
        self._callbacks_enabled = False

    def seek(self, where):
        """Seek to ``where``, relative to the start of the chunk."""
        self._fileobj.seek(self._start_byte + where)
        if self._callbacks is not None and self._callbacks_enabled:
            # To also rewind the callback() for an accurate progress report
            invoke_progress_callbacks(
                self._callbacks, bytes_transferred=where - self._amount_read)
        self._amount_read = where

    def close(self):
        """Run close callbacks (if enabled) and close the file object."""
        if self._close_callbacks is not None and self._callbacks_enabled:
            for callback in self._close_callbacks:
                callback()
        self._fileobj.close()

    def tell(self):
        # Position is tracked relative to the start of the chunk.
        return self._amount_read

    def __len__(self):
        # __len__ is defined because requests will try to determine the length
        # of the stream to set a content length. In the normal case
        # of the file it will just stat the file, but we need to change that
        # behavior. By providing a __len__, requests will use that instead
        # of stat'ing the file.
        return self._size

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.close()

    def __iter__(self):
        # This is a workaround for http://bugs.python.org/issue17575
        # Basically httplib will try to iterate over the contents, even
        # if its a file like object. This wasn't noticed because we've
        # already exhausted the stream so iterating over the file immediately
        # stops, which is what we're simulating here.
        return iter([])
class StreamReaderProgress(object):
    """Wrapper for a read only stream that adds progress callbacks."""

    def __init__(self, stream, callbacks=None):
        self._stream = stream
        self._callbacks = [] if callbacks is None else callbacks

    def read(self, *args, **kwargs):
        # Read through to the wrapped stream, then report the bytes seen.
        chunk = self._stream.read(*args, **kwargs)
        invoke_progress_callbacks(self._callbacks, len(chunk))
        return chunk
class NoResourcesAvailable(Exception):
    """Raised when a semaphore cannot be acquired without blocking."""
    pass
class TaskSemaphore(object):
    def __init__(self, count):
        """A semaphore for the purpose of limiting the number of tasks

        :param count: The size of semaphore
        """
        self._semaphore = threading.Semaphore(count)

    def acquire(self, tag, blocking=True):
        """Acquire a slot from the semaphore.

        :param tag: Identifies what is acquiring the semaphore. Unused by
            this class directly; kept for API compatibility with the
            SlidingWindowSemaphore implementation.
        :param blocking: When True, block until a slot is free; when False,
            raise instead of blocking.
        :raises NoResourcesAvailable: If non-blocking and no slot is free.
        :returns: A token (can be None) to use when releasing the semaphore
        """
        logger.debug("Acquiring %s", tag)
        acquired = self._semaphore.acquire(blocking)
        if not acquired:
            raise NoResourcesAvailable("Cannot acquire tag '%s'" % tag)

    def release(self, tag, acquire_token):
        """Return a slot to the semaphore.

        :param tag: Identifies what is releasing the semaphore.
        :param acquire_token: The token from the matching ``acquire()``.
            Unused here; kept for API compatibility with the
            SlidingWindowSemaphore implementation.
        """
        logger.debug("Releasing acquire %s/%s" % (tag, acquire_token))
        self._semaphore.release()
class SlidingWindowSemaphore(TaskSemaphore):
    """A semaphore used to coordinate sequential resource access.
    This class is similar to the stdlib BoundedSemaphore:
    * It's initialized with a count.
    * Each call to ``acquire()`` decrements the counter.
    * If the count is at zero, then ``acquire()`` will either block until the
    count increases, or if ``blocking=False``, then it will raise
    a NoResourcesAvailable exception indicating that it failed to acquire the
    semaphore.
    The main difference is that this semaphore is used to limit
    access to a resource that requires sequential access. For example,
    if I want to access resource R that has 20 subresources R_0 - R_19,
    this semaphore can also enforce that you only have a max range of
    10 at any given point in time. You must also specify a tag name
    when you acquire the semaphore. The sliding window semantics apply
    on a per tag basis. The internal count will only be incremented
    when the minimum sequence number for a tag is released.
    """
    def __init__(self, count):
        self._count = count
        # Dict[tag, next_sequence_number].
        self._tag_sequences = defaultdict(int)
        # Dict[tag, lowest un-released sequence number] — the left edge of
        # each tag's sliding window.
        self._lowest_sequence = {}
        self._lock = threading.Lock()
        self._condition = threading.Condition(self._lock)
        # Dict[tag, List[sequence_number]]
        # Out-of-order releases parked until the window's left edge catches up.
        self._pending_release = {}
    def current_count(self):
        """Return the number of currently available slots."""
        with self._lock:
            return self._count
    def acquire(self, tag, blocking=True):
        """Acquire the next sequence number for ``tag``.

        :param tag: Name identifying the sequentially-accessed resource.
        :param blocking: When True, wait for a free slot; when False, raise
            NoResourcesAvailable if none is free.
        :returns: The sequence number acquired; pass it back to ``release()``.
        """
        logger.debug("Acquiring %s", tag)
        self._condition.acquire()
        try:
            if self._count == 0:
                if not blocking:
                    raise NoResourcesAvailable("Cannot acquire tag '%s'" % tag)
                else:
                    while self._count == 0:
                        self._condition.wait()
            # self._count is no longer zero.
            # First, check if this is the first time we're seeing this tag.
            sequence_number = self._tag_sequences[tag]
            if sequence_number == 0:
                # First time seeing the tag, so record we're at 0.
                self._lowest_sequence[tag] = sequence_number
            self._tag_sequences[tag] += 1
            self._count -= 1
            return sequence_number
        finally:
            self._condition.release()
    def release(self, tag, acquire_token):
        """Release a previously acquired sequence number for ``tag``.

        Only releasing the tag's lowest outstanding sequence number frees
        slots; higher numbers are queued until the window slides past them.

        :param tag: The tag passed to the matching ``acquire()``.
        :param acquire_token: The sequence number returned by ``acquire()``.
        :raises ValueError: If the tag or sequence number is unknown.
        """
        sequence_number = acquire_token
        logger.debug("Releasing acquire %s/%s", tag, sequence_number)
        self._condition.acquire()
        try:
            if tag not in self._tag_sequences:
                raise ValueError("Attempted to release unknown tag: %s" % tag)
            max_sequence = self._tag_sequences[tag]
            if self._lowest_sequence[tag] == sequence_number:
                # We can immediately process this request and free up
                # resources.
                self._lowest_sequence[tag] += 1
                self._count += 1
                self._condition.notify()
                queued = self._pending_release.get(tag, [])
                # Drain any queued releases that are now contiguous with the
                # advanced left edge (list is sorted descending, so the
                # smallest candidate is at the end).
                while queued:
                    if self._lowest_sequence[tag] == queued[-1]:
                        queued.pop()
                        self._lowest_sequence[tag] += 1
                        self._count += 1
                    else:
                        break
            elif self._lowest_sequence[tag] < sequence_number < max_sequence:
                # We can't do anything right now because we're still waiting
                # for the min sequence for the tag to be released.  We have
                # to queue this for pending release.
                self._pending_release.setdefault(
                    tag, []).append(sequence_number)
                self._pending_release[tag].sort(reverse=True)
            else:
                raise ValueError(
                    "Attempted to release unknown sequence number "
                    "%s for tag: %s" % (sequence_number, tag))
        finally:
            self._condition.release()
class ChunksizeAdjuster(object):
    """Adjust a requested chunksize so it satisfies S3 transfer limits."""

    def __init__(self, max_size=MAX_SINGLE_UPLOAD_SIZE,
                 min_size=MIN_UPLOAD_CHUNKSIZE, max_parts=MAX_PARTS):
        self.max_size = max_size
        self.min_size = min_size
        self.max_parts = max_parts

    def adjust_chunksize(self, current_chunksize, file_size=None):
        """Get a chunksize close to current that fits within all S3 limits.

        :type current_chunksize: int
        :param current_chunksize: The currently configured chunksize.

        :type file_size: int or None
        :param file_size: The size of the file to upload. This might be None
            if the object being transferred has an unknown size.

        :returns: A valid chunksize that fits within configured limits.
        """
        chunksize = current_chunksize
        if file_size is not None:
            chunksize = self._adjust_for_max_parts(chunksize, file_size)
        return self._adjust_for_chunksize_limits(chunksize)

    def _adjust_for_chunksize_limits(self, current_chunksize):
        # Clamp into [min_size, max_size], logging whenever we override.
        if current_chunksize > self.max_size:
            logger.debug(
                "Chunksize greater than maximum chunksize. "
                "Setting to %s from %s." % (self.max_size, current_chunksize))
            return self.max_size
        if current_chunksize < self.min_size:
            logger.debug(
                "Chunksize less than minimum chunksize. "
                "Setting to %s from %s." % (self.min_size, current_chunksize))
            return self.min_size
        return current_chunksize

    def _adjust_for_max_parts(self, current_chunksize, file_size):
        # Double the chunksize until the resulting part count fits under
        # the maximum number of parts S3 allows.
        chunksize = current_chunksize
        while int(math.ceil(file_size / float(chunksize))) > self.max_parts:
            chunksize *= 2
        if chunksize != current_chunksize:
            logger.debug(
                "Chunksize would result in the number of parts exceeding the "
                "maximum. Setting to %s from %s." %
                (chunksize, current_chunksize))
        return chunksize
|
{
"content_hash": "d682e7d9f80f5274a5cb648d5d2ce8e2",
"timestamp": "",
"source": "github",
"line_count": 700,
"max_line_length": 79,
"avg_line_length": 35.677142857142854,
"alnum_prop": 0.6031873148073997,
"repo_name": "VirtueSecurity/aws-extender",
"id": "5ca4d9f066236188d803d962bb3c293e6bd3e8ac",
"size": "25535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BappModules/s3transfer/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46260"
}
],
"symlink_target": ""
}
|
from pyramid.view import view_config, forbidden_view_config
from pyramid.httpexceptions import HTTPFound
from pyramid.security import remember
from pyramid.session import signed_serialize
from pyramid_ldap import get_ldap_connector
import logging
from twonicornweb.views import (
site_layout,
local_authenticate,
get_user,
)
log = logging.getLogger(__name__)
@view_config(route_name='login', renderer='twonicornweb:templates/login.pt')
@forbidden_view_config(renderer='twonicornweb:templates/login.pt')
def login(request):
    """Render the login page and process login form submissions.

    Also serves as the forbidden view: authenticated users who land here
    get an "access denied" / "already logged in" message instead of a form.
    Authenticates via LDAP or a local backend depending on the
    ``tcw.auth_mode`` setting; on success sets the auth headers plus a
    signed ``un`` cookie and redirects to the posted return URL.
    """
    page_title = 'Login'
    user = get_user(request)
    # Derive the referring host so we only bounce back to our own pages.
    if request.referer:
        referer_host = request.referer.split('/')[2]
    else:
        referer_host = None
    # Prefer returning to the referring page (but never the logout page);
    # otherwise return to the requested path; otherwise the default landing.
    if request.referer and referer_host == request.host and request.referer.split('/')[3][:6] != 'logout':
        return_url = request.referer
    elif request.path != '/login':
        return_url = request.url
    else:
        return_url = '/applications'
    login = ''
    password = ''
    error = ''
    if 'form.submitted' in request.POST:
        login = request.POST['login']
        password = request.POST['password']
        # AD/LDAP
        if request.registry.settings['tcw.auth_mode'] == 'ldap':
            connector = get_ldap_connector(request)
            data = connector.authenticate(login, password)
        # LOCAL
        else:
            data = local_authenticate(login, password)
        if data is not None:
            # data[0] is the user's DN — presumably (tuple of dn, attrs)
            # from the LDAP/local backend; confirm against the backends.
            dn = data[0]
            encrypted = signed_serialize(login, request.registry.settings['tcw.cookie_token'])
            headers = remember(request, dn)
            # One-week signed username cookie consumed elsewhere in the app.
            headers.append(('Set-Cookie', 'un=' + str(encrypted) + '; Max-Age=604800; Path=/'))
            # NOTE(review): redirects to the client-supplied return_url with
            # no same-host validation — looks like an open-redirect risk;
            # confirm and restrict to this host if so.
            return HTTPFound(request.POST['return_url'], headers=headers)
        else:
            error = 'Invalid credentials'
    if request.authenticated_userid:
        if request.path == '/login':
            error = 'You are already logged in'
            page_title = 'Already Logged In'
        else:
            error = 'You do not have permission to access this page'
            page_title = 'Access Denied'
    # NOTE(review): the submitted password is echoed back into the template
    # context — confirm the template needs it; otherwise drop it.
    return {'layout': site_layout(),
            'page_title': page_title,
            'user': user,
            'return_url': return_url,
            'login': login,
            'password': password,
            'error': error,
           }
|
{
"content_hash": "266d48b8953437d97679a157813c7e04",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 106,
"avg_line_length": 31,
"alnum_prop": 0.6036866359447005,
"repo_name": "CityGrid/twonicorn",
"id": "9b00f50128e8846c53eabe30822d8d630ad08fef",
"size": "2982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twonicornweb/views/login.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18174"
},
{
"name": "JavaScript",
"bytes": "3660"
},
{
"name": "Python",
"bytes": "169658"
},
{
"name": "Ruby",
"bytes": "730"
},
{
"name": "Shell",
"bytes": "8111"
},
{
"name": "TSQL",
"bytes": "24554"
}
],
"symlink_target": ""
}
|
'''
You are given a string, s, and a list of words, words, that are all of the same length. Find all starting indices of substring(s) in s that is a concatenation of each word in words exactly once and without any intervening characters.
For example, given:
s: "barfoothefoobarman"
words: ["foo", "bar"]
You should return the indices: [0,9].
(order does not matter).
'''
class Solution(object):
    def findSubstring(self, s, words):
        """
        :type s: str
        :type words: List[str]
        :rtype: List[int]
        """
        # Sliding-window scan: one window per alignment offset modulo the
        # word length; each window keeps counts of the words it contains.
        n = len(s)
        wlen = len(words[0])
        total = len(words) * wlen
        need = {}
        for w in words:
            need[w] = need.get(w, 0) + 1
        starts = []
        for offset in range(wlen):
            left = offset
            seen = {}
            for right in range(offset, n - wlen + 1, wlen):
                w = s[right:right + wlen]
                if w in need:
                    seen[w] = seen.get(w, 0) + 1
                    # Shrink from the left until this word's count fits.
                    while seen[w] > need[w]:
                        seen[s[left:left + wlen]] -= 1
                        left += wlen
                    if right + wlen - left == total:
                        starts.append(left)
                else:
                    # Word not wanted at all: restart the window after it.
                    seen.clear()
                    left = right + wlen
        return starts
if __name__ == "__main__":
    # Smoke test using the example from the problem statement.
    assert Solution().findSubstring("barfoothefoobarman", ["foo", "bar"]) == [0, 9]
|
{
"content_hash": "017db56059d3fad7660c26518f6f2ca1",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 233,
"avg_line_length": 36.354166666666664,
"alnum_prop": 0.5077363896848137,
"repo_name": "gavinfish/leetcode-share",
"id": "f3bac4995c6f65f784f50e4d214332ad33ff9ba0",
"size": "1745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/030 Substring with Concatenation of All Words.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "81458"
},
{
"name": "Python",
"bytes": "222883"
}
],
"symlink_target": ""
}
|
"""Tests the load_orgs_pipeline."""
from tests.unittest_utils import ForsetiTestCase
import json
import mock
# pylint: disable=line-too-long
from google.cloud.security.common.data_access import errors as data_access_errors
from google.cloud.security.common.data_access import organization_dao as org_dao
from google.cloud.security.common.gcp_api import cloud_resource_manager as crm
from google.cloud.security.common.gcp_api import errors as api_errors
from google.cloud.security.inventory import errors as inventory_errors
from google.cloud.security.inventory.pipelines import load_orgs_pipeline
from tests.inventory.pipelines.test_data import fake_configs
from tests.inventory.pipelines.test_data import fake_orgs
# pylint: enable=line-too-long
def _setup_raw_orgs():
    """Backfill each expected loadable org's 'raw_org' field with the
    JSON-serialized fake organization it was derived from."""
    flattened = [org
                 for response in fake_orgs.FAKE_ORGS
                 for org in response.get('organizations', [])]
    # assumes the flattened fakes line up 1:1 with EXPECTED_LOADABLE_ORGS
    for i, expected in enumerate(fake_orgs.EXPECTED_LOADABLE_ORGS):
        expected['raw_org'] = json.dumps(flattened[i])
class LoadOrgsPipelineTest(ForsetiTestCase):
    """Tests for the load_orgs_pipeline."""

    @classmethod
    def setUpClass(cls):
        """Set up before running the class tests."""
        _setup_raw_orgs()

    def setUp(self):
        """Set up."""
        self.cycle_timestamp = '20001225T120000Z'
        self.configs = fake_configs.FAKE_CONFIGS
        self.mock_crm = mock.create_autospec(crm.CloudResourceManagerClient)
        self.mock_dao = mock.create_autospec(org_dao.OrganizationDao)
        self.pipeline = (
            load_orgs_pipeline.LoadOrgsPipeline(
                self.cycle_timestamp,
                self.configs,
                self.mock_crm,
                self.mock_dao))

    def test_can_transform_orgs(self):
        """Test that orgs can be transformed."""
        orgs = list(self.pipeline._transform(fake_orgs.FAKE_ORGS))
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(fake_orgs.EXPECTED_LOADABLE_ORGS, orgs)

    def test_api_is_called_to_retrieve_orgs(self):
        """Test that api is called to retrieve orgs."""
        self.pipeline._retrieve()

        self.pipeline.api_client.get_organizations.assert_called_once_with(
            self.pipeline.RESOURCE_NAME)

    def test_retrieve_errors_are_handled(self):
        """Test that errors are handled when retrieving."""
        self.pipeline.api_client.get_organizations.side_effect = (
            api_errors.ApiExecutionError('11111', mock.MagicMock()))

        with self.assertRaises(inventory_errors.LoadDataPipelineError):
            self.pipeline._retrieve()

    @mock.patch.object(
        load_orgs_pipeline.LoadOrgsPipeline,
        '_get_loaded_count')
    @mock.patch.object(
        load_orgs_pipeline.LoadOrgsPipeline,
        '_load')
    @mock.patch.object(
        load_orgs_pipeline.LoadOrgsPipeline,
        '_transform')
    @mock.patch.object(
        load_orgs_pipeline.LoadOrgsPipeline,
        '_retrieve')
    def test_subroutines_are_called_by_run(self, mock_retrieve, mock_transform,
            mock_load, mock_get_loaded_count):
        """Test that the subroutines are called by run."""
        mock_retrieve.return_value = fake_orgs.FAKE_ORGS
        mock_transform.return_value = fake_orgs.EXPECTED_LOADABLE_ORGS
        self.pipeline.run()
        mock_retrieve.assert_called_once_with()
        mock_transform.assert_called_once_with(fake_orgs.FAKE_ORGS)
        mock_load.assert_called_once_with(
            self.pipeline.RESOURCE_NAME,
            fake_orgs.EXPECTED_LOADABLE_ORGS)
        # BUG FIX: the original referenced ``assert_called_once`` without
        # calling it; attribute access on a Mock is a silent no-op, so the
        # assertion never ran. Invoke the assertion for real.
        mock_get_loaded_count.assert_called_once_with()
|
{
"content_hash": "eb1e799361ec9f81fea628bca0d4717b",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 81,
"avg_line_length": 37.270833333333336,
"alnum_prop": 0.6743991056456121,
"repo_name": "thenenadx/forseti-security",
"id": "1f972f930f907463a764e1346e329c0ba1e424a3",
"size": "4153",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/inventory/pipelines/load_orgs_pipeline_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5851"
},
{
"name": "Protocol Buffer",
"bytes": "10441"
},
{
"name": "Python",
"bytes": "1985604"
},
{
"name": "Shell",
"bytes": "2737"
}
],
"symlink_target": ""
}
|
import os
from oslo_config import cfg
from oslo_log import log as logging
from bork_api.clients.git_client import RepoManager
from bork_api.clients.storage_client import LocalStorage
from bork_api.models import Image, Deployment, Recipe, CookBook
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def images_cleanup():
    """Drop every Image record from the database."""
    LOG.debug("Cleanup old Images")
    stale_images = Image.objects.all()
    stale_images.delete()
def deployments_cleanup():
    """Drop every Deployment record from the database."""
    LOG.info("Cleanup old Deployments")
    stale_deployments = Deployment.objects.all()
    stale_deployments.delete()
def recipes_cleanup():
    """Drop every Recipe record from the database."""
    LOG.info("Cleanup old Recipes")
    stale_recipes = Recipe.objects.all()
    stale_recipes.delete()
def cookbooks_cleanup():
    """Drop every CookBook record and reset the local cookbook storage."""
    LOG.info("Cleanup old Cookbooks")
    stale_cookbooks = CookBook.objects.all()
    stale_cookbooks.delete()
    storage = LocalStorage()
    storage.reset()
def cookbooks_add():
    """Add local cookbooks (and their recipes) to the db.

    Walks every user directory in local storage, registers each cookbook
    found there, then registers the recipes of that cookbook.
    """
    store = LocalStorage()
    for user in store.list_users():
        repo = RepoManager(user)
        for cb_name in store.list_cookbooks(user):
            system = store.find_system(cb_name)
            LOG.info("Adding cookbook %s" % cb_name)
            # Bug fix: the original rebound the loop variable `cb` to the
            # new CookBook instance, so `cb.name = cb` stored the model
            # object in its own name field and os.path.join() received the
            # model instead of the cookbook name.
            cookbook = CookBook()
            cookbook.name = cb_name
            cookbook.system = system
            cookbook.version = repo.version
            cookbook.path = os.path.join(store.path, cb_name)
            cookbook.user = user
            cookbook.save()
            for recipe_name in store.list_recipes(cookbook.path):
                recipe = Recipe()
                recipe.name = recipe_name
                recipe.cookbook = cookbook
                recipe.version = repo.browse_file(recipe_name)
                recipe.system = system
                recipe.user = user
                recipe.save()
def recipes_add():
    """Register in the db every recipe detected in the local cookbooks."""
    store = LocalStorage()
    for user in store.list_users():
        repo = RepoManager(user)
        for cookbook in store.list_cookbooks(user):
            system = store.find_system(cookbook)
            # NOTE(review): `cookbook` is the name returned by
            # list_cookbooks(), not a CookBook model — confirm that
            # Recipe.cookbook accepts it (cookbooks_add stores the model).
            for recipe_name in store.list_recipes(cookbook):
                recipe = Recipe()
                recipe.name = recipe_name
                recipe.cookbook = cookbook
                recipe.version = repo.browse_file(recipe_name)
                recipe.system = system
                recipe.user = user
                recipe.save()
|
{
"content_hash": "32bda931295e390555674d0c4deb82a8",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 63,
"avg_line_length": 27.59493670886076,
"alnum_prop": 0.5619266055045872,
"repo_name": "Fiware/ops.Validator",
"id": "671c072a9b543c8831adc380c1d237cf3429d1fe",
"size": "2759",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "validator_api/bork_api/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "281"
},
{
"name": "CSS",
"bytes": "7529"
},
{
"name": "Dockerfile",
"bytes": "12322"
},
{
"name": "HTML",
"bytes": "3863"
},
{
"name": "JavaScript",
"bytes": "345113"
},
{
"name": "Python",
"bytes": "291979"
},
{
"name": "Ruby",
"bytes": "10189"
},
{
"name": "Shell",
"bytes": "5963"
}
],
"symlink_target": ""
}
|
from .arm_client import AzureResourceManagerClient
from .compute_client import AzureComputeClient
from .graph_client import AzureGraphClient
from .network_client import AzureNetworkClient
from .pricing_client import AzurePricingClient
from .resources_client import AzureResourcesClient
# Public API of the aioazure client subpackage: one client class per
# Azure service surface re-exported from its own module.
__all__ = [
    'AzureResourceManagerClient',
    'AzureComputeClient',
    'AzureGraphClient',
    'AzureNetworkClient',
    'AzurePricingClient',
    'AzureResourcesClient',
]
|
{
"content_hash": "64db949a69bc77b3178b21fe7db4e798",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 50,
"avg_line_length": 31,
"alnum_prop": 0.7935483870967742,
"repo_name": "hail-is/hail",
"id": "4e9d523073a73e54b12a5e7cb3633fd2a980fb45",
"size": "465",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "hail/python/hailtop/aiocloud/aioazure/client/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7729"
},
{
"name": "C",
"bytes": "779"
},
{
"name": "C++",
"bytes": "171899"
},
{
"name": "CMake",
"bytes": "3045"
},
{
"name": "CSS",
"bytes": "666"
},
{
"name": "Dockerfile",
"bytes": "10056"
},
{
"name": "Emacs Lisp",
"bytes": "377"
},
{
"name": "HCL",
"bytes": "54923"
},
{
"name": "HTML",
"bytes": "155946"
},
{
"name": "Java",
"bytes": "38401"
},
{
"name": "JavaScript",
"bytes": "877"
},
{
"name": "Jupyter Notebook",
"bytes": "305748"
},
{
"name": "MLIR",
"bytes": "20"
},
{
"name": "Makefile",
"bytes": "61284"
},
{
"name": "Python",
"bytes": "5635857"
},
{
"name": "R",
"bytes": "3038"
},
{
"name": "SCSS",
"bytes": "33487"
},
{
"name": "Scala",
"bytes": "5050997"
},
{
"name": "Shell",
"bytes": "75539"
},
{
"name": "XSLT",
"bytes": "5748"
}
],
"symlink_target": ""
}
|
"""
Cert manager manages x509 certificates.
**Related Flags**
:cert_topic: What :mod:`rpc` topic to listen to (default: `cert`).
:cert_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`nova.cert.manager.Manager`).
"""
import base64
from nova import crypto
from nova import flags
from nova import manager
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
class CertManager(manager.Manager):
    """RPC manager exposing x509 certificate operations.

    Every public method delegates directly to the corresponding helper in
    :mod:`nova.crypto`.  The ``context`` parameter is part of the RPC
    method signature but is not consulted by these helpers.
    """

    # Version of the RPC API implemented by this manager.
    RPC_API_VERSION = '1.0'

    def init_host(self):
        """Ensure the CA directory layout exists on this host."""
        crypto.ensure_ca_filesystem()

    def revoke_certs_by_user(self, context, user_id):
        """Revoke all user certs."""
        return crypto.revoke_certs_by_user(user_id)

    def revoke_certs_by_project(self, context, project_id):
        """Revoke all project certs."""
        return crypto.revoke_certs_by_project(project_id)

    def revoke_certs_by_user_and_project(self, context, user_id, project_id):
        """Revoke certs for user in project."""
        return crypto.revoke_certs_by_user_and_project(user_id, project_id)

    def generate_x509_cert(self, context, user_id, project_id):
        """Generate and sign a cert for user in project."""
        return crypto.generate_x509_cert(user_id, project_id)

    def fetch_ca(self, context, project_id):
        """Get root ca for a project."""
        return crypto.fetch_ca(project_id)

    def fetch_crl(self, context, project_id):
        """Get crl for a project."""
        return crypto.fetch_crl(project_id)

    def decrypt_text(self, context, project_id, text):
        """Decrypt base64 encoded text using the projects private key."""
        return crypto.decrypt_text(project_id, base64.b64decode(text))
|
{
"content_hash": "8e41ef06c87589727352f86322f074bb",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 77,
"avg_line_length": 32.127272727272725,
"alnum_prop": 0.6632710809281268,
"repo_name": "usc-isi/nova",
"id": "4cc392d692bbc31343b2a441999e4d6e74f3181e",
"size": "2419",
"binary": false,
"copies": "7",
"ref": "refs/heads/hpc-trunk",
"path": "nova/cert/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "7282590"
},
{
"name": "Shell",
"bytes": "42905"
}
],
"symlink_target": ""
}
|
"""Utilities for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import linalg
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.ops.distributions.util import * # pylint: disable=wildcard-import
def _convert_to_tensor(x, name):
  """Like `ops.convert_to_tensor`, but passes `None` through unchanged."""
  if x is None:
    return None
  return ops.convert_to_tensor(x, name=name)
def mixture_stddev(mixture_weight_vector, mean_vector, stddev_vector):
  """Computes the standard deviation of a mixture distribution.

  Works for any component distribution for which per-component means and
  standard deviations are available, using
  Var = E[sigma^2] + E[mu^2] - (E[mu])^2 under the mixture weights.

  Args:
    mixture_weight_vector: A 2D tensor with shape [batch_size, num_components]
    mean_vector: A 2D tensor of mixture component means. Has shape
      `[batch_size, num_components]`.
    stddev_vector: A 2D tensor of mixture component standard deviations. Has
      shape `[batch_size, num_components]`.

  Returns:
    A 1D tensor of shape `[batch_size]` representing the standard deviation of
    the mixture distribution with given weights and component means and
    standard deviations.

  Raises:
    ValueError: If the shapes of the input tensors are not as expected.
  """
  mixture_weight_vector.shape.assert_has_rank(2)
  if not mean_vector.shape.is_compatible_with(mixture_weight_vector.shape):
    raise ValueError("Expecting means to have same shape as mixture weights.")
  if not stddev_vector.shape.is_compatible_with(mixture_weight_vector.shape):
    raise ValueError("Expecting stddevs to have same shape as mixture weights.")

  # Reshape to [batch, 1, k] and [batch, k, 1] so matmul acts as a batched
  # vectorized dot product.
  weights = array_ops.expand_dims(mixture_weight_vector, axis=1)
  means = array_ops.expand_dims(mean_vector, axis=2)
  stddevs = array_ops.expand_dims(stddev_vector, axis=2)

  def _weighted_avg(values):
    # Batched dot of mixture weights with per-component values -> [batch].
    return array_ops.reshape(math_ops.matmul(weights, values), (-1,))

  mean_wa = _weighted_avg(means)
  var_wa = _weighted_avg(math_ops.square(stddevs))
  sq_mean_wa = _weighted_avg(math_ops.square(means))
  mixture_variance = var_wa + sq_mean_wa - math_ops.square(mean_wa)
  return math_ops.sqrt(mixture_variance)
def make_tril_scale(
    loc=None,
    scale_tril=None,
    scale_diag=None,
    scale_identity_multiplier=None,
    shape_hint=None,
    validate_args=False,
    assert_positive=False,
    name=None):
  """Creates a LinOp representing a lower triangular matrix.

  Args:
    loc: Floating-point `Tensor`. This is used for inferring shape in the case
      where only `scale_identity_multiplier` is set.
    scale_tril: Floating-point `Tensor` representing the diagonal matrix.
      `scale_diag` has shape [N1, N2, ... k, k], which represents a k x k
      lower triangular matrix.
      When `None` no `scale_tril` term is added to the LinOp.
      The upper triangular elements above the diagonal are ignored.
    scale_diag: Floating-point `Tensor` representing the diagonal matrix.
      `scale_diag` has shape [N1, N2, ... k], which represents a k x k
      diagonal matrix.
      When `None` no diagonal term is added to the LinOp.
    scale_identity_multiplier: floating point rank 0 `Tensor` representing a
      scaling done to the identity matrix.
      When `scale_identity_multiplier = scale_diag = scale_tril = None` then
      `scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
      to `scale`.
    shape_hint: scalar integer `Tensor` representing a hint at the dimension of
      the identity matrix when only `scale_identity_multiplier` is set.
    validate_args: Python `bool` indicating whether arguments should be
      checked for correctness.
    assert_positive: Python `bool` indicating whether LinOp should be checked
      for being positive definite.
    name: Python `str` name given to ops managed by this object.

  Returns:
    `LinearOperator` representing a lower triangular matrix.

  Raises:
    ValueError: If only `scale_identity_multiplier` is set and `loc` and
      `shape_hint` are both None.
  """

  def _maybe_attach_assertion(x):
    # Optionally guard `x` with runtime checks on its diagonal; this is a
    # no-op unless `validate_args` is set.
    if not validate_args:
      return x
    if assert_positive:
      return control_flow_ops.with_dependencies([
          check_ops.assert_positive(
              array_ops.matrix_diag_part(x),
              message="diagonal part must be positive"),
      ], x)
    return control_flow_ops.with_dependencies([
        check_ops.assert_none_equal(
            array_ops.matrix_diag_part(x),
            array_ops.zeros([], x.dtype),
            message="diagonal part must be non-zero"),
    ], x)

  with ops.name_scope(name, "make_tril_scale",
                      values=[loc, scale_diag, scale_identity_multiplier]):
    loc = _convert_to_tensor(loc, name="loc")
    scale_tril = _convert_to_tensor(scale_tril, name="scale_tril")
    scale_diag = _convert_to_tensor(scale_diag, name="scale_diag")
    scale_identity_multiplier = _convert_to_tensor(
        scale_identity_multiplier,
        name="scale_identity_multiplier")

  if scale_tril is not None:
    scale_tril = array_ops.matrix_band_part(scale_tril, -1, 0)  # Zero out TriU.
    tril_diag = array_ops.matrix_diag_part(scale_tril)
    # Fold the diagonal and scaled-identity contributions into the
    # diagonal of the triangular factor.
    if scale_diag is not None:
      tril_diag += scale_diag
    if scale_identity_multiplier is not None:
      tril_diag += scale_identity_multiplier[..., array_ops.newaxis]
    scale_tril = array_ops.matrix_set_diag(scale_tril, tril_diag)
    return linalg.LinearOperatorLowerTriangular(
        tril=_maybe_attach_assertion(scale_tril),
        is_non_singular=True,
        is_self_adjoint=False,
        is_positive_definite=assert_positive)

  # No triangular part was given: delegate to the diagonal-scale builder.
  return make_diag_scale(
      loc=loc,
      scale_diag=scale_diag,
      scale_identity_multiplier=scale_identity_multiplier,
      shape_hint=shape_hint,
      validate_args=validate_args,
      assert_positive=assert_positive,
      name=name)
def make_diag_scale(
    loc=None,
    scale_diag=None,
    scale_identity_multiplier=None,
    shape_hint=None,
    validate_args=False,
    assert_positive=False,
    name=None):
  """Creates a LinOp representing a diagonal matrix.

  Args:
    loc: Floating-point `Tensor`. This is used for inferring shape in the case
      where only `scale_identity_multiplier` is set.
    scale_diag: Floating-point `Tensor` representing the diagonal matrix.
      `scale_diag` has shape [N1, N2, ... k], which represents a k x k
      diagonal matrix.
      When `None` no diagonal term is added to the LinOp.
    scale_identity_multiplier: floating point rank 0 `Tensor` representing a
      scaling done to the identity matrix.
      When `scale_identity_multiplier = scale_diag = scale_tril = None` then
      `scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
      to `scale`.
    shape_hint: scalar integer `Tensor` representing a hint at the dimension of
      the identity matrix when only `scale_identity_multiplier` is set.
    validate_args: Python `bool` indicating whether arguments should be
      checked for correctness.
    assert_positive: Python `bool` indicating whether LinOp should be checked
      for being positive definite.
    name: Python `str` name given to ops managed by this object.

  Returns:
    `LinearOperator` representing a diagonal matrix.

  Raises:
    ValueError: If only `scale_identity_multiplier` is set and `loc` and
      `shape_hint` are both None.
  """

  def _maybe_attach_assertion(x):
    # Optionally guard `x` with runtime positivity / non-zero checks; a
    # no-op unless `validate_args` is set.
    if not validate_args:
      return x
    if assert_positive:
      return control_flow_ops.with_dependencies([
          check_ops.assert_positive(
              x, message="diagonal part must be positive"),
      ], x)
    return control_flow_ops.with_dependencies([
        check_ops.assert_none_equal(
            x,
            array_ops.zeros([], x.dtype),
            message="diagonal part must be non-zero")], x)

  with ops.name_scope(name, "make_diag_scale",
                      values=[loc, scale_diag, scale_identity_multiplier]):
    loc = _convert_to_tensor(loc, name="loc")
    scale_diag = _convert_to_tensor(scale_diag, name="scale_diag")
    scale_identity_multiplier = _convert_to_tensor(
        scale_identity_multiplier,
        name="scale_identity_multiplier")

  if scale_diag is not None:
    # Diagonal given: fold in any scaled-identity contribution.
    if scale_identity_multiplier is not None:
      scale_diag += scale_identity_multiplier[..., array_ops.newaxis]
    return linalg.LinearOperatorDiag(
        diag=_maybe_attach_assertion(scale_diag),
        is_non_singular=True,
        is_self_adjoint=True,
        is_positive_definite=assert_positive)

  # Only scale_identity_multiplier (or nothing) given: the event dimension
  # must come from `loc` or from `shape_hint`.
  if loc is None and shape_hint is None:
    raise ValueError(
        "Cannot infer `event_shape` unless `loc` or "
        "`shape_hint` is specified.")

  if shape_hint is None:
    shape_hint = loc.shape[-1]

  if scale_identity_multiplier is None:
    # Neither diag nor multiplier given: plain identity.
    return linalg.LinearOperatorIdentity(
        num_rows=shape_hint,
        dtype=loc.dtype.base_dtype,
        is_self_adjoint=True,
        is_positive_definite=True,
        assert_proper_shapes=validate_args)

  return linalg.LinearOperatorScaledIdentity(
      num_rows=shape_hint,
      multiplier=_maybe_attach_assertion(scale_identity_multiplier),
      is_non_singular=True,
      is_self_adjoint=True,
      is_positive_definite=assert_positive,
      assert_proper_shapes=validate_args)
def shapes_from_loc_and_scale(loc, scale, name="shapes_from_loc_and_scale"):
  """Infer distribution batch and event shapes from a location and scale.

  Location and scale family distributions determine their batch/event shape by
  broadcasting the `loc` and `scale` args.  This helper does that broadcast,
  statically if possible.

  Batch shape broadcasts as per the normal rules.
  We allow the `loc` event shape to broadcast up to that of `scale`.  We do not
  allow `scale`'s event shape to change.  Therefore, the last dimension of
  `loc` must either be size `1`, or the same as `scale.range_dimension`.

  See `MultivariateNormalLinearOperator` for a usage example.

  Args:
    loc: `N-D` `Tensor` with `N >= 1` (already converted to tensor) or `None`.
      If `None`, both batch and event shape are determined by `scale`.
    scale: A `LinearOperator` instance.
    name: A string name to prepend to created ops.

  Returns:
    batch_shape: `TensorShape` (if broadcast is done statically), or `Tensor`.
    event_shape: `TensorShape` (if broadcast is done statically), or `Tensor`.

  Raises:
    ValueError: If the last dimension of `loc` is determined statically to be
      different than the range of `scale`.
  """
  with ops.name_scope(name, values=[loc] + scale.graph_parents):
    # Get event shape: prefer the statically-known range dimension of
    # `scale`; fall back to a 1-element dynamic shape tensor.
    event_size = scale.range_dimension_tensor()
    event_size_const = tensor_util.constant_value(event_size)
    if event_size_const is not None:
      event_shape = event_size_const.reshape([1])
    else:
      event_shape = event_size[array_ops.newaxis]

    # Static check that event shapes match: `loc`'s last dimension must be
    # 1 (broadcastable) or equal to the scale's range dimension.
    if loc is not None:
      loc_event_size = loc.get_shape()[-1].value
      if loc_event_size is not None and event_size_const is not None:
        if loc_event_size != 1 and loc_event_size != event_size_const:
          raise ValueError(
              "Event size of 'scale' (%d) could not be broadcast up to that of "
              "'loc' (%d)." % (loc_event_size, event_size_const))

    # Get batch shape.
    batch_shape = scale.batch_shape_tensor()
    if loc is None:
      # No `loc`: the scale alone determines the batch shape; use the
      # static value when available.
      batch_shape_const = tensor_util.constant_value(batch_shape)
      batch_shape = (
          batch_shape_const if batch_shape_const is not None else batch_shape)
    else:
      # Broadcast scale's batch shape against loc's batch shape (all but
      # the last, event, dimension of `loc`).
      loc_batch_shape = loc.get_shape().with_rank_at_least(1)[:-1]
      if (loc.get_shape().ndims is None or
          not loc_batch_shape.is_fully_defined()):
        loc_batch_shape = array_ops.shape(loc)[:-1]
      else:
        loc_batch_shape = ops.convert_to_tensor(loc_batch_shape,
                                                name="loc_batch_shape")
      batch_shape = prefer_static_broadcast_shape(batch_shape, loc_batch_shape)

  return batch_shape, event_shape
def prefer_static_broadcast_shape(
    shape1, shape2, name="prefer_static_broadcast_shape"):
  """Convenience function which statically broadcasts shape when possible.

  Args:
    shape1: `1-D` integer `Tensor`.  Already converted to tensor!
    shape2: `1-D` integer `Tensor`.  Already converted to tensor!
    name: A string name to prepend to created ops.

  Returns:
    The broadcast shape, either as `TensorShape` (if broadcast can be done
    statically), or as a `Tensor`.
  """
  with ops.name_scope(name, values=[shape1, shape2]):

    def _as_shape_tensor(s):
      return ops.convert_to_tensor(s, name="shape", dtype=dtypes.int32)

    def _static_or_none(s):
      # TensorShape -> itself; tensor with a constant value -> TensorShape;
      # otherwise None (not statically known).
      if isinstance(s, tensor_shape.TensorShape):
        return s
      static = tensor_util.constant_value(_as_shape_tensor(s))
      if static is None:
        return None
      return tensor_shape.TensorShape(static)

    def _dynamic(s):
      if not isinstance(s, tensor_shape.TensorShape):
        return _as_shape_tensor(s)
      if s.is_fully_defined():
        return _as_shape_tensor(s.as_list())
      raise ValueError("Cannot broadcast from partially "
                       "defined `TensorShape`.")

    static1 = _static_or_none(shape1)
    static2 = _static_or_none(shape2)
    if static1 is not None and static2 is not None:
      return array_ops.broadcast_static_shape(static1, static2)
    return array_ops.broadcast_dynamic_shape(_dynamic(shape1),
                                             _dynamic(shape2))
def get_broadcast_shape(*tensors):
  """Get broadcast shape as a Python list of integers (preferred) or `Tensor`.

  Args:
    *tensors: One or more `Tensor` objects (already converted!).

  Returns:
    broadcast shape: Python list (if shapes determined statically), otherwise
      an `int32` `Tensor`.
  """
  # First attempt a fully static broadcast.
  static = tensors[0].shape
  for t in tensors[1:]:
    static = array_ops.broadcast_static_shape(static, t.shape)
  if static.is_fully_defined():
    return static.as_list()

  # Not fully known statically: compute the shape in the graph.
  dynamic = array_ops.shape(tensors[0])
  for t in tensors[1:]:
    dynamic = array_ops.broadcast_dynamic_shape(dynamic, array_ops.shape(t))
  return dynamic
def is_diagonal_scale(scale):
  """Returns `True` if `scale` is a `LinearOperator` that is known to be diag.

  Args:
    scale: `LinearOperator` instance.

  Returns:
    Python `bool`.

  Raises:
    TypeError: If `scale` is not a `LinearOperator`.
  """
  if not isinstance(scale, linalg.LinearOperator):
    raise TypeError("Expected argument 'scale' to be instance of LinearOperator"
                    ". Found: %s" % scale)
  return isinstance(scale, (linalg.LinearOperatorIdentity,
                            linalg.LinearOperatorScaledIdentity,
                            linalg.LinearOperatorDiag))
def maybe_check_scalar_distribution(
    distribution, expected_base_dtype, validate_args):
  """Helper which checks validity of a scalar `distribution` init arg.

  Valid here means:

  * `distribution` has scalar batch and event shapes.
  * `distribution` is `FULLY_REPARAMETERIZED`
  * `distribution` has expected dtype.

  Args:
    distribution: `Distribution`-like object.
    expected_base_dtype: `TensorFlow` `dtype`.
    validate_args: Python `bool`.  Whether to do additional checks:
      (i) check that reparameterization_type is `FULLY_REPARAMETERIZED`.
      (ii) add `tf.Assert` ops to the graph to enforce that distribution
      is scalar in the event that this cannot be determined statically.

  Returns:
    List of `tf.Assert` ops to run to enforce validity checks that could not
      be statically determined.  Empty if `not validate_args`.

  Raises:
    TypeError: If the distribution's dtype does not match
      `expected_base_dtype`.
    ValueError: If validate_args and distribution is not FULLY_REPARAMETERIZED
    ValueError: If distribution is statically determined to not have both
      scalar batch and scalar event shapes.
  """
  if distribution.dtype != expected_base_dtype:
    raise TypeError("dtype mismatch; "
                    "distribution.dtype=\"{}\" is not \"{}\"".format(
                        distribution.dtype.name, expected_base_dtype.name))

  # Although `reparameterization_type` is a static property, we guard it by
  # `validate_args`. This allows users to use a `distribution` which is not
  # reparameterized itself. However, we tacitly assume that although the
  # distribution is not reparameterized, it only depends on non-trainable
  # variables.
  if validate_args and (distribution.reparameterization_type
                        != distribution_lib.FULLY_REPARAMETERIZED):
    raise ValueError("Base distribution should be reparameterized or be "
                     "a function of non-trainable variables; "
                     "distribution.reparameterization_type = \"{}\" "
                     "!= \"FULLY_REPARAMETERIZED\".".format(
                         distribution.reparameterization_type))

  with ops.name_scope(name="check_distribution"):
    assertions = []

    def check_is_scalar(is_scalar, name):
      # Raise now when scalar-ness is statically known to be False;
      # otherwise (dynamic case) append a runtime assertion when
      # `validate_args` is set.
      is_scalar_ = static_value(is_scalar)
      if is_scalar_ is not None:
        if not is_scalar_:
          raise ValueError("distribution must be scalar; "
                           "distribution.{}=False is not True".format(name))
      elif validate_args:
        assertions.append(check_ops.assert_equal(
            is_scalar, True,
            message=("distribution must be scalar; "
                     "distribution.{}=False is not True".format(name))))

    check_is_scalar(distribution.is_scalar_event(), "is_scalar_event")
    check_is_scalar(distribution.is_scalar_batch(), "is_scalar_batch")
    return assertions
def static_value(x):
  """Return the statically-known value of `x`, or `None` if it is dynamic."""
  tensor = ops.convert_to_tensor(x)
  return tensor_util.constant_value(tensor)
|
{
"content_hash": "6d93f6323adffab96116f000e4067d21",
"timestamp": "",
"source": "github",
"line_count": 475,
"max_line_length": 89,
"avg_line_length": 39.8421052631579,
"alnum_prop": 0.67889035667107,
"repo_name": "ychfan/tensorflow",
"id": "869b5698e57d199755ce1686a74a1eafe3b73e7d",
"size": "19614",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distributions/python/ops/distribution_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "206941"
},
{
"name": "C++",
"bytes": "31053371"
},
{
"name": "CMake",
"bytes": "650214"
},
{
"name": "Go",
"bytes": "1001136"
},
{
"name": "Java",
"bytes": "441709"
},
{
"name": "Jupyter Notebook",
"bytes": "1940755"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38533"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6186"
},
{
"name": "Perl 6",
"bytes": "1360"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "28819616"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "382741"
}
],
"symlink_target": ""
}
|
"""
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <edouard.duchesnay@cea.fr>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
eigenvalues of a X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
## 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
## y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm, constructors' parameters
allow to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
(i) The outer loop iterate over components.
(ii) The inner loop estimates the weights vectors. This can be done
with two algo. (a) the inner loop of the original NIPALS algo. or (b) a
SVD on residuals cross-covariance matrices.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
    def fit(self, X, Y):
        """Fit model to data.

        Runs the outer NIPALS loop: for each of ``n_components`` latent
        components, estimate block weights, compute scores and loadings,
        then deflate the residual matrices in place.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples in the number of samples and
            n_features is the number of predictors.

        Y : array-like of response, shape = [n_samples, n_targets]
            Target vectors, where n_samples in the number of samples and
            n_targets is the number of response variables.

        Returns
        -------
        self : object
            The fitted estimator.

        Raises
        ------
        ValueError
            If ``n_components`` is out of range or the ``algorithm`` /
            ``mode`` / ``deflation_mode`` configuration is invalid.
        """
        # copy since this will contains the residuals (deflated) matrices
        check_consistent_length(X, Y)
        X = check_array(X, dtype=np.float64, copy=self.copy)
        Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
        if Y.ndim == 1:
            # Promote 1d targets to a single-column 2d array.
            Y = Y.reshape(-1, 1)
        n = X.shape[0]
        p = X.shape[1]
        q = Y.shape[1]
        # Validate the configuration before doing any work.
        if self.n_components < 1 or self.n_components > p:
            raise ValueError('Invalid number of components: %d' %
                             self.n_components)
        if self.algorithm not in ("svd", "nipals"):
            raise ValueError("Got algorithm %s when only 'svd' "
                             "and 'nipals' are known" % self.algorithm)
        if self.algorithm == "svd" and self.mode == "B":
            raise ValueError('Incompatible configuration: mode B is not '
                             'implemented with svd algorithm')
        if self.deflation_mode not in ["canonical", "regression"]:
            raise ValueError('The deflation mode is unknown')
        # Scale (in place)
        X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
            = _center_scale_xy(X, Y, self.scale)
        # Residuals (deflated) matrices.
        # NOTE: Xk/Yk alias X/Y, so the in-place deflation below also
        # mutates the (possibly copied) X and Y arrays.
        Xk = X
        Yk = Y
        # Results matrices
        self.x_scores_ = np.zeros((n, self.n_components))
        self.y_scores_ = np.zeros((n, self.n_components))
        self.x_weights_ = np.zeros((p, self.n_components))
        self.y_weights_ = np.zeros((q, self.n_components))
        self.x_loadings_ = np.zeros((p, self.n_components))
        self.y_loadings_ = np.zeros((q, self.n_components))
        self.n_iter_ = []
        # NIPALS algo: outer loop, over components
        for k in range(self.n_components):
            if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
                # Yk constant: no variance left to explain, stop early.
                warnings.warn('Y residual constant at iteration %s' % k)
                break
            # 1) weights estimation (inner loop)
            # -----------------------------------
            if self.algorithm == "nipals":
                x_weights, y_weights, n_iter_ = \
                    _nipals_twoblocks_inner_loop(
                        X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
                        tol=self.tol, norm_y_weights=self.norm_y_weights)
                self.n_iter_.append(n_iter_)
            elif self.algorithm == "svd":
                x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
            # compute scores
            x_scores = np.dot(Xk, x_weights)
            if self.norm_y_weights:
                y_ss = 1
            else:
                y_ss = np.dot(y_weights.T, y_weights)
            y_scores = np.dot(Yk, y_weights) / y_ss
            # test for null variance
            if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
                warnings.warn('X scores are null at iteration %s' % k)
                break
            # 2) Deflation (in place)
            # ----------------------
            # Possible memory footprint reduction may done here: in order to
            # avoid the allocation of a data chunk for the rank-one
            # approximations matrix which is then subtracted to Xk, we suggest
            # to perform a column-wise deflation.
            #
            # - regress Xk's on x_score
            x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
            # - subtract rank-one approximations to obtain remainder matrix
            Xk -= np.dot(x_scores, x_loadings.T)
            if self.deflation_mode == "canonical":
                # - regress Yk's on y_score, then subtract rank-one approx.
                y_loadings = (np.dot(Yk.T, y_scores)
                              / np.dot(y_scores.T, y_scores))
                Yk -= np.dot(y_scores, y_loadings.T)
            if self.deflation_mode == "regression":
                # - regress Yk's on x_score, then subtract rank-one approx.
                y_loadings = (np.dot(Yk.T, x_scores)
                              / np.dot(x_scores.T, x_scores))
                Yk -= np.dot(x_scores, y_loadings.T)
            # 3) Store weights, scores and loadings # Notation:
            self.x_scores_[:, k] = x_scores.ravel()  # T
            self.y_scores_[:, k] = y_scores.ravel()  # U
            self.x_weights_[:, k] = x_weights.ravel()  # W
            self.y_weights_[:, k] = y_weights.ravel()  # C
            self.x_loadings_[:, k] = x_loadings.ravel()  # P
            self.y_loadings_[:, k] = y_loadings.ravel()  # Q
        # Such that: X = TP' + Err and Y = UQ' + Err
        # 4) rotations from input space to transformed space (scores)
        # T = X W(P'W)^-1 = XW* (W* : p x k matrix)
        # U = Y C(Q'C)^-1 = YC* (W* : q x k matrix)
        self.x_rotations_ = np.dot(
            self.x_weights_,
            linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
        if Y.shape[1] > 1:
            self.y_rotations_ = np.dot(
                self.y_weights_,
                linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
        else:
            # Single-target case: rotation is trivially the identity.
            self.y_rotations_ = np.ones(1)
        if True or self.deflation_mode == "regression":
            # FIXME what's with the if?
            # Estimate regression coefficient
            # Regress Y on T
            # Y = TQ' + Err,
            # Then express in function of X
            # Y = X W(P'W)^-1Q' + Err = XB + Err
            # => B = W*Q' (p x q)
            self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
            # Undo the training-time standardization so coef_ maps raw X
            # onto raw-scale Y.
            self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
                          self.y_std_)
        return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples in the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples in the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
    """PLS regression

    PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
    in case of one dimensional response.
    This class inherits from _PLS with mode="A", deflation_mode="regression",
    norm_y_weights=False and algorithm="nipals".

    Read more in the :ref:`User Guide <cross_decomposition>`.

    Parameters
    ----------
    n_components : int, (default 2)
        Number of components to keep.

    scale : boolean, (default True)
        whether to scale the data

    max_iter : an integer, (default 500)
        the maximum number of iterations of the NIPALS inner loop (used
        only if algorithm="nipals")

    tol : non-negative real
        Tolerance used in the iterative algorithm default 1e-06.

    copy : boolean, default True
        Whether the deflation should be done on a copy. Let the default
        value to True unless you don't care about side effect

    Attributes
    ----------
    x_weights_ : array, [p, n_components]
        X block weights vectors.

    y_weights_ : array, [q, n_components]
        Y block weights vectors.

    x_loadings_ : array, [p, n_components]
        X block loadings vectors.

    y_loadings_ : array, [q, n_components]
        Y block loadings vectors.

    x_scores_ : array, [n_samples, n_components]
        X scores.

    y_scores_ : array, [n_samples, n_components]
        Y scores.

    x_rotations_ : array, [p, n_components]
        X block to latents rotations.

    y_rotations_ : array, [q, n_components]
        Y block to latents rotations.

    coef_: array, [p, q]
        The coefficients of the linear model: ``Y = X coef_ + Err``

    n_iter_ : array-like
        Number of iterations of the NIPALS inner loop for each
        component.

    Notes
    -----
    For each component k, find weights u, v that optimizes:
    ``max corr(Xk u, Yk v) * var(Xk u) var(Yk u)``, such that ``|u| = 1``

    Note that it maximizes both the correlations between the scores and the
    intra-block variances.

    The residual matrix of X (Xk+1) block is obtained by the deflation on
    the current X score: x_score.

    The residual matrix of Y (Yk+1) block is obtained by deflation on the
    current X score. This performs the PLS regression known as PLS2. This
    mode is prediction oriented.

    This implementation provides the same results that 3 PLS packages
    provided in the R language (R-project):

        - "mixOmics" with function pls(X, Y, mode = "regression")
        - "plspm " with function plsreg2(X, Y)
        - "pls" with function oscorespls.fit(X, Y)

    Examples
    --------
    >>> from sklearn.cross_decomposition import PLSRegression
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> pls2 = PLSRegression(n_components=2)
    >>> pls2.fit(X, Y)
    ... # doctest: +NORMALIZE_WHITESPACE
    PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
            tol=1e-06)
    >>> Y_pred = pls2.predict(X)

    References
    ----------

    Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
    emphasis on the two-block case. Technical Report 371, Department of
    Statistics, University of Washington, Seattle, 2000.

    In french but still a reference:
    Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
    Editions Technic.
    """

    def __init__(self, n_components=2, scale=True,
                 max_iter=500, tol=1e-06, copy=True):
        _PLS.__init__(self, n_components=n_components, scale=scale,
                      deflation_mode="regression", mode="A",
                      norm_y_weights=False, max_iter=max_iter, tol=tol,
                      copy=copy)

    @property
    def coefs(self):
        """Deprecated alias for :attr:`coef_`."""
        check_is_fitted(self, 'coef_')
        # BUG FIX: the original code merely *instantiated* a
        # DeprecationWarning object without raising or emitting it, so
        # callers never saw the deprecation notice.  warnings.warn routes
        # it through the warnings machinery ('warnings' is imported at
        # module level and already used by _PLS.fit).
        warnings.warn("``coefs`` attribute has been deprecated and will be "
                      "removed in version 0.17. Use ``coef_`` instead",
                      DeprecationWarning)
        return self.coef_
class PLSCanonical(_PLS):
    """ PLSCanonical implements the 2 blocks canonical PLS of the original Wold
    algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000].

    This class inherits from PLS with mode="A" and deflation_mode="canonical",
    norm_y_weights=True and algorithm="nipals", but svd should provide similar
    results up to numerical errors.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    Parameters
    ----------
    scale : boolean, scale data? (default True)

    algorithm : string, "nipals" or "svd"
        The algorithm used to estimate the weights. It will be called
        n_components times, i.e. once for each iteration of the outer loop.

    max_iter : an integer, (default 500)
        the maximum number of iterations of the NIPALS inner loop (used
        only if algorithm="nipals")

    tol : non-negative real, default 1e-06
        the tolerance used in the iterative algorithm

    copy : boolean, default True
        Whether the deflation should be done on a copy. Let the default
        value to True unless you don't care about side effect

    n_components : int, number of components to keep. (default 2).

    Attributes
    ----------
    x_weights_ : array, shape = [p, n_components]
        X block weights vectors.

    y_weights_ : array, shape = [q, n_components]
        Y block weights vectors.

    x_loadings_ : array, shape = [p, n_components]
        X block loadings vectors.

    y_loadings_ : array, shape = [q, n_components]
        Y block loadings vectors.

    x_scores_ : array, shape = [n_samples, n_components]
        X scores.

    y_scores_ : array, shape = [n_samples, n_components]
        Y scores.

    x_rotations_ : array, shape = [p, n_components]
        X block to latents rotations.

    y_rotations_ : array, shape = [q, n_components]
        Y block to latents rotations.

    n_iter_ : array-like
        Number of iterations of the NIPALS inner loop for each
        component. Not useful if the algorithm provided is "svd".

    Notes
    -----
    For each component k, find weights u, v that optimize::

        max corr(Xk u, Yk v) * var(Xk u) var(Yk u), such that ``|u| = |v| = 1``

    Note that it maximizes both the correlations between the scores and the
    intra-block variances.

    The residual matrix of X (Xk+1) block is obtained by the deflation on the
    current X score: x_score.

    The residual matrix of Y (Yk+1) block is obtained by deflation on the
    current Y score. This performs a canonical symmetric version of the PLS
    regression. But slightly different than the CCA. This is mostly used
    for modeling.

    This implementation provides the same results that the "plspm" package
    provided in the R language (R-project), using the function plsca(X, Y).
    Results are equal or collinear with the function
    ``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
    relies in the fact that mixOmics implementation does not exactly implement
    the Wold algorithm since it does not normalize y_weights to one.

    Examples
    --------
    >>> from sklearn.cross_decomposition import PLSCanonical
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> plsca = PLSCanonical(n_components=2)
    >>> plsca.fit(X, Y)
    ... # doctest: +NORMALIZE_WHITESPACE
    PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
                 scale=True, tol=1e-06)
    >>> X_c, Y_c = plsca.transform(X, Y)

    References
    ----------

    Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
    emphasis on the two-block case. Technical Report 371, Department of
    Statistics, University of Washington, Seattle, 2000.

    Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
    Editions Technic.

    See also
    --------
    CCA
    PLSSVD
    """

    def __init__(self, n_components=2, scale=True, algorithm="nipals",
                 max_iter=500, tol=1e-06, copy=True):
        # Thin configuration wrapper: fix mode/deflation/weight
        # normalization for the canonical variant and delegate the rest.
        super(PLSCanonical, self).__init__(
            n_components=n_components, scale=scale,
            deflation_mode="canonical", mode="A",
            norm_y_weights=True, algorithm=algorithm,
            max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
    """Partial Least Square SVD

    Simply perform a svd on the crosscovariance matrix: X'Y
    There are no iterative deflation here.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    Parameters
    ----------
    n_components : int, default 2
        Number of components to keep.

    scale : boolean, default True
        Whether to scale X and Y.

    copy : boolean, default True
        Whether to copy X and Y, or perform in-place computations.

    Attributes
    ----------
    x_weights_ : array, [p, n_components]
        X block weights vectors.

    y_weights_ : array, [q, n_components]
        Y block weights vectors.

    x_scores_ : array, [n_samples, n_components]
        X scores.

    y_scores_ : array, [n_samples, n_components]
        Y scores.

    See also
    --------
    PLSCanonical
    CCA
    """

    def __init__(self, n_components=2, scale=True, copy=True):
        self.n_components = n_components
        self.scale = scale
        self.copy = copy

    def fit(self, X, Y):
        """Fit by a single SVD of the cross-covariance matrix X'Y."""
        # copy since this will contains the centered data
        check_consistent_length(X, Y)
        X = check_array(X, dtype=np.float64, copy=self.copy)
        Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
        if Y.ndim == 1:
            Y = Y.reshape(-1, 1)
        if self.n_components > max(Y.shape[1], X.shape[1]):
            raise ValueError("Invalid number of components n_components=%d with "
                             "X of shape %s and Y of shape %s."
                             % (self.n_components, str(X.shape), str(Y.shape)))
        # Scale (in place)
        X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
            _center_scale_xy(X, Y, self.scale)
        # svd(X'Y)
        C = np.dot(X.T, Y)
        # The arpack svds solver only works if the number of extracted
        # components is smaller than rank(X) - 1. Hence, if we want to extract
        # all the components (C.shape[1]), we have to use another one. Else,
        # let's use arpacks to compute only the interesting components.
        if self.n_components >= np.min(C.shape):
            u, _, vt = linalg.svd(C, full_matrices=False)
        else:
            u, _, vt = arpack.svds(C, k=self.n_components)
        v = vt.T
        self.x_scores_ = np.dot(X, u)
        self.y_scores_ = np.dot(Y, v)
        self.x_weights_ = u
        self.y_weights_ = v
        return self

    def transform(self, X, Y=None):
        """Apply the dimension reduction learned on the train data."""
        check_is_fitted(self, 'x_mean_')
        X = check_array(X, dtype=np.float64)
        # Center/scale with training statistics, then project.
        Xr = (X - self.x_mean_) / self.x_std_
        x_scores = np.dot(Xr, self.x_weights_)
        if Y is None:
            return x_scores
        if Y.ndim == 1:
            Y = Y.reshape(-1, 1)
        Yr = (Y - self.y_mean_) / self.y_std_
        y_scores = np.dot(Yr, self.y_weights_)
        return x_scores, y_scores

    def fit_transform(self, X, y=None, **fit_params):
        """Learn and apply the dimension reduction on the train data.

        Parameters
        ----------
        X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples in the number of samples and
            p is the number of predictors.

        y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples in the number of samples and
            q is the number of response variables.

        Returns
        -------
        x_scores if y is not given, (x_scores, y_scores) otherwise.
        """
        return self.fit(X, y, **fit_params).transform(X, y)
|
{
"content_hash": "6cda6bb8a958354cadc7f6bf843aa658",
"timestamp": "",
"source": "github",
"line_count": 796,
"max_line_length": 81,
"avg_line_length": 36.13944723618091,
"alnum_prop": 0.5801439149024924,
"repo_name": "lazywei/scikit-learn",
"id": "8e77bd5eb0f75552484ac9a675f1fe444f65a306",
"size": "28767",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "sklearn/cross_decomposition/pls_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "C",
"bytes": "385829"
},
{
"name": "C++",
"bytes": "139482"
},
{
"name": "Makefile",
"bytes": "1370"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "5900421"
},
{
"name": "Shell",
"bytes": "3952"
}
],
"symlink_target": ""
}
|
"""Add host column to EPG and concrete devices
Revision ID: 1b58ffa871bb
Revises: f1ca776aafab
Create Date: 2018-03-12 12:23:39.608507
"""
# revision identifiers, used by Alembic.
revision = '1b58ffa871bb'
down_revision = 'f1ca776aafab'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from aim.db.migration.data_migration import add_host_column
def upgrade():
    # Every affected table gains the same nullable, indexed 'host'
    # column, so add it in a loop rather than four copies of the call.
    host_tables = (
        'aim_endpoint_group_static_paths',
        'aim_concrete_device_ifs',
        'aim_device_cluster_devices',
        'aim_l3out_interfaces',
    )
    for table_name in host_tables:
        op.add_column(
            table_name,
            sa.Column('host', sa.String(255), nullable=True, index=True)
        )
    # Backfill the freshly added columns from existing records.
    session = sa.orm.Session(bind=op.get_bind(), autocommit=True)
    add_host_column.migrate(session)
def downgrade():
    # Intentional no-op: this migration does not support removing the
    # 'host' columns, so downgrading leaves the schema unchanged.
    pass
|
{
"content_hash": "593a2b771620571f93e013f75ce9b093",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 68,
"avg_line_length": 24.651162790697676,
"alnum_prop": 0.6679245283018868,
"repo_name": "noironetworks/aci-integration-module",
"id": "5e13a52a7f94def6edbe34eb46baece5c82a7707",
"size": "1693",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aim/db/migration/alembic_migrations/versions/1b58ffa871bb_add_host_column.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1899856"
},
{
"name": "Roff",
"bytes": "437"
},
{
"name": "Shell",
"bytes": "2552"
}
],
"symlink_target": ""
}
|
import csv
import datetime
__author__ = 'abdullah'
from api.models import Content, Device, Software, StageMaterial, Contest, Composition, User
from django.core.management import BaseCommand
class Command(BaseCommand):
    # Show this when the user types help
    help = "Create base contents and save to database"

    # A command must define handle()
    def handle(self, *args, **options):
        """Reset the demo database and repopulate baseline fixtures.

        Recreates the 2014/2015 contests, reference data (device,
        software, stage material), static page contents read from
        ``default_contents/api_content.csv``, a default user and four
        sample compositions.  Prints an error and exits cleanly when the
        CSV file is missing.
        """
        try:
            # Contests: wipe and recreate the 2014/2015 editions.
            Contest.objects.all().delete()

            contest_2014 = Contest()
            contest_2014.year = 2014
            contest_2014.theme = 'Ritim'
            contest_2014.poster = 'http://beste.halici.com.tr/afis/2014.jpg'
            contest_2014.start_date = datetime.datetime(2014, 6, 1, 0, 0)
            contest_2014.final_date = datetime.datetime(2014, 11, 1, 0, 0)
            contest_2014.finish_date = datetime.datetime(2014, 12, 15, 0, 0)
            contest_2014.save()

            contest_2015 = Contest()
            contest_2015.year = 2015
            # BUG FIX: these two assignments previously targeted
            # contest_2014, silently overwriting the 2014 record's theme
            # and poster and leaving the 2015 contest without either.
            contest_2015.theme = 'Attila Özdemiroğlu'
            contest_2015.poster = 'http://beste.halici.com.tr/afis/2015.jpg'
            contest_2015.start_date = datetime.datetime(2015, 6, 1, 0, 0)
            contest_2015.final_date = datetime.datetime(2015, 11, 1, 0, 0)
            contest_2015.finish_date = datetime.datetime(2015, 12, 15, 0, 0)
            contest_2015.save()

            # Reference data: one device / software / stage material each.
            Device.objects.all().delete()
            device = Device()
            device.name = 'Bilgisayar'
            device.save()

            Software.objects.all().delete()
            software = Software()
            software.name = 'FL Studio'
            software.save()

            StageMaterial.objects.all().delete()
            stage_material = StageMaterial()
            stage_material.name = 'Gitar'
            stage_material.save()

            # Static page contents, loaded from CSV.  'with' ensures the
            # file handle is closed (the original left it open).
            Content.objects.all().delete()
            with open('default_contents/api_content.csv', 'rb') as content_file:
                reader = csv.reader(content_file)
                for row in reader:
                    content = Content()
                    content.create_date = row[1]
                    content.title = row[2]
                    content.link = row[3]
                    content.content = row[4]
                    content.category_name = row[5]
                    # row[0] and row[6] are intentionally unused.
                    content.is_active = row[7] == '1'
                    content.is_for_static_content = row[8] == '1'
                    content.save()

            # Synthetic navigation entry for the voting page.
            content_vote = Content()
            content_vote.title = 'Oylama'
            content_vote.link = 'oylama'
            content_vote.category_name = 'link'
            content_vote.is_active = True
            content_vote.is_for_static_content = False
            content_vote.save()

            # Default user.
            User.objects.all().delete()
            user = User()
            user.username = 'karacabey'
            user.email = 'siyahsuskunluk@gmail.com'
            user.first_name = 'Abdullah'
            user.last_name = 'Karacabey'
            user.set_password('674311')
            # NOTE(review): 'addres' looks like a typo but must match the
            # model field name -- verify against api.models.User.
            user.addres = 'Bağlum'
            user.city = 'Ankara'
            user.save()

            # Sample compositions: two per contest, all sharing the same
            # audio URL and M2M relations.
            Composition.objects.all().delete()
            sample_url = ('https://s3-eu-west-1.amazonaws.com/gong-ir/'
                          'temp_attachments/beste/1441108257899wjzK3Htv')

            def build_composition(name, contest):
                # Save once to get a PK, attach M2M relations, save again.
                comp = Composition(owner=user, contest=contest)
                comp.name = name
                comp.url = sample_url
                comp.save()
                comp.softwares = [software]
                comp.devices = [device]
                comp.stage_materials = [stage_material]
                comp.save()
                return comp

            build_composition('İlk Şarkı', contest_2014)
            build_composition('İkinci Şarkı', contest_2014)
            build_composition('Üçüncü Şarkı', contest_2015)
            build_composition('Dördüncü Şarkı', contest_2015)

            print('Default contents are created')
        except IOError:
            print('File is not found')
|
{
"content_hash": "4e945dd31f5bc3d0948111dec014f7b3",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 121,
"avg_line_length": 36.97959183673469,
"alnum_prop": 0.5732155997056659,
"repo_name": "haliciyazilim/beste-yarismasi",
"id": "7995d338c38efb3d51f0060dc7ad300f287be6a7",
"size": "5480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/api/management/commands/createContents.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1955"
},
{
"name": "HTML",
"bytes": "37486"
},
{
"name": "JavaScript",
"bytes": "57342"
},
{
"name": "Python",
"bytes": "56890"
}
],
"symlink_target": ""
}
|
""" Utilities for managing the sending of messages via SMS and other channels
"""
import squawk.gateway
import squawk.models
import uuid
from collections import defaultdict
from django.template import Context
from django.template import Template
from django.conf import settings
from squawk import DisabledContactError
from squawk import DisabledGroupError
from squawk import DisabledEventError
from squawk import InvalidContactError
from squawk import InvalidGroupError
from squawk import InvalidEventError
from squawk import PartialSendError
from squawk.models import Contact
from squawk.models import ContactGroup
from squawk.models import Event
from squawk.models import TransmissionLog
from squawk.tasks import transmit
def create_notification_id():
    """Return a fresh 32-character lowercase-hex notification id.

    Equivalent to a UUID4 with the dashes stripped.
    """
    return uuid.uuid4().hex
def enqueue(api_user, notification_id, notification_type, notification_slug,
        contacts, message):
    """ Enqueue message(s) to a single contact endpoint """
    # Persist one TransmissionLog per enabled endpoint of each enabled
    # contact, grouped by endpoint type for dispatch.
    endpoint_groups = defaultdict(list)
    for contact in contacts:
        if not contact.enabled:
            continue
        for ep in contact.contactendpoint_set.all():
            if not ep.enabled:
                continue
            log_entry = TransmissionLog(notification_id=notification_id,
                                        gateway_response='',
                                        api_user=api_user,
                                        notification_type=notification_type,
                                        notification_slug=notification_slug,
                                        contact=contact,
                                        end_point=ep.end_point,
                                        address=ep.address,
                                        message=message,
                                        enqueued=True,
                                        send_ok=False,
                                        delivery_confirmed=False,
                                        gateway_status='',
                                        charge=None)
            log_entry.save()
            endpoint_groups[ep.end_point].append(log_entry)
    # Run synchronously in-process, or hand off to the celery worker.
    dispatch = transmit if settings.SEND_IN_PROCESS else transmit.delay
    # True => the endpoint accepts one batched send (e.g. email BCCs all
    # addresses); False => one call per recipient (e.g. Twitter).
    # @todo move this mapping to settings, or into the gateway itself;
    # some SMS end-points do not take bulk requests either.
    batching = {squawk.models.SMS: True,
                squawk.models.TWITTER: False,
                squawk.models.EMAIL: True,
                squawk.models.WEBHOOK: False,
                }
    for ep, log_entries in endpoint_groups.items():
        if batching[ep]:
            dispatch([entry.id for entry in log_entries], ep)
        else:
            for entry in log_entries:
                dispatch([entry.id], ep)
def message_contact(api_user, user_slug, message):
    """ Message a single contact.

    Raises InvalidContactError for an unknown slug and
    DisabledContactError for a disabled one.
    @todo some handling if GATEWAY is None or invalid
    """
    notification_id = create_notification_id()
    try:
        contact = Contact.objects.get(slug=user_slug)
        if not contact.enabled:
            raise DisabledContactError(
                message = "Contact '%s' (%s) is disabled" % (contact.name, user_slug))
        enqueue(api_user, notification_id, 'contact', user_slug,
                [contact], message)
    except Contact.DoesNotExist:
        raise InvalidContactError(message = "%s not found" % user_slug)
    return notification_id
def message_group(api_user, group_slug, message):
    """ Message a group of contacts.

    Raises InvalidGroupError / DisabledGroupError up front, and
    PartialSendError if the enqueue itself fails part-way.
    @todo some handling if GATEWAY is None or invalid
    """
    notification_id = create_notification_id()
    send_error = None
    try:
        group = ContactGroup.objects.get(slug=group_slug)
        if not group.enabled:
            raise DisabledGroupError(message = "Group %s is disabled" % group.slug)
        enabled_contacts = [c for c in group.contacts.all() if c.enabled]
        try:
            enqueue(api_user, notification_id, 'group', group_slug,
                    enabled_contacts, message)
        except Exception as ex:
            # Remember the failure; re-raised below as PartialSendError so
            # callers still receive the notification id.
            send_error = ex
    except ContactGroup.DoesNotExist:
        raise InvalidGroupError(message = "%s not found" % group_slug)
    if send_error:
        raise PartialSendError(message = "Error in sending message: %s" % send_error,
                               notification_id = notification_id)
    return notification_id
def fire_event(api_user, event_slug, post_args):
    """ Fire an event and message associated users and groups.

    The event message is a Django template rendered against post_args.
    @todo inefficient double read of group/contact records - one in this loop
    and one in the message_[contact|group] methods
    """
    notification_id = create_notification_id()
    try:
        event = Event.objects.get(slug=event_slug)
    except Event.DoesNotExist:
        raise InvalidEventError(message = "%s not found" % event_slug)
    if not event.enabled:
        raise DisabledEventError(message = "Event %s is disabled" % event_slug)
    # Render the event's message template with the POSTed parameters.
    message = Template(event.message).render(Context(post_args))
    # Gather enabled contacts from enabled groups plus direct contacts.
    recipients = []
    for group in [g for g in event.groups.all() if g.enabled]:
        recipients += [c for c in group.contacts.all() if c.enabled]
    recipients += [c for c in event.contacts.all() if c.enabled]
    # de-dupe contacts in the list - order changes but we don't mind
    recipients = list(set(recipients))
    enqueue(api_user, notification_id, 'event', event_slug, recipients, message)
    return notification_id
def status_callback(callback_data):
    """ Receive callback with delivery status data from the gateway """
    # Delegate straight to the currently configured gateway implementation.
    squawk.gateway.gateway().status_callback(callback_data)
def inbound_callback(callback_data):
    """ Receive callback with inbound message payload """
    # Delegate straight to the currently configured gateway implementation.
    squawk.gateway.gateway().inbound_callback(callback_data)
|
{
"content_hash": "49f0436070892a8601adbbc275617846",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 94,
"avg_line_length": 38.04907975460123,
"alnum_prop": 0.6347952273460175,
"repo_name": "aquamatt/RedFlash",
"id": "99ea5ac0c2b011ea2d473bbcbc10ab90a0949b90",
"size": "6305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/squawk/lib.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "D",
"bytes": "1243"
},
{
"name": "Python",
"bytes": "119231"
}
],
"symlink_target": ""
}
|
import multiprocessing
class MyProcess(multiprocessing.Process):
    """Process subclass that announces itself when its run() executes."""

    def run(self):
        # self.name is the process name assigned by multiprocessing.
        announcement = 'called run method in %s' % self.name
        print(announcement)
        return
if __name__ == '__main__':
    jobs = []
    # Launch five worker processes one at a time, waiting for each to
    # finish before starting the next (sequential, not parallel).
    for _ in range(5):
        proc = MyProcess()
        jobs.append(proc)
        proc.start()
        proc.join()
|
{
"content_hash": "f2092daa460540ad391c30ba6c49e12b",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 52,
"avg_line_length": 18.823529411764707,
"alnum_prop": 0.50625,
"repo_name": "IdiosyncraticDragon/Reading-Notes",
"id": "da5e8d6d72cbe53ddacc07880856632e5f37e8ac",
"size": "393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python Parallel Programming Cookbook_Code/Chapter 3/subclass_process.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "69018"
}
],
"symlink_target": ""
}
|
"""Support for Ambient Weather Station sensors."""
from __future__ import annotations
from datetime import datetime
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_NAME,
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_MILLION,
DEGREE,
IRRADIATION_WATTS_PER_SQUARE_METER,
LIGHT_LUX,
PERCENTAGE,
PRECIPITATION_INCHES,
PRECIPITATION_INCHES_PER_HOUR,
PRESSURE_INHG,
SPEED_MILES_PER_HOUR,
TEMP_FAHRENHEIT,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import EntityDescription
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import (
TYPE_SOLARRADIATION,
TYPE_SOLARRADIATION_LX,
AmbientStation,
AmbientWeatherEntity,
)
from .const import ATTR_LAST_DATA, DOMAIN
# Keys of the data points an Ambient station may report in its payload.
# Each TYPE_* value must match the key used in the station's "last data"
# dict; SENSOR_DESCRIPTIONS below maps them to Home Assistant entities.
# Rain totals (inches):
TYPE_24HOURRAININ = "24hourrainin"
TYPE_BAROMABSIN = "baromabsin"
TYPE_BAROMRELIN = "baromrelin"
TYPE_CO2 = "co2"
TYPE_DAILYRAININ = "dailyrainin"
TYPE_DEWPOINT = "dewPoint"
TYPE_EVENTRAININ = "eventrainin"
TYPE_FEELSLIKE = "feelsLike"
TYPE_HOURLYRAININ = "hourlyrainin"
# Humidity sensors: the bare key is the outdoor unit; numbered keys are
# add-on remote sensors 1-10; "humidityin" is the indoor console.
TYPE_HUMIDITY = "humidity"
TYPE_HUMIDITY1 = "humidity1"
TYPE_HUMIDITY10 = "humidity10"
TYPE_HUMIDITY2 = "humidity2"
TYPE_HUMIDITY3 = "humidity3"
TYPE_HUMIDITY4 = "humidity4"
TYPE_HUMIDITY5 = "humidity5"
TYPE_HUMIDITY6 = "humidity6"
TYPE_HUMIDITY7 = "humidity7"
TYPE_HUMIDITY8 = "humidity8"
TYPE_HUMIDITY9 = "humidity9"
TYPE_HUMIDITYIN = "humidityin"
TYPE_LASTRAIN = "lastRain"
TYPE_MAXDAILYGUST = "maxdailygust"
TYPE_MONTHLYRAININ = "monthlyrainin"
# Particulate matter (µg/m³), outdoor and indoor, instantaneous and 24h avg.
TYPE_PM25 = "pm25"
TYPE_PM25_24H = "pm25_24h"
TYPE_PM25_IN = "pm25_in"
TYPE_PM25_IN_24H = "pm25_in_24h"
# Soil humidity/temperature probes 1-10.
TYPE_SOILHUM1 = "soilhum1"
TYPE_SOILHUM10 = "soilhum10"
TYPE_SOILHUM2 = "soilhum2"
TYPE_SOILHUM3 = "soilhum3"
TYPE_SOILHUM4 = "soilhum4"
TYPE_SOILHUM5 = "soilhum5"
TYPE_SOILHUM6 = "soilhum6"
TYPE_SOILHUM7 = "soilhum7"
TYPE_SOILHUM8 = "soilhum8"
TYPE_SOILHUM9 = "soilhum9"
TYPE_SOILTEMP1F = "soiltemp1f"
TYPE_SOILTEMP10F = "soiltemp10f"
TYPE_SOILTEMP2F = "soiltemp2f"
TYPE_SOILTEMP3F = "soiltemp3f"
TYPE_SOILTEMP4F = "soiltemp4f"
TYPE_SOILTEMP5F = "soiltemp5f"
TYPE_SOILTEMP6F = "soiltemp6f"
TYPE_SOILTEMP7F = "soiltemp7f"
TYPE_SOILTEMP8F = "soiltemp8f"
TYPE_SOILTEMP9F = "soiltemp9f"
# Air temperature (°F): numbered remote sensors, outdoor ("tempf") and
# indoor ("tempinf").
TYPE_TEMP10F = "temp10f"
TYPE_TEMP1F = "temp1f"
TYPE_TEMP2F = "temp2f"
TYPE_TEMP3F = "temp3f"
TYPE_TEMP4F = "temp4f"
TYPE_TEMP5F = "temp5f"
TYPE_TEMP6F = "temp6f"
TYPE_TEMP7F = "temp7f"
TYPE_TEMP8F = "temp8f"
TYPE_TEMP9F = "temp9f"
TYPE_TEMPF = "tempf"
TYPE_TEMPINF = "tempinf"
TYPE_TOTALRAININ = "totalrainin"
TYPE_UV = "uv"
TYPE_WEEKLYRAININ = "weeklyrainin"
# Wind direction (degrees) and speed (mph), instantaneous and averaged.
TYPE_WINDDIR = "winddir"
TYPE_WINDDIR_AVG10M = "winddir_avg10m"
TYPE_WINDDIR_AVG2M = "winddir_avg2m"
TYPE_WINDGUSTDIR = "windgustdir"
TYPE_WINDGUSTMPH = "windgustmph"
TYPE_WINDSPDMPH_AVG10M = "windspdmph_avg10m"
TYPE_WINDSPDMPH_AVG2M = "windspdmph_avg2m"
TYPE_WINDSPEEDMPH = "windspeedmph"
TYPE_YEARLYRAININ = "yearlyrainin"
# Static entity descriptions, one per supported station data point.
# Only the descriptions whose key appears in a station's last-data payload
# are turned into entities (see async_setup_entry below).
SENSOR_DESCRIPTIONS = (
    SensorEntityDescription(
        key=TYPE_24HOURRAININ,
        name="24 Hr Rain",
        icon="mdi:water",
        native_unit_of_measurement=PRECIPITATION_INCHES,
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    SensorEntityDescription(
        key=TYPE_BAROMABSIN,
        name="Abs Pressure",
        native_unit_of_measurement=PRESSURE_INHG,
        device_class=SensorDeviceClass.PRESSURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_BAROMRELIN,
        name="Rel Pressure",
        native_unit_of_measurement=PRESSURE_INHG,
        device_class=SensorDeviceClass.PRESSURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_CO2,
        name="co2",
        native_unit_of_measurement=CONCENTRATION_PARTS_PER_MILLION,
        device_class=SensorDeviceClass.CO2,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_DAILYRAININ,
        name="Daily Rain",
        icon="mdi:water",
        native_unit_of_measurement=PRECIPITATION_INCHES,
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    SensorEntityDescription(
        key=TYPE_DEWPOINT,
        name="Dew Point",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_EVENTRAININ,
        name="Event Rain",
        icon="mdi:water",
        native_unit_of_measurement=PRECIPITATION_INCHES,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_FEELSLIKE,
        name="Feels Like",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_HOURLYRAININ,
        name="Hourly Rain Rate",
        icon="mdi:water",
        native_unit_of_measurement=PRECIPITATION_INCHES_PER_HOUR,
        # NOTE(review): this is a rate (in/hr), yet the state class is
        # TOTAL_INCREASING like the cumulative totals — confirm intended.
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    SensorEntityDescription(
        key=TYPE_HUMIDITY10,
        name="Humidity 10",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
    ),
    SensorEntityDescription(
        key=TYPE_HUMIDITY1,
        name="Humidity 1",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
    ),
    SensorEntityDescription(
        key=TYPE_HUMIDITY2,
        name="Humidity 2",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
    ),
    SensorEntityDescription(
        key=TYPE_HUMIDITY3,
        name="Humidity 3",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
    ),
    SensorEntityDescription(
        key=TYPE_HUMIDITY4,
        name="Humidity 4",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
    ),
    SensorEntityDescription(
        key=TYPE_HUMIDITY5,
        name="Humidity 5",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
    ),
    SensorEntityDescription(
        key=TYPE_HUMIDITY6,
        name="Humidity 6",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
    ),
    SensorEntityDescription(
        key=TYPE_HUMIDITY7,
        name="Humidity 7",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
    ),
    SensorEntityDescription(
        key=TYPE_HUMIDITY8,
        name="Humidity 8",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
    ),
    SensorEntityDescription(
        key=TYPE_HUMIDITY9,
        name="Humidity 9",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
    ),
    SensorEntityDescription(
        key=TYPE_HUMIDITY,
        name="Humidity",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
    ),
    SensorEntityDescription(
        key=TYPE_HUMIDITYIN,
        name="Humidity In",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
    ),
    SensorEntityDescription(
        key=TYPE_LASTRAIN,
        name="Last Rain",
        icon="mdi:water",
        # Raw value is an ISO timestamp string; parsed in
        # AmbientWeatherSensor.update_from_latest_data.
        device_class=SensorDeviceClass.TIMESTAMP,
    ),
    SensorEntityDescription(
        key=TYPE_MAXDAILYGUST,
        name="Max Gust",
        icon="mdi:weather-windy",
        native_unit_of_measurement=SPEED_MILES_PER_HOUR,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_MONTHLYRAININ,
        name="Monthly Rain",
        icon="mdi:water",
        native_unit_of_measurement=PRECIPITATION_INCHES,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_PM25_24H,
        name="PM25 24h Avg",
        native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
        device_class=SensorDeviceClass.PM25,
    ),
    SensorEntityDescription(
        key=TYPE_PM25_IN,
        name="PM25 Indoor",
        native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
        device_class=SensorDeviceClass.PM25,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_PM25_IN_24H,
        name="PM25 Indoor 24h Avg",
        native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
        device_class=SensorDeviceClass.PM25,
    ),
    SensorEntityDescription(
        key=TYPE_PM25,
        name="PM25",
        native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
        device_class=SensorDeviceClass.PM25,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_SOILHUM10,
        name="Soil Humidity 10",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
    ),
    SensorEntityDescription(
        key=TYPE_SOILHUM1,
        name="Soil Humidity 1",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
    ),
    SensorEntityDescription(
        key=TYPE_SOILHUM2,
        name="Soil Humidity 2",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
    ),
    SensorEntityDescription(
        key=TYPE_SOILHUM3,
        name="Soil Humidity 3",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
    ),
    SensorEntityDescription(
        key=TYPE_SOILHUM4,
        name="Soil Humidity 4",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
    ),
    SensorEntityDescription(
        key=TYPE_SOILHUM5,
        name="Soil Humidity 5",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
    ),
    SensorEntityDescription(
        key=TYPE_SOILHUM6,
        name="Soil Humidity 6",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
    ),
    SensorEntityDescription(
        key=TYPE_SOILHUM7,
        name="Soil Humidity 7",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
    ),
    SensorEntityDescription(
        key=TYPE_SOILHUM8,
        name="Soil Humidity 8",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
    ),
    SensorEntityDescription(
        key=TYPE_SOILHUM9,
        name="Soil Humidity 9",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
    ),
    SensorEntityDescription(
        key=TYPE_SOILTEMP10F,
        name="Soil Temp 10",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_SOILTEMP1F,
        name="Soil Temp 1",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_SOILTEMP2F,
        name="Soil Temp 2",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_SOILTEMP3F,
        name="Soil Temp 3",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_SOILTEMP4F,
        name="Soil Temp 4",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_SOILTEMP5F,
        name="Soil Temp 5",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_SOILTEMP6F,
        name="Soil Temp 6",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_SOILTEMP7F,
        name="Soil Temp 7",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_SOILTEMP8F,
        name="Soil Temp 8",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_SOILTEMP9F,
        name="Soil Temp 9",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_SOLARRADIATION,
        name="Solar Rad",
        native_unit_of_measurement=IRRADIATION_WATTS_PER_SQUARE_METER,
        device_class=SensorDeviceClass.ILLUMINANCE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        # Same display name as TYPE_SOLARRADIATION; the entity ID is
        # disambiguated in AmbientWeatherSensor.__init__.
        key=TYPE_SOLARRADIATION_LX,
        name="Solar Rad",
        native_unit_of_measurement=LIGHT_LUX,
        device_class=SensorDeviceClass.ILLUMINANCE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_TEMP10F,
        name="Temp 10",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_TEMP1F,
        name="Temp 1",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_TEMP2F,
        name="Temp 2",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_TEMP3F,
        name="Temp 3",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_TEMP4F,
        name="Temp 4",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_TEMP5F,
        name="Temp 5",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_TEMP6F,
        name="Temp 6",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_TEMP7F,
        name="Temp 7",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_TEMP8F,
        name="Temp 8",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_TEMP9F,
        name="Temp 9",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_TEMPF,
        name="Temp",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_TEMPINF,
        name="Inside Temp",
        native_unit_of_measurement=TEMP_FAHRENHEIT,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_TOTALRAININ,
        name="Lifetime Rain",
        icon="mdi:water",
        native_unit_of_measurement=PRECIPITATION_INCHES,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_UV,
        name="UV Index",
        native_unit_of_measurement="Index",
        # NOTE(review): UV index is unitless, not an illuminance — confirm
        # this device class is intentional.
        device_class=SensorDeviceClass.ILLUMINANCE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_WEEKLYRAININ,
        name="Weekly Rain",
        icon="mdi:water",
        native_unit_of_measurement=PRECIPITATION_INCHES,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_WINDDIR,
        name="Wind Dir",
        icon="mdi:weather-windy",
        native_unit_of_measurement=DEGREE,
    ),
    SensorEntityDescription(
        key=TYPE_WINDDIR_AVG10M,
        name="Wind Dir Avg 10m",
        icon="mdi:weather-windy",
        native_unit_of_measurement=DEGREE,
    ),
    SensorEntityDescription(
        key=TYPE_WINDDIR_AVG2M,
        name="Wind Dir Avg 2m",
        icon="mdi:weather-windy",
        native_unit_of_measurement=DEGREE,
    ),
    SensorEntityDescription(
        key=TYPE_WINDGUSTDIR,
        name="Gust Dir",
        icon="mdi:weather-windy",
        native_unit_of_measurement=DEGREE,
    ),
    SensorEntityDescription(
        key=TYPE_WINDGUSTMPH,
        name="Wind Gust",
        icon="mdi:weather-windy",
        native_unit_of_measurement=SPEED_MILES_PER_HOUR,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_WINDSPDMPH_AVG10M,
        name="Wind Avg 10m",
        icon="mdi:weather-windy",
        native_unit_of_measurement=SPEED_MILES_PER_HOUR,
    ),
    SensorEntityDescription(
        key=TYPE_WINDSPDMPH_AVG2M,
        name="Wind Avg 2m",
        icon="mdi:weather-windy",
        native_unit_of_measurement=SPEED_MILES_PER_HOUR,
    ),
    SensorEntityDescription(
        key=TYPE_WINDSPEEDMPH,
        name="Wind Speed",
        icon="mdi:weather-windy",
        native_unit_of_measurement=SPEED_MILES_PER_HOUR,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_YEARLYRAININ,
        name="Yearly Rain",
        icon="mdi:water",
        native_unit_of_measurement=PRECIPITATION_INCHES,
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
)
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Set up Ambient PWS sensors based on a config entry."""
    ambient = hass.data[DOMAIN][entry.entry_id]
    sensors = []
    for mac_address, station in ambient.stations.items():
        for description in SENSOR_DESCRIPTIONS:
            # Only create entities for data points this station reports.
            if description.key in station[ATTR_LAST_DATA]:
                sensors.append(
                    AmbientWeatherSensor(
                        ambient, mac_address, station[ATTR_NAME], description
                    )
                )
    async_add_entities(sensors)
class AmbientWeatherSensor(AmbientWeatherEntity, SensorEntity):
    """Define an Ambient sensor."""

    def __init__(
        self,
        ambient: AmbientStation,
        mac_address: str,
        station_name: str,
        description: EntityDescription,
    ) -> None:
        """Initialize the sensor."""
        super().__init__(ambient, mac_address, station_name, description)
        if description.key == TYPE_SOLARRADIATION_LX:
            # TYPE_SOLARRADIATION and TYPE_SOLARRADIATION_LX share a display
            # name, so steer the lux variant to a distinct entity ID.
            self.entity_id = f"sensor.{station_name}_solar_rad_lx"

    @callback
    def update_from_latest_data(self) -> None:
        """Fetch new state data for the sensor."""
        key = self.entity_description.key
        raw = self._ambient.stations[self._mac_address][ATTR_LAST_DATA][key]
        if key == TYPE_LASTRAIN:
            # The station reports the last-rain instant as an ISO-style
            # timestamp string; convert it to an aware datetime.
            self._attr_native_value = datetime.strptime(raw, "%Y-%m-%dT%H:%M:%S.%f%z")
        else:
            self._attr_native_value = raw
|
{
"content_hash": "36b2964028501862e11b16481ff2a58b",
"timestamp": "",
"source": "github",
"line_count": 648,
"max_line_length": 87,
"avg_line_length": 32.43518518518518,
"alnum_prop": 0.6672851841278904,
"repo_name": "home-assistant/home-assistant",
"id": "c5b8b57297ffb8f5024895f65cc1755d7262d505",
"size": "21018",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/ambient_station/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
}
|
from sqlalchemy import and_, or_, inspect
from datetime import timedelta, datetime, date
from ggrc.models import Notification, NotificationType, ObjectType
from ggrc import db
"""
exposed functions
handle_workflow_modify,
handle_cycle_task_group_object_task_put,
handle_cycle_created,
handle_cycle_modify,
handle_cycle_task_status_change,
"""
def handle_task_group_task(obj, notif_type=None):
  """Schedule an advance notification for a task group task, if missing."""
  if not notif_type:
    return
  if get_notification(obj):
    # A pending notification already exists; nothing to do.
    return
  cycle_start = obj.task_group.workflow.next_cycle_start_date
  add_notif(obj, notif_type, cycle_start - timedelta(notif_type.advance_notice))
def handle_workflow_modify(sender, obj=None, src=None, service=None):
  """Schedule start notifications for an active, recurring workflow.

  No-op for inactive or one-time workflows. Ensures next_cycle_start_date
  is set, queues a "workflow starts in" notification plus a
  "cycle_start_failed" fallback, and propagates a type to each task.
  """
  if obj.status != "Active" or obj.frequency == "one_time":
    return
  if not obj.next_cycle_start_date:
    obj.next_cycle_start_date = date.today()
  notification = get_notification(obj)
  notif_type = get_notification_type(
      "{}_workflow_starts_in".format(obj.frequency))
  if not notification:
    send_on = obj.next_cycle_start_date - timedelta(notif_type.advance_notice)
    add_notif(obj, notif_type, send_on)
    # notif_type is rebound here, so the task loop below receives
    # "cycle_start_failed" when no notification existed, and the
    # "*_workflow_starts_in" type otherwise.
    # NOTE(review): that asymmetry looks accidental — confirm intended.
    notif_type = get_notification_type("cycle_start_failed")
    add_notif(obj, notif_type, obj.next_cycle_start_date + timedelta(1))
  for task_group in obj.task_groups:
    for task_group_task in task_group.task_group_tasks:
      handle_task_group_task(task_group_task, notif_type)
def add_cycle_task_due_notifications(obj):
  """Queue "due in" and "due today" notifications for a cycle task."""
  if obj.status == "Verified":
    return
  if not obj.cycle_task_group.cycle.is_current:
    return
  frequency = obj.cycle_task_group.cycle.workflow.frequency
  for type_name in ("{}_cycle_task_due_in".format(frequency),
                    "cycle_task_due_today"):
    notif_type = get_notification_type(type_name)
    send_on = obj.end_date - timedelta(notif_type.advance_notice)
    add_notif(obj, notif_type, send_on)
def add_new_cycle_task_notifications(obj, start_notif_type=None):
  """Queue a start notification (due today) plus due-date notifications."""
  add_notif(obj, start_notif_type, date.today())
  add_cycle_task_due_notifications(obj)
def add_cycle_task_reassigned_notification(obj):
  """Queue a "reassigned" notification for a cycle task.

  Skipped unless the object already received an initial notification
  (reassigned / cycle created / manual cycle created).
  """
  initial_names = ("cycle_task_reassigned",
                   "cycle_created",
                   "manual_cycle_created")
  already_sent = db.session.query(Notification)\
      .join(ObjectType)\
      .join(NotificationType)\
      .filter(and_(Notification.object_id == obj.id,  # noqa
                   ObjectType.name == obj.__class__.__name__,
                   Notification.sent_at != None,
                   or_(*[NotificationType.name == name
                         for name in initial_names])))
  if already_sent.count() == 0:
    return
  add_notif(obj, get_notification_type("cycle_task_reassigned"))
def modify_cycle_task_notification(obj, notification_name):
  """Reschedule (or remove) a pending notification after a date change.

  Looks up the unsent notification of the given name for obj, recomputes
  its send date from obj.end_date, and updates/creates/deletes it.
  """
  notif = db.session.query(Notification)\
      .join(ObjectType)\
      .join(NotificationType)\
      .filter(and_(Notification.object_id == obj.id,
                   ObjectType.name == obj.__class__.__name__,
                   Notification.sent_at == None,  # noqa
                   NotificationType.name == notification_name,
                   ))
  notif_type = get_notification_type(notification_name)
  send_on = obj.end_date - timedelta(
      notif_type.advance_notice)
  # Midnight today, so date-only end dates compare cleanly.
  today = datetime.combine(date.today(), datetime.min.time())
  if send_on >= today:
    # when cycle date is moved in the future, we update the current
    # notification or add a new one.
    if notif.count() == 1:
      notif = notif.one()
      notif.send_on = obj.end_date - timedelta(
          notif.notification_type.advance_notice)
      db.session.add(notif)
    else:
      add_notif(obj, notif_type, send_on)
  else:
    # this should not be allowed, but if a cycle task is changed to a past
    # date, we remove the current pending notification if it exists
    for notif in notif.all():
      db.session.delete(notif)
def modify_cycle_task_end_date(obj):
  """Reschedule both due-date notifications after an end-date change."""
  frequency = obj.cycle_task_group.cycle.workflow.frequency
  modify_cycle_task_notification(
      obj, "{}_cycle_task_due_in".format(frequency))
  modify_cycle_task_notification(obj, "cycle_task_due_today")
def check_all_cycle_tasks_finished(cycle):
  """Return True when every task in the cycle has status "Verified"."""
  # Vacuously true for a cycle with no tasks, matching set.issubset.
  return all(task.status == "Verified"
             for task in cycle.cycle_task_group_object_tasks)
def handle_cycle_task_status_change(obj, new_status, old_status):
  """Adjust notifications when a cycle task's status changes.

  Declined tasks get a "declined" notification; verified tasks lose their
  pending notifications, and the cycle gets a completion notification once
  every task is verified. Other statuses are ignored.
  """
  if obj.status == "Declined":
    add_notif(obj, get_notification_type("cycle_task_declined"))
    return
  if obj.status != "Verified":
    return
  for pending in get_notification(obj):
    db.session.delete(pending)
  cycle = obj.cycle_task_group.cycle
  if check_all_cycle_tasks_finished(cycle):
    add_notif(cycle, get_notification_type("all_cycle_tasks_completed"))
  db.session.flush()
def handle_cycle_task_group_object_task_put(obj):
  """Dispatch notification updates for a modified cycle task."""
  attrs = inspect(obj).attrs
  if attrs.contact.history.has_changes():
    add_cycle_task_reassigned_notification(obj)
  if attrs.end_date.history.has_changes():
    modify_cycle_task_end_date(obj)
def remove_all_cycle_task_notifications(obj):
  """Delete every unsent notification of every task in the cycle."""
  for cycle_task in obj.cycle_task_group_object_tasks:
    for pending in get_notification(cycle_task):
      db.session.delete(pending)
def handle_cycle_modify(sender, obj=None, src=None, service=None):
  """Drop pending task notifications when a cycle stops being current."""
  if obj.is_current:
    return
  remove_all_cycle_task_notifications(obj)
def handle_cycle_created(sender, obj=None, src=None, service=None,
                         manually=False):
  """Queue notifications for a newly created cycle and each of its tasks."""
  notification = get_notification(obj)
  if not notification:
    # Flush so the cycle has a database id before referencing it.
    db.session.flush()
    notification_type = get_notification_type(
        "manual_cycle_created" if manually else "cycle_created"
    )
    add_notif(obj, notification_type)
  # NOTE(review): notification_type is only bound inside the branch above;
  # if an unsent notification already exists this loop raises NameError.
  # Presumably get_notification() is always empty for a new cycle — confirm.
  for cycle_task_group in obj.cycle_task_groups:
    for task in cycle_task_group.cycle_task_group_tasks:
      add_new_cycle_task_notifications(task, notification_type)
def get_notification(obj):
  """Return the list of unsent Notification rows for the given object."""
  # maybe we shouldn't return different things here.
  result = db.session.query(Notification).join(ObjectType).filter(
      and_(Notification.object_id == obj.id,
           ObjectType.name == obj.__class__.__name__,
           Notification.sent_at == None))  # noqa
  return result.all()
def get_object_type(obj):
  """Return the ObjectType row whose name matches the object's class."""
  class_name = obj.__class__.__name__
  return db.session.query(ObjectType).filter(
      ObjectType.name == class_name).one()
def get_notification_type(name):
  """Return the NotificationType with the given name, or None if absent."""
  query = db.session.query(NotificationType)
  return query.filter(NotificationType.name == name).first()
def add_notif(obj, notif_type, send_on=None):
  """Queue a notification for obj; send_on defaults to today."""
  if not send_on:
    send_on = date.today()
  db.session.add(Notification(
      object_id=obj.id,
      object_type=get_object_type(obj),
      notification_type=notif_type,
      send_on=send_on,
  ))
|
{
"content_hash": "f6334bcfe27e68c1d30266cbe015a59d",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 79,
"avg_line_length": 32.649532710280376,
"alnum_prop": 0.6789752397309289,
"repo_name": "hasanalom/ggrc-core",
"id": "3e4e0a961c1acf1af382d4a2394b1c87c969bed4",
"size": "7228",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/ggrc_workflows/notification/notification_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "235548"
},
{
"name": "Cucumber",
"bytes": "140478"
},
{
"name": "HTML",
"bytes": "943449"
},
{
"name": "JavaScript",
"bytes": "1205686"
},
{
"name": "Makefile",
"bytes": "5936"
},
{
"name": "Mako",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "1874549"
},
{
"name": "Ruby",
"bytes": "1496"
},
{
"name": "Shell",
"bytes": "11719"
}
],
"symlink_target": ""
}
|
"""Example for spinner that looks like loader
"""
from __future__ import unicode_literals
import os
import time
import sys
import random
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from halo import Halo
spinner = Halo(text='Downloading dataset.zip', spinner='dots')
try:
    spinner.start()
    # Simulate a download by advancing a percentage with random pauses.
    for pct in range(100):
        spinner.text = '{}% Downloaded dataset.zip'.format(pct)
        time.sleep(random.random())
    spinner.succeed('Downloaded dataset.zip')
except (KeyboardInterrupt, SystemExit):
    # Stop the spinner cleanly if the user interrupts the demo.
    spinner.stop()
|
{
"content_hash": "80b6a277bb46cc2a506edadddaa65257",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 76,
"avg_line_length": 25.272727272727273,
"alnum_prop": 0.7068345323741008,
"repo_name": "ManrajGrover/halo",
"id": "5a770d2f7b273527786aad6a1d24eeb9f5a4a1de",
"size": "580",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/loader_spin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46702"
}
],
"symlink_target": ""
}
|
"""
Tests of the PyNNNumpyIO and PyNNTextIO classes
"""
from __future__ import with_statement, division
import numpy
import quantities as pq
import os
try:
import unittest2 as unittest
except ImportError:
import unittest
from neo.core import Segment, AnalogSignal, SpikeTrain
from neo.io import PyNNNumpyIO, PyNNTextIO
from neo.test.tools import assert_arrays_equal, assert_file_contents_equal
NCELLS = 5
#TODO: common test fails.
from neo.test.io.common_io_test import BaseTestIO
#class CommonTestPyNNNumpyIO(BaseTestIO, unittest.TestCase):
# ioclass = PyNNNumpyIO
class CommonTestPyNNTextIO(BaseTestIO, unittest.TestCase):
    """Run neo's generic IO test suite against PyNNTextIO."""
    ioclass = PyNNTextIO
    # Round-trip equality is not guaranteed for this IO, so the generic
    # suite skips its write-then-read comparison.
    read_and_write_is_bijective = False
def read_test_file(filename):
    """Load a PyNN .npz file, returning (data array, metadata dict).

    Metadata values are eval'd back into Python objects where possible,
    falling back to the raw string on any error.
    """
    contents = numpy.load(filename)
    data = contents["data"]
    metadata = {}
    for name,value in contents['metadata']:
        try:
            # NOTE(review): eval on file contents is unsafe for untrusted
            # files; tolerable here only because the tests write the file.
            metadata[name] = eval(value)
        except Exception:
            metadata[name] = value
    return data, metadata
# Tell test collectors this helper is not itself a test.
read_test_file.__test__ = False
class BaseTestPyNNIO(object):
    """Shared fixture and helpers for the PyNN IO test cases below."""
    __test__ = False

    def tearDown(self):
        # Remove the data file generated by setUp, if it exists.
        if os.path.exists(self.test_file):
            os.remove(self.test_file)

    def test_write_segment(self):
        """Read a segment back in, rewrite it, and compare the two files."""
        reader = self.io_cls(self.test_file)
        write_test_file = "write_test.%s" % self.file_extension
        writer = self.io_cls(write_test_file)
        segment = reader.read_segment(lazy=False, cascade=True)
        writer.write_segment(segment)
        assert_file_contents_equal(self.test_file, write_test_file)
        if os.path.exists(write_test_file):
            os.remove(write_test_file)

    def build_test_data(self, variable='v'):
        """Build a (505, 2) [signal, cell-index] array plus its metadata."""
        metadata = {
            'size': NCELLS,
            'first_index': 0,
            'first_id': 0,
            'n': 505,
            'variable': variable,
            'last_id': 4,
            'last_index': 5,
            'dt': 0.1,
            'label': "population0",
        }
        if variable == 'v':
            metadata['units'] = 'mV'
        elif variable == 'spikes':
            metadata['units'] = 'ms'
        data = numpy.empty((505, 2))
        # 101 rows per cell: column 0 is the signal, column 1 the cell index.
        for cell in range(NCELLS):
            rows = slice(cell * 101, (cell + 1) * 101)
            data[rows, 0] = numpy.arange(cell, cell + 101, dtype=float)
            data[rows, 1] = cell * numpy.ones((101,), dtype=float)
        return data, metadata
    build_test_data.__test__ = False
class BaseTestPyNNIO_Signals(BaseTestPyNNIO):
    """Tests against a file containing analog signals ('v' variable)."""
    def setUp(self):
        # Each concrete subclass supplies file_extension and write_test_file.
        self.test_file = "test_file_v.%s" % self.file_extension
        self.write_test_file("v")
    def test_read_segment_containing_analogsignals_using_eager_cascade(self):
        # eager == not lazy
        io = self.io_cls(self.test_file)
        segment = io.read_segment(lazy=False, cascade=True)
        self.assertIsInstance(segment, Segment)
        self.assertEqual(len(segment.analogsignals), NCELLS)
        as0 = segment.analogsignals[0]
        self.assertIsInstance(as0, AnalogSignal)
        assert_arrays_equal(as0,
                            AnalogSignal(numpy.arange(0, 101, dtype=float),
                                         sampling_period=0.1*pq.ms,
                                         t_start=0*pq.s,
                                         units=pq.mV))
        as4 = segment.analogsignals[4]
        self.assertIsInstance(as4, AnalogSignal)
        assert_arrays_equal(as4,
                            AnalogSignal(numpy.arange(4, 105, dtype=float),
                                         sampling_period=0.1*pq.ms,
                                         t_start=0*pq.s,
                                         units=pq.mV))
        # test annotations (stuff from file metadata)
    def test_read_analogsignal_using_eager(self):
        io = self.io_cls(self.test_file)
        as3 = io.read_analogsignal(lazy=False, channel_index=3)
        self.assertIsInstance(as3, AnalogSignal)
        assert_arrays_equal(as3,
                            AnalogSignal(numpy.arange(3, 104, dtype=float),
                                         sampling_period=0.1*pq.ms,
                                         t_start=0*pq.s,
                                         units=pq.mV))
        # should test annotations: 'channel_index', etc.
    def test_read_spiketrain_should_fail_with_analogsignal_file(self):
        # Reading spikes from a signal file must raise TypeError.
        io = self.io_cls(self.test_file)
        self.assertRaises(TypeError, io.read_spiketrain, channel_index=0)
class BaseTestPyNNIO_Spikes(BaseTestPyNNIO):
    """Tests against a file containing spike trains ('spikes' variable)."""
    def setUp(self):
        # Each concrete subclass supplies file_extension and write_test_file.
        self.test_file = "test_file_spikes.%s" % self.file_extension
        self.write_test_file("spikes")
    def test_read_segment_containing_spiketrains_using_eager_cascade(self):
        io = self.io_cls(self.test_file)
        segment = io.read_segment(lazy=False, cascade=True)
        self.assertIsInstance(segment, Segment)
        self.assertEqual(len(segment.spiketrains), NCELLS)
        st0 = segment.spiketrains[0]
        self.assertIsInstance(st0, SpikeTrain)
        assert_arrays_equal(st0,
                            SpikeTrain(numpy.arange(0, 101, dtype=float),
                                       t_start=0*pq.s,
                                       t_stop=101*pq.ms,
                                       units=pq.ms))
        st4 = segment.spiketrains[4]
        self.assertIsInstance(st4, SpikeTrain)
        assert_arrays_equal(st4,
                            SpikeTrain(numpy.arange(4, 105, dtype=float),
                                       t_start=0*pq.s,
                                       t_stop=105*pq.ms,
                                       units=pq.ms))
        # test annotations (stuff from file metadata)
    def test_read_spiketrain_using_eager(self):
        io = self.io_cls(self.test_file)
        st3 = io.read_spiketrain(lazy=False, channel_index=3)
        self.assertIsInstance(st3, SpikeTrain)
        assert_arrays_equal(st3,
                            SpikeTrain(numpy.arange(3, 104, dtype=float),
                                       t_start=0*pq.s,
                                       # NOTE(review): pq.s here while the
                                       # other expectations use pq.ms —
                                       # confirm this is intentional.
                                       t_stop=104*pq.s,
                                       units=pq.ms))
        # should test annotations: 'channel_index', etc.
    def test_read_analogsignal_should_fail_with_spiketrain_file(self):
        # Reading a signal from a spike file must raise TypeError.
        io = self.io_cls(self.test_file)
        self.assertRaises(TypeError, io.read_analogsignal, channel_index=2)
class BaseTestPyNNNumpyIO(object):
    """Mixin configuring the numpy (.npz) flavour of the PyNN IO tests."""
    io_cls = PyNNNumpyIO
    file_extension = "npz"

    def write_test_file(self, variable='v', check=False):
        """Write synthetic data to self.test_file; optionally verify it."""
        data, metadata = self.build_test_data(variable)
        sorted_meta = numpy.array(sorted(metadata.items()))
        numpy.savez(self.test_file, data=data, metadata=sorted_meta)
        if not check:
            return
        # Read the file back and make sure nothing was lost in transit.
        data1, metadata1 = read_test_file(self.test_file)
        assert metadata == metadata1, "%s != %s" % (metadata, metadata1)
        assert data.shape == data1.shape == (505, 2), \
            "%s, %s, (505, 2)" % (data.shape, data1.shape)
        assert (data == data1).all()
        assert metadata["n"] == 505
    write_test_file.__test__ = False
class BaseTestPyNNTextIO(object):
    """Mixin configuring the text (.txt) flavour of the PyNN IO tests."""
    io_cls = PyNNTextIO
    file_extension = "txt"

    def write_test_file(self, variable='v', check=False):
        """Write '# key = value' header lines followed by the data matrix."""
        data, metadata = self.build_test_data(variable)
        with open(self.test_file, 'wb') as fh:
            for key_value in sorted(metadata.items()):
                header = "# %s = %s\n" % key_value
                fh.write(header.encode('utf8'))
            numpy.savetxt(fh, data)
        if check:
            # Verification of text files is not implemented.
            raise NotImplementedError
    write_test_file.__test__ = False
# Concrete test cases: each pairs a file-format mixin (numpy/text) with a
# data-flavour base (signals/spikes). __test__ = True re-enables collection
# that the abstract bases disabled.
class TestPyNNNumpyIO_Signals(BaseTestPyNNNumpyIO, BaseTestPyNNIO_Signals, unittest.TestCase):
    __test__ = True
class TestPyNNNumpyIO_Spikes(BaseTestPyNNNumpyIO, BaseTestPyNNIO_Spikes, unittest.TestCase):
    __test__ = True
class TestPyNNTextIO_Signals(BaseTestPyNNTextIO, BaseTestPyNNIO_Signals, unittest.TestCase):
    __test__ = True
class TestPyNNTextIO_Spikes(BaseTestPyNNTextIO, BaseTestPyNNIO_Spikes, unittest.TestCase):
    __test__ = True
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "b7a05313020157a66b4f3a76f4260656",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 104,
"avg_line_length": 35.81858407079646,
"alnum_prop": 0.57183446571958,
"repo_name": "tkf/neo",
"id": "609bb240c80c360a41f6f761a91c5839cf18910f",
"size": "8113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neo/test/io/test_pynnio.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "518282"
}
],
"symlink_target": ""
}
|
from pynos import device
from st2actions.runners.pythonrunner import Action
class bgp_recursion(Action):
    """StackStorm action that runs pynos ``bgp.recursion`` on a VDX device."""

    def run(self, **kwargs):
        """Connect with the supplied credentials and apply bgp.recursion."""
        conn = (str(kwargs.pop('ip')), str(kwargs.pop('port')))
        auth = (str(kwargs.pop('username')), str(kwargs.pop('password')))
        test = kwargs.pop('test', False)
        callback = kwargs.pop('callback', None)
        session_args = dict(conn=conn, auth=auth, test=test, callback=callback)
        with device.Device(**session_args) as dev:
            # Any remaining kwargs are forwarded untouched to pynos.
            dev.bgp.recursion(**kwargs)
        return 0
|
{
"content_hash": "ddca8d9bf49bd663cd609e04660ead55",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 73,
"avg_line_length": 32.529411764705884,
"alnum_prop": 0.5840867992766727,
"repo_name": "tonybaloney/st2contrib",
"id": "ff96ab6243e26dc58c6429a8a90fbdeef2701fc7",
"size": "553",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "packs/vdx/actions/bgp_recursion.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "8532"
},
{
"name": "Makefile",
"bytes": "5392"
},
{
"name": "Python",
"bytes": "1285946"
},
{
"name": "Ruby",
"bytes": "3081"
},
{
"name": "Shell",
"bytes": "7547"
}
],
"symlink_target": ""
}
|
"""
The python basic types generators
"""
import random
from string import ascii_letters
from datetime import date, datetime, timedelta
from decimal import Decimal
def weighted_choice(choices):
    """
    Pick one item from ``choices``, a sequence of ``(weight, outcome)``
    pairs, with probability proportional to the weight. If the selected
    outcome is callable, its return value is used instead.

    >>> result = weighted_choice([(20,'x'), (100, 'y')])
    >>> result in ['x', 'y']
    True
    """
    total_weight = sum(weight for weight, _ in choices)
    # draw a "ticket" in [0, total) and walk the cumulative weights
    ticket = random.randint(0, total_weight - 1)
    for weight, outcome in choices:
        ticket -= weight
        if ticket < 0:
            return outcome() if callable(outcome) else outcome
    raise Exception('Bug')
def any_boolean() -> bool:
    """
    Returns True or False

    >>> result = any_boolean()
    >>> type(result)
    <class 'bool'>
    """
    return random.choice([True, False])
def any_int(min_value=0, max_value=100, **kwargs) -> int:
    """
    Return random integer from the selected range (inclusive at both ends).
    Extra keyword arguments are accepted and ignored.

    >>> result = any_int(min_value=0, max_value=100)
    >>> type(result)
    <class 'int'>
    >>> result in range(0,101)
    True
    """
    return random.randint(min_value, max_value)
def any_float(min_value=0, max_value=100, precision=2) -> float:
    """
    Returns random float, rounded to ``precision`` decimal places.

    >>> result = any_float(min_value=0, max_value=100, precision=2)
    >>> type(result)
    <class 'float'>
    >>> result >=0 and result <= 100
    True
    """
    return round(random.uniform(min_value, max_value), precision)
def any_letter(letters = ascii_letters, **kwargs) -> str:
    """
    Return one random character drawn from ``letters``.
    Extra keyword arguments are accepted and ignored.

    >>> result = any_letter(letters = ascii_letters)
    >>> type(result)
    <class 'str'>
    >>> len(result)
    1
    >>> result in ascii_letters
    True
    """
    return random.choice(letters)
def any_string(letters = ascii_letters, min_length=3, max_length=100) -> str:
    """
    Return string with random content; its length is uniformly chosen
    in [min_length, max_length] and each character comes from ``letters``.

    >>> result = any_string(letters = ascii_letters, min_length=3, max_length=100)
    >>> type(result)
    <class 'str'>
    >>> len(result) in range(3,101)
    True
    >>> any([c in ascii_letters for c in result])
    True
    """
    length = random.randint(min_length, max_length)
    # note: rebinds the `letters` name from alphabet to the generated list
    letters = [any_letter(letters=letters) for _ in range(0, length)]
    return "".join(letters)
def any_date(from_date=date(1990, 1, 1), to_date=None):
    """
    Return random date from the [from_date, to_date] interval.

    ``to_date`` defaults to ``date.today()`` evaluated *at call time*.
    (Previously the default was evaluated once at import time, so a
    long-running process could never generate dates past its start day.)

    >>> result = any_date(from_date=date(1990,1,1), to_date=date(1990,1,3))
    >>> type(result)
    <class 'datetime.date'>
    >>> result >= date(1990,1,1) and result <= date(1990,1,3)
    True
    """
    if to_date is None:
        to_date = date.today()
    # uniform offset in whole days across the (inclusive) interval
    days = any_int(min_value=0, max_value=(to_date - from_date).days)
    return from_date + timedelta(days=days)
def any_datetime(from_date=datetime(1990, 1, 1), to_date=None):
    """
    Return random datetime from the [from_date, to_date] interval.

    ``to_date`` defaults to ``datetime.now()`` evaluated *at call time*
    (previously it was evaluated once at import time). The day span is
    clamped at zero so that ``from_date == to_date`` no longer raises
    ValueError from randint; in that degenerate case the result may
    exceed ``to_date`` by up to one day of random time-of-day.

    >>> result = any_datetime(from_date=datetime(1990,1,1), to_date=datetime(1990,1,3))
    >>> type(result)
    <class 'datetime.datetime'>
    >>> result >= datetime(1990,1,1) and result <= datetime(1990,1,3)
    True
    """
    if to_date is None:
        to_date = datetime.now()
    # subtract one day so that adding up to 24h of random time cannot
    # overshoot to_date; clamp at 0 to survive sub-day intervals
    day_span = max((to_date - from_date).days - 1, 0)
    days = any_int(min_value=0, max_value=day_span)
    time = timedelta(seconds=any_int(min_value=0, max_value=24*3600-1))
    return from_date + timedelta(days=days) + time
def any_decimal(min_value=Decimal(0), max_value=Decimal('99.99'), decimal_places=2):
    """
    Return random decimal from the [min_value, max_value] interval,
    rounded to ``decimal_places`` places (built on top of any_float).

    >>> result = any_decimal(min_value=0.999, max_value=3, decimal_places=3)
    >>> type(result)
    <class 'decimal.Decimal'>
    >>> result >= Decimal('0.999') and result <= Decimal(3)
    True
    """
    raw = any_float(min_value=float(min_value),
                    max_value=float(max_value),
                    precision=decimal_places)
    # round-trip through str so Decimal gets the already-rounded value
    return Decimal(str(raw))
|
{
"content_hash": "ba02ea586217f27e6336148aeec05519",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 87,
"avg_line_length": 27.64625850340136,
"alnum_prop": 0.5809547244094488,
"repo_name": "abakar/django-whatever",
"id": "7bcef0c73727fdbb76151559c4b27c665b126078",
"size": "4088",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django_any/xunit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "16923"
},
{
"name": "Python",
"bytes": "52232"
}
],
"symlink_target": ""
}
|
"""
Base plugin (non-GUI)
"""
import math
import threading
import time
import weakref
from typing import Optional, Tuple
from electroncash.address import Address
from electroncash.bitcoin import COINBASE_MATURITY
from electroncash.plugins import BasePlugin, hook, daemon_command
from electroncash.i18n import _, ngettext, pgettext
from electroncash.util import profiler, PrintError, InvalidPassword
from electroncash import Network
from .conf import Conf, Global
from .fusion import Fusion, can_fuse_from, can_fuse_to, is_tor_port, MIN_TX_COMPONENTS
from .server import FusionServer
from .covert import limiter
import random # only used to select random coins
# candidate local SOCKS ports probed by scan_torport() when auto-detecting Tor
# (9050/9150 are, presumably, the stock tor daemon and Tor Browser defaults)
TOR_PORTS = [9050, 9150]
# if more than <N> tor connections have been made recently (see covert.py) then don't start auto-fuses.
AUTOFUSE_RECENT_TOR_LIMIT_LOWER = 60
# if more than <N> tor connections have been made recently (see covert.py) then shut down auto-fuses that aren't yet started
AUTOFUSE_RECENT_TOR_LIMIT_UPPER = 120
# heuristic factor: guess that expected number of coins in wallet in equilibrium is = (this number) / fraction
COIN_FRACTION_FUDGE_FACTOR = 10
# for semi-linked addresses (that share txids in their history), allow linking them with this probability:
KEEP_LINKED_PROBABILITY = 0.1
# how long an auto-fusion may stay in 'waiting' state (without starting-soon) before it cancels itself
AUTOFUSE_INACTIVE_TIMEOUT = 600
# how many random coins to select max in 1 batch -- used by select_random_coins
DEFAULT_MAX_COINS = 20
# select_random_coins' fallback logic assumes a reasonably large batch size
assert DEFAULT_MAX_COINS > 10
# how many autofusions can be running per-wallet
MAX_AUTOFUSIONS_PER_WALLET = 10
# cap on outputs for consolidation fusions, derived from protocol minimum
CONSOLIDATE_MAX_OUTPUTS = MIN_TX_COMPONENTS // 3
# cached UPnP handle; populated lazily by get_upnp()
pnp = None
def get_upnp():
    """Return an initialized UPnP singleton (cached in module-global `pnp`).

    Raises RuntimeError if the miniupnpc module is missing, no UPnP server
    answers discovery, or the IGD cannot be selected. Failures now chain the
    original exception (``raise ... from e``) so the root cause is visible
    in tracebacks instead of being silently discarded.
    """
    global pnp
    if pnp is not None:
        return pnp
    try:
        import miniupnpc
    except ImportError as e:
        raise RuntimeError("python miniupnpc module not installed") from e
    u = miniupnpc.UPnP()
    if u.discover() < 1:
        raise RuntimeError("can't find UPnP server")
    try:
        u.selectigd()
    except Exception as e:
        raise RuntimeError("failed to connect to UPnP IGD") from e
    pnp = u
    return u
def select_coins(wallet):
    """ Sort the wallet's coins into address buckets, returning a 5-tuple:

        (eligible, ineligible, sum_value, has_unconfirmed, has_coinbase)

    where `eligible` / `ineligible` are lists of (address, coins) pairs,
    `sum_value` is the total value of *all* coins seen (eligible or not),
    and the two flags report whether any unconfirmed coins / any coinbase
    coins were encountered anywhere in the wallet.

    An address is eligible if it satisfies all conditions:
    - the address is unfrozen
    - has 1, 2, or 3 utxo
    - all utxo are confirmed (or matured in case of coinbases)
    - has no SLP utxo or frozen utxo
    """
    # First, select all the coins
    eligible = []
    ineligible = []
    has_unconfirmed = False
    has_coinbase = False
    sum_value = 0
    mincbheight = (wallet.get_local_height() + 1 - COINBASE_MATURITY if Conf(wallet).autofuse_coinbase
                   else -1)  # -1 here causes coinbase coins to always be rejected
    for addr in wallet.get_addresses():
        acoins = list(wallet.get_addr_utxo(addr).values())
        if not acoins:
            continue  # prevent inserting empty lists into eligible/ineligible
        good = True
        if addr in wallet.frozen_addresses:
            good = False
        for i,c in enumerate(acoins):
            sum_value += c['value']  # tally up values regardless of eligibility
            # If too many coins, any SLP tokens, any frozen coins, or any
            # immature coinbase on the address -> flag all address coins as
            # ineligible if not already flagged as such.
            good = good and (
                i < 3  # must not have too many coins on the same address*
                and not c['slp_token']  # must not be SLP
                and not c['is_frozen_coin']  # must not be frozen
                and (not c['coinbase'] or c['height'] <= mincbheight)  # if coinbase -> must be mature coinbase
                )
            # * = We skip addresses with too many coins, since they take up lots
            #     of 'space' for consolidation. TODO: there is possibility of
            #     disruption here, if we get dust spammed. Need to deal with
            #     'dusty' addresses by ignoring / consolidating dusty coins.

            # Next, detect has_unconfirmed & has_coinbase:
            if c['height'] <= 0:
                # Unconfirmed -> Flag as not eligible and set the has_unconfirmed flag.
                good = False
                has_unconfirmed = True
            # Update has_coinbase flag if not already set
            has_coinbase = has_coinbase or c['coinbase']
        if good:
            eligible.append((addr,acoins))
        else:
            ineligible.append((addr,acoins))

    return eligible, ineligible, int(sum_value), bool(has_unconfirmed), bool(has_coinbase)
def select_random_coins(wallet, fraction, eligible):
    """
    Grab wallet coins with a certain probability, while also paying attention
    to obvious linkages and possible linkages.

    Returns list of list of coins (bucketed by obvious linkage).

    NOTE: `eligible` (a list of (address, coins) pairs from select_coins)
    is shuffled *in place*, so the caller's list order is modified.
    """
    # First, we want to bucket coins together when they have obvious linkage.
    # Coins that are linked together should be spent together.
    # Currently, just look at address.
    addr_coins = eligible
    random.shuffle(addr_coins)

    # While fusing we want to pay attention to semi-correlations among coins.
    # When we fuse semi-linked coins, it increases the linkage. So we try to
    # avoid doing that (but rarely, we just do it anyway :D).
    # Currently, we just look at all txids touched by the address.
    # (TODO this is a disruption vector: someone can spam multiple fusions'
    # output addrs with massive dust transactions (2900 outputs in 100 kB)
    # that make the plugin think that all those addresses are linked.)
    result_txids = set()

    result = []
    num_coins = 0
    for addr, acoins in addr_coins:
        # skip a bucket if it would bring us over the batch-size cap
        if num_coins >= DEFAULT_MAX_COINS:
            break
        elif num_coins + len(acoins) > DEFAULT_MAX_COINS:
            continue

        # For each bucket, we give a separate chance of joining.
        if random.random() > fraction:
            continue

        # Semi-linkage check:
        # We consider all txids involving the address, historical and current.
        ctxids = {txid for txid, height in wallet.get_address_history(addr)}
        collisions = ctxids.intersection(result_txids)
        # Note each collision gives a separate chance of discarding this bucket.
        if random.random() > KEEP_LINKED_PROBABILITY**len(collisions):
            continue

        # OK, no problems: let's include this bucket.
        num_coins += len(acoins)
        result.append(acoins)
        result_txids.update(ctxids)

    if not result:
        # nothing was selected, just try grabbing first nonempty bucket
        try:
            res = next(coins for addr,coins in addr_coins if coins)
            result = [res]
        except StopIteration:
            # all eligible buckets were cleared.
            pass

    return result
def get_target_params_1(wallet, wallet_conf, active_autofusions, eligible):
    """Return (target_num_autofusions, confirmed_coins_only) for this wallet,
    based on its configured fusion mode.

    WIP -- TODO: Rename this function.

    NOTE(review): previously the `wallet_conf` argument was immediately
    clobbered by a fresh `Conf(wallet)`; since the caller always passes
    `Conf(wallet)`, the redundant reassignment has been removed and the
    parameter is now actually used.
    """
    mode = wallet_conf.fusion_mode

    # Note each fusion 'consumes' a certain number of coins by freezing them,
    # so that the next fusion has less eligible coins to work with. So each
    # call to this may see a smaller n_buckets.
    n_buckets = len(eligible)
    if mode == 'normal':
        return max(2, round(n_buckets / DEFAULT_MAX_COINS)), False
    elif mode == 'fan-out':
        return max(4, math.ceil(n_buckets / (COIN_FRACTION_FUDGE_FACTOR*0.65))), False
    elif mode == 'consolidate':
        if n_buckets < MIN_TX_COMPONENTS - CONSOLIDATE_MAX_OUTPUTS:
            # Too few eligible buckets to make an effective consolidation.
            return 0, False

        # In the latter stages of consolidation, only do one fusion
        # at a time with all-confirmed rule, to make sure each fusion's outputs
        # may be consumed by the subsequent one.

        # To avoid weird loops, try to calculate the TOTAL number of coins
        # that are either 1) eligible or 2) being fused. (Should stay constant
        # as fusions are added/cancelled)
        n_coins = sum(len(acoins) for addr,acoins in eligible)
        n_total = n_coins + sum(len(f.inputs) for f in active_autofusions)
        if n_total < DEFAULT_MAX_COINS*3:
            return 1, True

        # If coins are scarce then don't make more autofusions unless we
        # have none.
        if n_buckets < DEFAULT_MAX_COINS*2:
            return 1, False

        # We still have lots of coins left, so request another autofusion.
        return MAX_AUTOFUSIONS_PER_WALLET, False
    else:  # 'custom'
        target_num_auto = wallet_conf.queued_autofuse
        confirmed_only = wallet_conf.autofuse_confirmed_only
        return int(target_num_auto), bool(confirmed_only)
def get_target_params_2(wallet_conf, sum_value):
    """Return the per-bucket selection probability ('fraction') for
    select_random_coins, derived from the wallet's fusion mode.

    WIP -- TODO: Rename this function.
    """
    mode = wallet_conf.fusion_mode
    if mode == 'custom':
        # Determine the fraction that should be used
        select_type, select_amount = wallet_conf.selector
        if select_type == 'size' and int(sum_value) != 0:
            # user wants to get a typical output of this size (in sats)
            return COIN_FRACTION_FUDGE_FACTOR * select_amount / sum_value
        if select_type == 'count' and int(select_amount) != 0:
            # user wants this number of coins
            return COIN_FRACTION_FUDGE_FACTOR / select_amount
        if select_type == 'fraction':
            # user wants this fraction
            return select_amount
        # note: fraction could be <0 or >1 but doesn't matter.
        return 0.1
    # fixed fractions for the non-custom modes; unknown modes fall back to 0.1
    return {'consolidate': 1.0, 'normal': 0.5, 'fan-out': 0.1}.get(mode, 0.1)
class FusionPlugin(BasePlugin):
    """Non-GUI core of the CashFusion plugin.

    Tracks all live Fusion objects, manages per-wallet auto-fusion (driven by
    the periodic run() thread job), knows how to locate a Tor proxy for covert
    connections, and can optionally host a FusionServer. Qt/GUI subclasses
    override the notification hooks (update_coins_ui, notify_server_status).
    """
    fusion_server = None  # FusionServer instance when this daemon hosts one
    active = True  # cleared by on_close(); makes run() a no-op afterwards
    _run_iter = 0  # tick counter used by run() to downclock the plugin timer
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs) # gives us self.config
        # Do an initial check on the tor port
        self.tor_port_good = None
        t = threading.Thread(name = 'Fusion-scan_torport_initial', target = self.scan_torport)
        t.start()
        # quick lock for the following two WeakKeyDictionary variables
        # Locking order wallet.lock -> plugin.lock.
        self.lock = threading.Lock()
        self.fusions = weakref.WeakKeyDictionary()
        self.autofusing_wallets = weakref.WeakKeyDictionary() # wallet -> password
        self.remote_donation_address: str = '' # optionally announced by the remote server in 'serverhello' message
        if tuple(self.config.get('cashfusion_server', ())) == ('cashfusion.electroncash.dk', 8787, False):
            # User's config has the old default non-SSL server. If we see this,
            # just wipe the config key so that the new default is used.
            # But only reset once, after that let them go back if that is what
            # they truly desire.
            if self.config.get('cashfusion_server_defaultresetted', 0) < 1:
                self.config.set_key('cashfusion_server', None)
                self.config.set_key('cashfusion_server_defaultresetted', 1)
    def on_close(self,):
        """Plugin shutdown: stop any hosted server and disable run()."""
        super().on_close()
        self.stop_fusion_server()
        self.active = False
    def fullname(self):
        return 'CashFusion'
    def description(self):
        return _("CashFusion Protocol")
    def set_remote_donation_address(self, address : str):
        # cap length defensively; value comes from the remote server
        self.remote_donation_address = ((isinstance(address, str) and address) or '')[:100]
    def get_server(self, ):
        return Global(self.config).server
    def set_server(self, host, port, ssl):
        """Set the fusion server; stops non-running fusions if it changed."""
        gconf = Global(self.config)
        old = gconf.server
        gconf.server = (host, port, ssl) # type/sanity checking done in setter
        if old != gconf.server:
            self.on_server_changed()
    def get_torhost(self):
        if self.has_auto_torport():
            return Global.Defaults.TorHost
        else:
            return Global(self.config).tor_host
    def set_torhost(self, host):
        ''' host should be a valid hostname '''
        if not host: return
        Global(self.config).tor_host = host
    def has_auto_torport(self, ):
        return Global(self.config).tor_port_auto
    def get_torport(self, ):
        ''' Retreive either manual port or autodetected port; may return None
        if 'auto' mode and no Tor port has been autodetected. (this is non-blocking) '''
        if self.has_auto_torport():
            return self.tor_port_good
        else:
            return Global(self.config).tor_port_manual
    def set_torport(self, port):
        # port may be 'auto' or 'manual' or an int
        gconf = Global(self.config)
        if port == 'auto':
            gconf.tor_port_auto = True
            return
        else:
            gconf.tor_port_auto = False
        if port == 'manual':
            return # we're simply going to use whatever manual port was already set
        assert isinstance(port, int)
        gconf.tor_port_manual = port
    def scan_torport(self, ):
        ''' Scan for Tor proxy on either the manual port or on a series of
        automatic ports. This is blocking. Returns port if it's up, or None if
        down / can't find. '''
        host = self.get_torhost()
        if self.has_auto_torport():
            portlist = []
            # the Tor port provided by the network's tor controller (if any)
            # takes precedence over the hardcoded candidates
            network = Network.get_instance()
            if network:
                tc = network.tor_controller
                if tc and tc.is_enabled() and tc.active_socks_port:
                    portlist.append(tc.active_socks_port)
            portlist.extend(TOR_PORTS)
        else:
            portlist = [ Global(self.config).tor_port_manual ]
        for port in portlist:
            if is_tor_port(host, port):
                self.tor_port_good = port
                break
        else:
            self.tor_port_good = None
        return self.tor_port_good
    def on_server_changed(self):
        """ When the server is changed, we stop all extant fusions that are not
        already 'running' in order to allow for the new change to take effect
        immediately. """
        self.remote_donation_address = ''
        with self.lock:
            wallets = list(self.autofusing_wallets.keys())
        for wallet in wallets:
            self._stop_fusions(wallet, 'Server changed', which='all')
        # FIXME here: restart non-auto fusions on the new server!
    def get_all_fusions(self, ):
        """ Return all still-live fusion objects that have been created using .create_fusion(),
        including autofusions and any other fusions. """
        with self.lock:
            fusions_and_times = list(self.fusions.items())
        fusions_and_times.sort(key=lambda x:x[1])
        return [f for f,t in fusions_and_times]
    def _stop_fusions(self, wallet, reason, *, not_if_running=True, which='auto'):
        """Stop this wallet's fusions; returns the ones left running.

        which may be 'all' or 'auto'."""
        # which may be 'all' or 'auto'
        with wallet.lock:
            if not hasattr(wallet, '_fusions'):
                return []
            running = []
            assert which in ('all', 'auto')
            fusions = list(wallet._fusions_auto) if which == 'auto' else list(wallet._fusions)
            for f in fusions:
                f.stop(reason, not_if_running = not_if_running)
                if f.status[0] == 'running':
                    running.append(f)
            return running
    def disable_autofusing(self, wallet):
        """Turn off auto-fusion for wallet; returns still-running fusions."""
        with self.lock:
            self.autofusing_wallets.pop(wallet, None)
        Conf(wallet).autofuse = False
        return self._stop_fusions(wallet, 'Autofusing disabled', which='auto')
    def enable_autofusing(self, wallet, password):
        """Turn on auto-fusion for wallet; password must verify."""
        if password is None and wallet.has_password():
            raise InvalidPassword()
        else:
            wallet.check_password(password)
        with self.lock:
            self.autofusing_wallets[wallet] = password
        Conf(wallet).autofuse = True
    def is_autofusing(self, wallet):
        with self.lock:
            return (wallet in self.autofusing_wallets)
    def add_wallet(self, wallet, password=None):
        ''' Attach the given wallet to fusion plugin, allowing it to be used in
        fusions with clean shutdown. Also start auto-fusions for wallets that want
        it (if no password).
        '''
        with wallet.lock:
            # Generate wallet._fusions and wallet._fusions_auto; these must
            # only be accessed with wallet.lock held.

            # all fusions relating to this wallet, either as source or target
            # or both.
            wallet._fusions = weakref.WeakSet()
            # fusions that were auto-started.
            wallet._fusions_auto = weakref.WeakSet()

        # all accesses to the above must be protected by wallet.lock

        if Conf(wallet).autofuse:
            try:
                self.enable_autofusing(wallet, password)
            except InvalidPassword:
                self.disable_autofusing(wallet)
    def remove_wallet(self, wallet):
        ''' Detach the provided wallet; returns list of active fusions. '''
        with self.lock:
            self.autofusing_wallets.pop(wallet, None)
        fusions = ()
        try:
            with wallet.lock:
                fusions = list(wallet._fusions)
                del wallet._fusions
                del wallet._fusions_auto
        except AttributeError:
            pass
        return [f for f in fusions if f.status[0] not in ('complete', 'failed')]
    def create_fusion(self, source_wallet, password, coins, target_wallet = None, max_outputs = None):
        """ Create a new Fusion object with current server/tor settings. Once created
        you must call fusion.start() to launch it.

        Both source_wallet.lock and target_wallet.lock must be held.
        FIXME: this condition is begging for a deadlock to happen when the two wallets
        are different. Need to find a better way if inter-wallet fusing actually happens.
        """
        if target_wallet is None:
            target_wallet = source_wallet # self-fuse
        assert can_fuse_from(source_wallet)
        assert can_fuse_to(target_wallet)
        host, port, ssl = self.get_server()
        if host == 'localhost':
            # as a special exemption for the local fusion server, we don't use Tor.
            torhost = None
            torport = None
        else:
            torhost = self.get_torhost()
            torport = self.get_torport()
            if torport is None:
                torport = self.scan_torport() # may block for a very short time ...
            if torport is None:
                self.notify_server_status(False, ("failed", _("Invalid Tor proxy or no Tor proxy found")))
                raise RuntimeError("can't find tor port")
        fusion = Fusion(self, target_wallet, host, port, ssl, torhost, torport)
        target_wallet._fusions.add(fusion)
        source_wallet._fusions.add(fusion)
        fusion.add_coins_from_wallet(source_wallet, password, coins)
        fusion.max_outputs = max_outputs
        with self.lock:
            # record creation time so get_all_fusions() can sort chronologically
            self.fusions[fusion] = time.time()
        return fusion
    def thread_jobs(self, ):
        return [self]
    def run(self, ):
        """Periodic thread job: starts/stops per-wallet auto-fusions based on
        Tor load, eligible coins and the wallet's fusion-mode settings."""
        # this gets called roughly every 0.1 s in the Plugins thread; downclock it to 5 s.
        run_iter = self._run_iter + 1
        if run_iter < 50:
            self._run_iter = run_iter
            return
        else:
            self._run_iter = 0
        if not self.active:
            return
        torcount = limiter.count
        # Snapshot of autofusing list; note that remove_wallet may get
        # called on one of the wallets, after lock is released.
        with self.lock:
            wallets_and_passwords = list(self.autofusing_wallets.items())
        if torcount > AUTOFUSE_RECENT_TOR_LIMIT_UPPER:
            # need tor cooldown, stop the waiting fusions
            for wallet, password in wallets_and_passwords:
                with wallet.lock:
                    if not hasattr(wallet, '_fusions'):
                        continue
                    autofusions = set(wallet._fusions_auto)
                    for f in autofusions:
                        if f.status[0] in ('complete', 'failed'):
                            wallet._fusions_auto.discard(f)
                            continue
                        if not f.stopping:
                            f.stop('Tor cooldown', not_if_running = True)
            return
        if torcount > AUTOFUSE_RECENT_TOR_LIMIT_LOWER:
            # no urgent need to stop fusions, but don't queue up any more.
            return
        for wallet, password in wallets_and_passwords:
            with wallet.lock:
                if not hasattr(wallet, '_fusions'):
                    continue
                # prune finished autofusions before counting active ones
                for f in list(wallet._fusions_auto):
                    if f.status[0] in ('complete', 'failed'):
                        wallet._fusions_auto.discard(f)
                active_autofusions = list(wallet._fusions_auto)
                num_auto = len(active_autofusions)
                wallet_conf = Conf(wallet)
                eligible, ineligible, sum_value, has_unconfirmed, has_coinbase = select_coins(wallet)
                target_num_auto, confirmed_only = get_target_params_1(wallet, wallet_conf, active_autofusions, eligible)
                if confirmed_only and has_unconfirmed:
                    for f in list(wallet._fusions_auto):
                        f.stop('Wallet has unconfirmed coins... waiting.', not_if_running = True)
                    continue
                if num_auto < min(target_num_auto, MAX_AUTOFUSIONS_PER_WALLET):
                    # we don't have enough auto-fusions running, so start one
                    fraction = get_target_params_2(wallet_conf, sum_value)
                    chosen_buckets = select_random_coins(wallet, fraction, eligible)
                    coins = [c for l in chosen_buckets for c in l]
                    if not coins:
                        self.print_error("auto-fusion skipped due to lack of coins")
                        continue
                    if wallet_conf.fusion_mode == 'consolidate':
                        max_outputs = CONSOLIDATE_MAX_OUTPUTS
                        if len(chosen_buckets) < (MIN_TX_COMPONENTS - max_outputs):
                            self.print_error("consolidating auto-fusion skipped due to lack of unrelated coins")
                            continue
                    else:
                        max_outputs = None
                    try:
                        f = self.create_fusion(wallet, password, coins, max_outputs = max_outputs)
                        f.start(inactive_timeout = AUTOFUSE_INACTIVE_TIMEOUT)
                        self.print_error("started auto-fusion")
                    except RuntimeError as e:
                        self.print_error(f"auto-fusion skipped due to error: {e}")
                        return
                    wallet._fusions_auto.add(f)
    def start_fusion_server(self, network, bindhost, port, upnp = None, announcehost = None, donation_address = None):
        """Start hosting a FusionServer; returns (host, port) it bound to."""
        if self.fusion_server:
            raise RuntimeError("server already running")
        donation_address = (isinstance(donation_address, Address) and donation_address) or None
        self.fusion_server = FusionServer(self.config, network, bindhost, port, upnp = upnp, announcehost = announcehost, donation_address = donation_address)
        self.fusion_server.start()
        return self.fusion_server.host, self.fusion_server.port
    def stop_fusion_server(self):
        # best-effort: swallow errors (e.g. server was never started)
        try:
            self.fusion_server.stop('server stopped by operator')
            self.fusion_server = None
        except Exception:
            pass
    def update_coins_ui(self, wallet):
        ''' Default implementation does nothing. Qt plugin subclass overrides
        this, which sends a signal to the main thread to update the coins tab.
        This is called by the Fusion thread (in its thread context) when it
        freezes & unfreezes coins. '''
    def notify_server_status(self, b, tup : tuple = None):
        ''' The Qt plugin subclass implements this to tell the GUI about bad
        servers. '''
        if not b: self.print_error("notify_server_status:", b, str(tup))
    @hook
    def donation_address(self, window) -> Optional[Tuple[str,Address]]:
        ''' Plugin API: Returns a tuple of (description, Address) or None. This
        is the donation address that we as a client got from the remote server
        (as opposed to the donation address we announce if we are a server). '''
        if self.remote_donation_address and Address.is_valid(self.remote_donation_address):
            return (self.fullname() + " " + _("Server") + ": " + self.get_server()[0], Address.from_string(self.remote_donation_address))
    @daemon_command
    def fusion_server_start(self, daemon, config):
        # Usage:
        #   ./electron-cash daemon fusion_server_start <bindhost>(,<announcehost>) <port>
        #   ./electron-cash daemon fusion_server_start <bindhost>(,<announcehost>) <port> upnp
        #   ./electron-cash daemon fusion_server_start <bindhost>(,<announcehost>) <port> <donation_addr>
        #   ./electron-cash daemon fusion_server_start <bindhost>(,<announcehost>) <port> upnp <donation_addr>
        # e.g.:
        #   ./electron-cash daemon fusion_server_start 0.0.0.0,myfusionserver.com 8787 upnp bitcoincash:qpxiweuqoiweweqeweqw
        #
        # The main server port will be bound on <bindhost>:<port>.
        # Covert submissions will be bound on <bindhost>:<ephemeral_port> (the port is chosen by the OS)
        # The main server will tell clients to connect to <announcehost>:<ephemeral_port> .
        # The default announcehost is based on an autodetection system, which may not work for some server networking setups.
        network = daemon.network
        if not network:
            return "error: cannot run fusion server without an SPV server connection"
        def invoke(firstarg = '0.0.0.0', sport='8787', upnp_str = None, addr_str = None):
            # parse 'bindhost[,announcehost]' from the first positional arg
            bindhost, *extrahosts = firstarg.split(',')
            if len(extrahosts) > 1:
                raise Exception("too many hosts")
            elif len(extrahosts) == 1:
                [announcehost,] = extrahosts
            else:
                announcehost = None
            port = int(sport)
            pnp = get_upnp() if upnp_str == 'upnp' else None
            if not pnp and not addr_str:
                # third arg may be addr_str, so swap the args
                addr_str = upnp_str
                upnp_str = None
            addr = None
            if addr_str:
                assert Address.is_valid(addr_str), "Invalid donation address specified"
                addr = Address.from_string(addr_str)
            return self.start_fusion_server(network, bindhost, port, upnp = pnp, announcehost = announcehost, donation_address = addr)
        try:
            host, port = invoke(*config.get('subargs', ()))
        except Exception as e:
            import traceback, sys; traceback.print_exc(file=sys.stderr)
            return f'error: {str(e)}'
        return (host, port)
    @daemon_command
    def fusion_server_stop(self, daemon, config):
        self.stop_fusion_server()
        return 'ok'
    @daemon_command
    def fusion_server_status(self, daemon, config):
        if not self.fusion_server:
            return "fusion server not running"
        return dict(poolsizes = {t: len(pool.pool) for t,pool in self.fusion_server.waiting_pools.items()})
    @daemon_command
    def fusion_server_fuse(self, daemon, config):
        # manually trigger a fuse round on the given tier (single subarg)
        if self.fusion_server is None:
            return
        subargs = config.get('subargs', ())
        if len(subargs) != 1:
            return "expecting tier"
        tier = int(subargs[0])
        num_clients = self.fusion_server.start_fuse(tier)
        return num_clients
|
{
"content_hash": "c081ec21e5ed41a67162bf430d2ad74a",
"timestamp": "",
"source": "github",
"line_count": 668,
"max_line_length": 158,
"avg_line_length": 42.377245508982035,
"alnum_prop": 0.6083086053412463,
"repo_name": "fyookball/electrum",
"id": "e78719f1978e2ff8646360cfa331a425fb0fbf2a",
"size": "29530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/fusion/plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "1574"
},
{
"name": "Makefile",
"bytes": "842"
},
{
"name": "NSIS",
"bytes": "7309"
},
{
"name": "Objective-C",
"bytes": "415997"
},
{
"name": "Python",
"bytes": "2365528"
},
{
"name": "Shell",
"bytes": "26389"
}
],
"symlink_target": ""
}
|
"""Core implementation of import.
This module is NOT meant to be directly imported! It has been designed such
that it can be bootstrapped into Python as the implementation of import. As
such it requires the injection of specific modules and attributes in order to
work. One should use importlib as the public-facing version of this module.
"""
#
# IMPORTANT: Whenever making changes to this module, be sure to run
# a top-level make in order to get the frozen version of the module
# updated. Not doing so will result in the Makefile to fail for
# all others who don't have a ./python around to freeze the module
# in the early stages of compilation.
#
# See importlib._setup() for what is injected into the global namespace.
# When editing this code be aware that code executed at import time CANNOT
# reference any injected objects! This includes not only global code but also
# anything specified at the class level.
# Bootstrap-related code ######################################################
# Placeholder for the external (filesystem-based) bootstrap module; stays
# None until it is injected from outside this module during interpreter
# startup. NOTE(review): assignment happens elsewhere -- not visible here.
_bootstrap_external = None
def _wrap(new, old):
"""Simple substitute for functools.update_wrapper."""
for replace in ['__module__', '__name__', '__qualname__', '__doc__']:
if hasattr(old, replace):
setattr(new, replace, getattr(old, replace))
new.__dict__.update(old.__dict__)
def _new_module(name):
return type(sys)(name)
# Module-level locking ########################################################

# A dict mapping module names to weakrefs of _ModuleLock instances
# Dictionary protected by the global import lock
_module_locks = {}
# A dict mapping thread ids to _ModuleLock instances
# (records which lock each thread is currently trying to acquire; read by
# _ModuleLock.has_deadlock() to walk the waits-for chain)
_blocking_on = {}
class _DeadlockError(RuntimeError):
    """Raised by _ModuleLock.acquire() when blocking would deadlock."""
    pass
class _ModuleLock:
    """A recursive lock implementation which is able to detect deadlocks
    (e.g. thread 1 trying to take locks A then B, and thread 2 trying to
    take locks B then A).
    """

    def __init__(self, name):
        self.lock = _thread.allocate_lock()    # guards this object's state
        self.wakeup = _thread.allocate_lock()  # waiters block on this lock
        self.name = name       # name of the module this lock protects
        self.owner = None      # thread id of the current holder, or None
        self.count = 0         # recursion depth of the owner
        self.waiters = 0       # number of threads blocked in acquire()

    def has_deadlock(self):
        # Deadlock avoidance for concurrent circular imports.
        # Follow the owner -> blocked-on-lock -> owner chain; if it leads
        # back to the calling thread, blocking here would deadlock.
        me = _thread.get_ident()
        tid = self.owner
        while True:
            lock = _blocking_on.get(tid)
            if lock is None:
                return False
            tid = lock.owner
            if tid == me:
                return True

    def acquire(self):
        """
        Acquire the module lock.  If a potential deadlock is detected,
        a _DeadlockError is raised.
        Otherwise, the lock is always acquired and True is returned.
        """
        tid = _thread.get_ident()
        _blocking_on[tid] = self
        try:
            while True:
                with self.lock:
                    if self.count == 0 or self.owner == tid:
                        # free, or recursive acquire by the owner
                        self.owner = tid
                        self.count += 1
                        return True
                    if self.has_deadlock():
                        raise _DeadlockError('deadlock detected by %r' % self)
                    if self.wakeup.acquire(False):
                        self.waiters += 1
                # Wait for a release() call
                self.wakeup.acquire()
                self.wakeup.release()
        finally:
            del _blocking_on[tid]

    def release(self):
        tid = _thread.get_ident()
        with self.lock:
            if self.owner != tid:
                raise RuntimeError('cannot release un-acquired lock')
            assert self.count > 0
            self.count -= 1
            if self.count == 0:
                self.owner = None
                if self.waiters:
                    # hand the wakeup lock to one blocked acquire()
                    self.waiters -= 1
                    self.wakeup.release()

    def __repr__(self):
        return '_ModuleLock({!r}) at {}'.format(self.name, id(self))
class _DummyModuleLock:
"""A simple _ModuleLock equivalent for Python builds without
multi-threading support."""
def __init__(self, name):
self.name = name
self.count = 0
def acquire(self):
self.count += 1
return True
def release(self):
if self.count == 0:
raise RuntimeError('cannot release un-acquired lock')
self.count -= 1
def __repr__(self):
return '_DummyModuleLock({!r}) at {}'.format(self.name, id(self))
class _ModuleLockManager:
    """Context manager holding the per-module import lock for *name*.

    The lock object is looked up lazily on __enter__ so that creation and
    acquisition happen together.
    """
    def __init__(self, name):
        self._name = name
        self._lock = None
    def __enter__(self):
        lock = _get_module_lock(self._name)
        self._lock = lock
        lock.acquire()
    def __exit__(self, *args, **kwargs):
        self._lock.release()
# The following two functions are for consumption by Python/import.c.
def _get_module_lock(name):
    """Get or create the module lock for a given module name.

    Acquire/release internally the global import lock to protect
    _module_locks."""
    _imp.acquire_lock()
    try:
        try:
            # _module_locks holds weakrefs; calling one may yield None if
            # the lock object has already been garbage-collected.
            lock = _module_locks[name]()
        except KeyError:
            lock = None
        if lock is None:
            if _thread is None:
                # Threadless build: use the no-op counting lock.
                lock = _DummyModuleLock(name)
            else:
                lock = _ModuleLock(name)
            def cb(ref, name=name):
                _imp.acquire_lock()
                try:
                    # bpo-31070: Check if another thread created a new lock
                    # after the previous lock was destroyed
                    # but before the weakref callback was called.
                    if _module_locks.get(name) is ref:
                        del _module_locks[name]
                finally:
                    _imp.release_lock()
            _module_locks[name] = _weakref.ref(lock, cb)
    finally:
        _imp.release_lock()
    return lock
def _lock_unlock_module(name):
    """Acquire then immediately release the module lock for *name*.

    Used to wait until a module being imported by another thread is
    completely initialized.
    """
    lock = _get_module_lock(name)
    try:
        lock.acquire()
    except _DeadlockError:
        # Concurrent circular import: accept the module in whatever
        # partially-initialized state it is in.
        return
    lock.release()
# Frame stripping magic ###############################################
def _call_with_frames_removed(f, *args, **kwds):
"""remove_importlib_frames in import.c will always remove sequences
of importlib frames that end with a call to this function
Use it instead of a normal call in places where including the importlib
frames introduces unwanted noise into the traceback (e.g. when executing
module code)
"""
return f(*args, **kwds)
def _verbose_message(message, *args, verbosity=1):
"""Print the message to stderr if -v/PYTHONVERBOSE is turned on."""
if sys.flags.verbose >= verbosity:
if not message.startswith(('#', 'import ')):
message = '# ' + message
print(message.format(*args), file=sys.stderr)
def _requires_builtin(fxn):
    """Decorator enforcing that the named module is a built-in."""
    def _requires_builtin_wrapper(self, fullname):
        if fullname in sys.builtin_module_names:
            return fxn(self, fullname)
        raise ImportError('{!r} is not a built-in module'.format(fullname),
                          name=fullname)
    _wrap(_requires_builtin_wrapper, fxn)
    return _requires_builtin_wrapper
def _requires_frozen(fxn):
    """Decorator enforcing that the named module is frozen."""
    def _requires_frozen_wrapper(self, fullname):
        if _imp.is_frozen(fullname):
            return fxn(self, fullname)
        raise ImportError('{!r} is not a frozen module'.format(fullname),
                          name=fullname)
    _wrap(_requires_frozen_wrapper, fxn)
    return _requires_frozen_wrapper
# Typically used by loader classes as a method replacement.
def _load_module_shim(self, fullname):
    """Load the specified module into sys.modules and return it.

    This method is deprecated.  Use loader.exec_module() instead.
    """
    spec = spec_from_loader(fullname, self)
    if fullname not in sys.modules:
        return _load(spec)
    # Already imported: re-execute into the existing namespace (reload).
    module = sys.modules[fullname]
    _exec(spec, module)
    return sys.modules[fullname]
# Module specifications #######################################################
def _module_repr(module):
# The implementation of ModuleType.__repr__().
loader = getattr(module, '__loader__', None)
if hasattr(loader, 'module_repr'):
# As soon as BuiltinImporter, FrozenImporter, and NamespaceLoader
# drop their implementations for module_repr. we can add a
# deprecation warning here.
try:
return loader.module_repr(module)
except Exception:
pass
try:
spec = module.__spec__
except AttributeError:
pass
else:
if spec is not None:
return _module_repr_from_spec(spec)
# We could use module.__class__.__name__ instead of 'module' in the
# various repr permutations.
try:
name = module.__name__
except AttributeError:
name = '?'
try:
filename = module.__file__
except AttributeError:
if loader is None:
return '<module {!r}>'.format(name)
else:
return '<module {!r} ({!r})>'.format(name, loader)
else:
return '<module {!r} from {!r}>'.format(name, filename)
class _installed_safely:
    """Publish *module* in sys.modules while it initializes; retract it
    again if initialization raises."""
    def __init__(self, module):
        self._module = module
        self._spec = module.__spec__
    def __enter__(self):
        # This must be done before putting the module in sys.modules
        # (otherwise an optimization shortcut in import.c becomes
        # wrong)
        self._spec._initializing = True
        sys.modules[self._spec.name] = self._module
    def __exit__(self, *args):
        spec = self._spec
        try:
            if any(arg is not None for arg in args):
                # Initialization failed: remove the half-built module.
                try:
                    del sys.modules[spec.name]
                except KeyError:
                    pass
            else:
                _verbose_message('import {!r} # {!r}', spec.name, spec.loader)
        finally:
            spec._initializing = False
class ModuleSpec:
    """The specification for a module, used for loading.

    A module's spec is the source for information about the module.  For
    data associated with the module, including source, use the spec's
    loader.

    `name` is the absolute name of the module.  `loader` is the loader
    to use when loading the module.  `parent` is the name of the
    package the module is in.  The parent is derived from the name.

    `is_package` determines if the module is considered a package or
    not.  On modules this is reflected by the `__path__` attribute.

    `origin` is the specific location used by the loader from which to
    load the module, if that information is available.  When filename is
    set, origin will match.

    `has_location` indicates that a spec's "origin" reflects a location.
    When this is True, `__file__` attribute of the module is set.

    `cached` is the location of the cached bytecode file, if any.  It
    corresponds to the `__cached__` attribute.

    `submodule_search_locations` is the sequence of path entries to
    search when importing submodules.  If set, is_package should be
    True--and False otherwise.

    Packages are simply modules that (may) have submodules.  If a spec
    has a non-None value in `submodule_search_locations`, the import
    system will consider modules loaded from the spec as packages.

    Only finders (see importlib.abc.MetaPathFinder and
    importlib.abc.PathEntryFinder) should modify ModuleSpec instances.
    """
    def __init__(self, name, loader, *, origin=None, loader_state=None,
                 is_package=None):
        self.name = name
        self.loader = loader
        self.origin = origin
        self.loader_state = loader_state
        self.submodule_search_locations = [] if is_package else None
        # File-location state, exposed through the has_location and
        # cached properties below.
        self._set_fileattr = False
        self._cached = None
    def __repr__(self):
        parts = ['name={!r}'.format(self.name),
                 'loader={!r}'.format(self.loader)]
        if self.origin is not None:
            parts.append('origin={!r}'.format(self.origin))
        if self.submodule_search_locations is not None:
            parts.append('submodule_search_locations={}'
                         .format(self.submodule_search_locations))
        return '{}({})'.format(self.__class__.__name__, ', '.join(parts))
    def __eq__(self, other):
        try:
            return (self.name == other.name
                    and self.loader == other.loader
                    and self.origin == other.origin
                    and self.submodule_search_locations ==
                        other.submodule_search_locations
                    and self.cached == other.cached
                    and self.has_location == other.has_location)
        except AttributeError:
            # *other* lacks a spec attribute: not comparable, not equal.
            return False
    @property
    def cached(self):
        # Computed lazily: deriving the bytecode path requires
        # _bootstrap_external, which is unavailable early in bootstrap.
        if self._cached is None and self._set_fileattr and self.origin is not None:
            if _bootstrap_external is None:
                raise NotImplementedError
            self._cached = _bootstrap_external._get_cached(self.origin)
        return self._cached
    @cached.setter
    def cached(self, cached):
        self._cached = cached
    @property
    def parent(self):
        """The name of the module's parent."""
        if self.submodule_search_locations is not None:
            # A package is its own parent for submodule resolution.
            return self.name
        return self.name.rpartition('.')[0]
    @property
    def has_location(self):
        return self._set_fileattr
    @has_location.setter
    def has_location(self, value):
        self._set_fileattr = bool(value)
def spec_from_loader(name, loader, *, origin=None, is_package=None):
    """Return a module spec based on various loader methods."""
    if hasattr(loader, 'get_filename'):
        # The loader knows a file location: delegate to the file-based
        # spec factory in the external bootstrap module.
        if _bootstrap_external is None:
            raise NotImplementedError
        factory = _bootstrap_external.spec_from_file_location
        if is_package is None:
            return factory(name, loader=loader)
        search = [] if is_package else None
        return factory(name, loader=loader,
                       submodule_search_locations=search)
    if is_package is None:
        if hasattr(loader, 'is_package'):
            try:
                is_package = loader.is_package(name)
            except ImportError:
                is_package = None  # aka, undefined
        else:
            # the default
            is_package = False
    return ModuleSpec(name, loader, origin=origin, is_package=is_package)
def _spec_from_module(module, loader=None, origin=None):
    # Build a ModuleSpec for an already-existing module; meant for _setup().
    spec = getattr(module, '__spec__', None)
    if spec is not None:
        return spec
    name = module.__name__
    if loader is None:
        # Fall back on whatever loader the module advertises (may be None).
        loader = getattr(module, '__loader__', None)
    location = getattr(module, '__file__', None)
    if origin is None:
        if location is None:
            origin = getattr(loader, '_ORIGIN', None)
        else:
            origin = location
    cached = getattr(module, '__cached__', None)
    try:
        submodule_search_locations = list(module.__path__)
    except AttributeError:
        submodule_search_locations = None
    spec = ModuleSpec(name, loader, origin=origin)
    spec._set_fileattr = location is not None
    spec.cached = cached
    spec.submodule_search_locations = submodule_search_locations
    return spec
def _init_module_attrs(spec, module, *, override=False):
    """Set a module's import-related attributes from *spec*.

    With override=False only missing/None attributes are filled in;
    override=True forces every attribute.  The passed-in module may not
    support attribute assignment, in which case the attributes are simply
    not set (each assignment is individually guarded).
    """
    # __name__
    if (override or getattr(module, '__name__', None) is None):
        try:
            module.__name__ = spec.name
        except AttributeError:
            pass
    # __loader__
    if override or getattr(module, '__loader__', None) is None:
        loader = spec.loader
        if loader is None:
            # A backward compatibility hack.
            if spec.submodule_search_locations is not None:
                if _bootstrap_external is None:
                    raise NotImplementedError
                _NamespaceLoader = _bootstrap_external._NamespaceLoader
                # __new__ (not the constructor) so no __init__ side effects.
                loader = _NamespaceLoader.__new__(_NamespaceLoader)
                loader._path = spec.submodule_search_locations
        try:
            module.__loader__ = loader
        except AttributeError:
            pass
    # __package__
    if override or getattr(module, '__package__', None) is None:
        try:
            module.__package__ = spec.parent
        except AttributeError:
            pass
    # __spec__ (always set, even without override)
    try:
        module.__spec__ = spec
    except AttributeError:
        pass
    # __path__ (only packages, i.e. specs with search locations, get one)
    if override or getattr(module, '__path__', None) is None:
        if spec.submodule_search_locations is not None:
            try:
                module.__path__ = spec.submodule_search_locations
            except AttributeError:
                pass
    # __file__/__cached__
    if spec.has_location:
        if override or getattr(module, '__file__', None) is None:
            try:
                module.__file__ = spec.origin
            except AttributeError:
                pass
        if override or getattr(module, '__cached__', None) is None:
            if spec.cached is not None:
                try:
                    module.__cached__ = spec.cached
                except AttributeError:
                    pass
    return module
def module_from_spec(spec):
    """Create a module based on the provided spec."""
    module = None
    loader = spec.loader
    # Typically loaders will not implement create_module().
    if hasattr(loader, 'create_module'):
        # A None return from create_module() means: use the default
        # module creation semantics below.
        module = loader.create_module(spec)
    elif hasattr(loader, 'exec_module'):
        raise ImportError('loaders that define exec_module() '
                          'must also define create_module()')
    if module is None:
        module = _new_module(spec.name)
    _init_module_attrs(spec, module)
    return module
def _module_repr_from_spec(spec):
"""Return the repr to use for the module."""
# We mostly replicate _module_repr() using the spec attributes.
name = '?' if spec.name is None else spec.name
if spec.origin is None:
if spec.loader is None:
return '<module {!r}>'.format(name)
else:
return '<module {!r} ({!r})>'.format(name, spec.loader)
else:
if spec.has_location:
return '<module {!r} from {!r}>'.format(name, spec.origin)
else:
return '<module {!r} ({})>'.format(spec.name, spec.origin)
# Used by importlib.reload() and _load_module_shim().
def _exec(spec, module):
    """Execute the spec's specified module in an existing module's namespace.

    Used by importlib.reload() and _load_module_shim().
    """
    name = spec.name
    with _ModuleLockManager(name):
        if sys.modules.get(name) is not module:
            msg = 'module {!r} not in sys.modules'.format(name)
            raise ImportError(msg, name=name)
        if spec.loader is None:
            if spec.submodule_search_locations is None:
                raise ImportError('missing loader', name=spec.name)
            # Namespace package: nothing to execute, just refresh attrs.
            _init_module_attrs(spec, module, override=True)
            return module
        _init_module_attrs(spec, module, override=True)
        if hasattr(spec.loader, 'exec_module'):
            spec.loader.exec_module(module)
        else:
            # (issue19713) Once BuiltinImporter and ExtensionFileLoader
            # have exec_module() implemented, a deprecation warning can
            # be added here.
            spec.loader.load_module(name)
    return sys.modules[name]
def _load_backward_compatible(spec):
# (issue19713) Once BuiltinImporter and ExtensionFileLoader
# have exec_module() implemented, we can add a deprecation
# warning here.
spec.loader.load_module(spec.name)
# The module must be in sys.modules at this point!
module = sys.modules[spec.name]
if getattr(module, '__loader__', None) is None:
try:
module.__loader__ = spec.loader
except AttributeError:
pass
if getattr(module, '__package__', None) is None:
try:
# Since module.__path__ may not line up with
# spec.submodule_search_paths, we can't necessarily rely
# on spec.parent here.
module.__package__ = module.__name__
if not hasattr(module, '__path__'):
module.__package__ = spec.name.rpartition('.')[0]
except AttributeError:
pass
if getattr(module, '__spec__', None) is None:
try:
module.__spec__ = spec
except AttributeError:
pass
return module
def _load_unlocked(spec):
    # A helper for direct use by the import system; caller holds the lock.
    if spec.loader is not None and not hasattr(spec.loader, 'exec_module'):
        # Legacy loader without exec_module() (and not a namespace pkg).
        return _load_backward_compatible(spec)
    module = module_from_spec(spec)
    with _installed_safely(module):
        if spec.loader is None:
            if spec.submodule_search_locations is None:
                raise ImportError('missing loader', name=spec.name)
            # A namespace package: nothing to execute.
        else:
            spec.loader.exec_module(module)
    # We don't ensure that the import-related module attributes get
    # set in the sys.modules replacement case.  Such modules are on
    # their own.
    return sys.modules[spec.name]
# A method used during testing of _load_unlocked() and by
# _load_module_shim().
def _load(spec):
    """Return a new module object, loaded by the spec's loader.

    The module is not added to its parent package.  Any module already
    present in sys.modules under the same name gets clobbered.

    (Used during testing of _load_unlocked() and by _load_module_shim().)
    """
    with _ModuleLockManager(spec.name):
        return _load_unlocked(spec)
# Loaders #####################################################################
class BuiltinImporter:
    """Meta path import for built-in modules.

    All methods are either class or static methods to avoid the need to
    instantiate the class.
    """
    @staticmethod
    def module_repr(module):
        """Return repr for the module.

        The method is deprecated.  The import machinery does the job itself.
        """
        return '<module {!r} (built-in)>'.format(module.__name__)
    @classmethod
    def find_spec(cls, fullname, path=None, target=None):
        """Return a spec for the named built-in module, else None.

        A non-None 'path' means a submodule is being searched for, which a
        built-in module can never be.
        """
        if path is not None:
            return None
        if _imp.is_builtin(fullname):
            return spec_from_loader(fullname, cls, origin='built-in')
        else:
            return None
    @classmethod
    def find_module(cls, fullname, path=None):
        """Find the built-in module.

        If 'path' is ever specified then the search is considered a failure.

        This method is deprecated.  Use find_spec() instead.
        """
        spec = cls.find_spec(fullname, path)
        return spec.loader if spec is not None else None
    @classmethod
    # Fixed: first parameter of a classmethod was misleadingly named
    # 'self'; renamed to 'cls' for correctness/consistency (callers pass
    # it implicitly, so the interface is unchanged).
    def create_module(cls, spec):
        """Create a built-in module."""
        if spec.name not in sys.builtin_module_names:
            raise ImportError('{!r} is not a built-in module'.format(spec.name),
                              name=spec.name)
        return _call_with_frames_removed(_imp.create_builtin, spec)
    @classmethod
    def exec_module(cls, module):
        """Exec a built-in module."""
        _call_with_frames_removed(_imp.exec_builtin, module)
    @classmethod
    @_requires_builtin
    def get_code(cls, fullname):
        """Return None as built-in modules do not have code objects."""
        return None
    @classmethod
    @_requires_builtin
    def get_source(cls, fullname):
        """Return None as built-in modules do not have source code."""
        return None
    @classmethod
    @_requires_builtin
    def is_package(cls, fullname):
        """Return False as built-in modules are never packages."""
        return False
    load_module = classmethod(_load_module_shim)
class FrozenImporter:
    """Meta path import for frozen modules.

    All methods are either class or static methods to avoid the need to
    instantiate the class.
    """
    @staticmethod
    def module_repr(m):
        """Return repr for the module.

        The method is deprecated.  The import machinery does the job itself.
        """
        return '<module {!r} (frozen)>'.format(m.__name__)
    @classmethod
    def find_spec(cls, fullname, path=None, target=None):
        """Return a spec for the named frozen module, else None."""
        if not _imp.is_frozen(fullname):
            return None
        return spec_from_loader(fullname, cls, origin='frozen')
    @classmethod
    def find_module(cls, fullname, path=None):
        """Find a frozen module.

        This method is deprecated.  Use find_spec() instead.
        """
        if _imp.is_frozen(fullname):
            return cls
        return None
    @classmethod
    def create_module(cls, spec):
        """Use default semantics for module creation."""
    @staticmethod
    def exec_module(module):
        """Execute the frozen module's code in its own namespace."""
        name = module.__spec__.name
        if not _imp.is_frozen(name):
            raise ImportError('{!r} is not a frozen module'.format(name),
                              name=name)
        code = _call_with_frames_removed(_imp.get_frozen_object, name)
        exec(code, module.__dict__)
    @classmethod
    def load_module(cls, fullname):
        """Load a frozen module.

        This method is deprecated.  Use exec_module() instead.
        """
        return _load_module_shim(cls, fullname)
    @classmethod
    @_requires_frozen
    def get_code(cls, fullname):
        """Return the code object for the frozen module."""
        return _imp.get_frozen_object(fullname)
    @classmethod
    @_requires_frozen
    def get_source(cls, fullname):
        """Return None as frozen modules do not have source code."""
        return None
    @classmethod
    @_requires_frozen
    def is_package(cls, fullname):
        """Return True if the frozen module is a package."""
        return _imp.is_frozen_package(fullname)
# Import itself ###############################################################
class _ImportLockContext:
"""Context manager for the import lock."""
def __enter__(self):
"""Acquire the import lock."""
_imp.acquire_lock()
def __exit__(self, exc_type, exc_value, exc_traceback):
"""Release the import lock regardless of any raised exceptions."""
_imp.release_lock()
def _resolve_name(name, package, level):
"""Resolve a relative module name to an absolute one."""
bits = package.rsplit('.', level - 1)
if len(bits) < level:
raise ValueError('attempted relative import beyond top-level package')
base = bits[0]
return '{}.{}'.format(base, name) if name else base
def _find_spec_legacy(finder, name, path):
    # Adapt a legacy find_module()-style finder to the spec protocol.
    # This would be a good place for a DeprecationWarning if
    # we ended up going that route.
    loader = finder.find_module(name, path)
    return None if loader is None else spec_from_loader(name, loader)
def _find_spec(name, path, target=None):
    """Find a module's spec by asking each finder on sys.meta_path in turn."""
    meta_path = sys.meta_path
    if meta_path is None:
        # PyImport_Cleanup() is running or has been called.
        raise ImportError("sys.meta_path is None, Python is likely "
                          "shutting down")
    if not meta_path:
        _warnings.warn('sys.meta_path is empty', ImportWarning)
    # We check sys.modules here for the reload case.  While a passed-in
    # target will usually indicate a reload there is no guarantee, whereas
    # sys.modules provides one.
    is_reload = name in sys.modules
    for finder in meta_path:
        with _ImportLockContext():
            try:
                find_spec = finder.find_spec
            except AttributeError:
                # Legacy finder without find_spec(): adapt find_module().
                spec = _find_spec_legacy(finder, name, path)
                if spec is None:
                    continue
            else:
                spec = find_spec(name, path, target)
        if spec is not None:
            # The parent import may have already imported this module.
            if not is_reload and name in sys.modules:
                module = sys.modules[name]
                try:
                    __spec__ = module.__spec__
                except AttributeError:
                    # We use the found spec since that is the one that
                    # we would have used if the parent module hadn't
                    # beaten us to the punch.
                    return spec
                else:
                    if __spec__ is None:
                        return spec
                    else:
                        # Prefer the spec already attached to the module.
                        return __spec__
            else:
                return spec
    else:
        # No finder produced a spec.
        return None
def _sanity_check(name, package, level):
"""Verify arguments are "sane"."""
if not isinstance(name, str):
raise TypeError('module name must be str, not {}'.format(type(name)))
if level < 0:
raise ValueError('level must be >= 0')
if level > 0:
if not isinstance(package, str):
raise TypeError('__package__ not set to a string')
elif not package:
raise ImportError('attempted relative import with no known parent '
'package')
if not name and level == 0:
raise ValueError('Empty module name')
# Message templates for the ModuleNotFoundError raised by the load helpers.
_ERR_MSG_PREFIX = 'No module named '
_ERR_MSG = _ERR_MSG_PREFIX + '{!r}'
def _find_and_load_unlocked(name, import_):
    # Locate name's spec and load the module; caller holds the module lock.
    path = None
    parent = name.rpartition('.')[0]
    if parent:
        if parent not in sys.modules:
            # Import the parent package first (recursively, via import_).
            _call_with_frames_removed(import_, parent)
        # Crazy side-effects!
        if name in sys.modules:
            # Importing the parent may have already created this module
            # (e.g. a circular import or the parent importing the child).
            return sys.modules[name]
        parent_module = sys.modules[parent]
        try:
            path = parent_module.__path__
        except AttributeError:
            # Parent is a plain module, not a package: no submodules.
            msg = (_ERR_MSG + '; {!r} is not a package').format(name, parent)
            raise ModuleNotFoundError(msg, name=name) from None
    spec = _find_spec(name, path)
    if spec is None:
        raise ModuleNotFoundError(_ERR_MSG.format(name), name=name)
    else:
        module = _load_unlocked(spec)
    if parent:
        # Set the module as an attribute on its parent.
        parent_module = sys.modules[parent]
        setattr(parent_module, name.rpartition('.')[2], module)
    return module
_NEEDS_LOADING = object()  # sentinel: distinguishes "absent" from a None entry in sys.modules
def _find_and_load(name, import_):
    """Find and load the module, serialized on the per-module lock."""
    with _ModuleLockManager(name):
        module = sys.modules.get(name, _NEEDS_LOADING)
        if module is _NEEDS_LOADING:
            # Not cached: do the full find/load while holding the lock.
            return _find_and_load_unlocked(name, import_)
    if module is None:
        # A None entry marks a deliberately blocked/failed import.
        message = ('import of {} halted; '
                   'None in sys.modules'.format(name))
        raise ModuleNotFoundError(message, name=name)
    # Another thread may still be initializing the module; wait for it.
    _lock_unlock_module(name)
    return module
def _gcd_import(name, package=None, level=0):
    """Import and return the module based on its name, the package the call is
    being made from, and the level adjustment.

    This function represents the greatest common denominator of functionality
    between import_module and __import__.  This includes setting __package__
    if the loader did not.
    """
    _sanity_check(name, package, level)
    if level:
        # Relative import: rewrite to an absolute name first.
        name = _resolve_name(name, package, level)
    return _find_and_load(name, _gcd_import)
def _handle_fromlist(module, fromlist, import_, *, recursive=False):
    """Figure out what __import__ should return.

    The import_ parameter is a callable which takes the name of module to
    import. It is required to decouple the function from assuming importlib's
    import implementation is desired.
    """
    # The hell that is fromlist ...
    # If a package was imported, try to import stuff from fromlist.
    if hasattr(module, '__path__'):
        for x in fromlist:
            if not isinstance(x, str):
                if recursive:
                    # We got here through a '*' expansion of __all__.
                    where = module.__name__ + '.__all__'
                else:
                    where = "``from list''"
                raise TypeError(f"Item in {where} must be str, "
                                f"not {type(x).__name__}")
            elif x == '*':
                # Expand __all__ exactly one level (recursive=True stops
                # a nested '*' from expanding again).
                if not recursive and hasattr(module, '__all__'):
                    _handle_fromlist(module, module.__all__, import_,
                                     recursive=True)
            elif not hasattr(module, x):
                # Name not already an attribute: try it as a submodule.
                from_name = '{}.{}'.format(module.__name__, x)
                try:
                    _call_with_frames_removed(import_, from_name)
                except ModuleNotFoundError as exc:
                    # Backwards-compatibility dictates we ignore failed
                    # imports triggered by fromlist for modules that don't
                    # exist.
                    if (exc.name == from_name and
                        sys.modules.get(from_name, _NEEDS_LOADING) is not None):
                        continue
                    raise
    return module
def _calc___package__(globals):
"""Calculate what __package__ should be.
__package__ is not guaranteed to be defined or could be set to None
to represent that its proper value is unknown.
"""
package = globals.get('__package__')
spec = globals.get('__spec__')
if package is not None:
if spec is not None and package != spec.parent:
_warnings.warn("__package__ != __spec__.parent "
f"({package!r} != {spec.parent!r})",
ImportWarning, stacklevel=3)
return package
elif spec is not None:
return spec.parent
else:
_warnings.warn("can't resolve package from __spec__ or __package__, "
"falling back on __name__ and __path__",
ImportWarning, stacklevel=3)
package = globals['__name__']
if '__path__' not in globals:
package = package.rpartition('.')[0]
return package
def __import__(name, globals=None, locals=None, fromlist=(), level=0):
    """Import a module.

    The 'globals' argument is used to infer where the import is occurring from
    to handle relative imports. The 'locals' argument is ignored. The
    'fromlist' argument specifies what should exist as attributes on the module
    being imported (e.g. ``from module import <fromlist>``).  The 'level'
    argument represents the package location to import from in a relative
    import (e.g. ``from ..pkg import mod`` would have a 'level' of 2).
    """
    if level == 0:
        module = _gcd_import(name)
    else:
        # Relative import: resolve the anchor package from the caller.
        globals_ = globals if globals is not None else {}
        package = _calc___package__(globals_)
        module = _gcd_import(name, package, level)
    if not fromlist:
        # Return up to the first dot in 'name'. This is complicated by the fact
        # that 'name' may be relative.
        if level == 0:
            return _gcd_import(name.partition('.')[0])
        elif not name:
            # ``from .. import x`` style: the resolved package itself.
            return module
        else:
            # Figure out where to slice the module's name up to the first dot
            # in 'name'.
            cut_off = len(name) - len(name.partition('.')[0])
            # Slice end needs to be positive to alleviate need to special-case
            # when ``'.' not in name``.
            return sys.modules[module.__name__[:len(module.__name__)-cut_off]]
    else:
        return _handle_fromlist(module, fromlist, _gcd_import)
def _builtin_from_name(name):
    # Create, execute and return a built-in module from scratch.
    spec = BuiltinImporter.find_spec(name)
    if spec is not None:
        return _load_unlocked(spec)
    raise ImportError('no built-in module named ' + name)
def _setup(sys_module, _imp_module):
    """Setup importlib by importing needed built-in modules and injecting them
    into the global namespace.

    As sys is needed for sys.modules access and _imp is needed to load built-in
    modules, those two modules must be explicitly passed in.
    """
    global _imp, sys
    _imp = _imp_module
    sys = sys_module
    # Set up the spec for existing builtin/frozen modules.
    module_type = type(sys)
    for name, module in sys.modules.items():
        if isinstance(module, module_type):
            if name in sys.builtin_module_names:
                loader = BuiltinImporter
            elif _imp.is_frozen(name):
                loader = FrozenImporter
            else:
                # Not builtin/frozen: leave it for the external bootstrap.
                continue
            spec = _spec_from_module(module, loader)
            _init_module_attrs(spec, module)
    # Directly load built-in modules needed during bootstrap.
    self_module = sys.modules[__name__]
    for builtin_name in ('_warnings',):
        if builtin_name not in sys.modules:
            builtin_module = _builtin_from_name(builtin_name)
        else:
            builtin_module = sys.modules[builtin_name]
        setattr(self_module, builtin_name, builtin_module)
    # Directly load the _thread module (needed during bootstrap).
    try:
        thread_module = _builtin_from_name('_thread')
    except ImportError:
        # Python was built without threads; _thread stays None, which
        # makes _get_module_lock() fall back to _DummyModuleLock.
        thread_module = None
    setattr(self_module, '_thread', thread_module)
    # Directly load the _weakref module (needed during bootstrap).
    weakref_module = _builtin_from_name('_weakref')
    setattr(self_module, '_weakref', weakref_module)
def _install(sys_module, _imp_module):
    """Install importlib as the implementation of import."""
    _setup(sys_module, _imp_module)
    # Register the two bootstrap finders on sys.meta_path.
    sys.meta_path.append(BuiltinImporter)
    sys.meta_path.append(FrozenImporter)
    # Hand off to the path-based (external) half of the bootstrap, which
    # also registers itself via its own _install().
    global _bootstrap_external
    import _frozen_importlib_external
    _bootstrap_external = _frozen_importlib_external
    _frozen_importlib_external._install(sys.modules[__name__])
|
{
"content_hash": "0143b0e848c2b4ab81682b9ab2a77e76",
"timestamp": "",
"source": "github",
"line_count": 1161,
"max_line_length": 80,
"avg_line_length": 33.45650301464255,
"alnum_prop": 0.5863862214556034,
"repo_name": "HuimingCheng/AutoGrading",
"id": "e2343dd430806dd73c70aa4a0b71893a6d7b950f",
"size": "38843",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "learning/web_Haotian/venv/Lib/importlib/_bootstrap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1232"
},
{
"name": "C",
"bytes": "400177"
},
{
"name": "C++",
"bytes": "197133"
},
{
"name": "CMake",
"bytes": "14482"
},
{
"name": "CSS",
"bytes": "10474"
},
{
"name": "HTML",
"bytes": "26684"
},
{
"name": "JavaScript",
"bytes": "6748"
},
{
"name": "Makefile",
"bytes": "13303"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "5769059"
},
{
"name": "Tcl",
"bytes": "1295070"
}
],
"symlink_target": ""
}
|
from openprocurement.auctions.core.utils import (
opresource,
)
from openprocurement.auctions.dgf.views.other.complaint import (
AuctionComplaintResource,
)
@opresource(name='dgfFinancialAssets:Auction Complaints',
            collection_path='/auctions/{auction_id}/complaints',
            path='/auctions/{auction_id}/complaints/{complaint_id}',
            auctionsprocurementMethodType="dgfFinancialAssets",
            description="Financial auction complaints")
class FinancialAuctionComplaintResource(AuctionComplaintResource):
    """Complaint endpoints for dgfFinancialAssets auctions.

    All handler logic is inherited unchanged from AuctionComplaintResource;
    this subclass exists only to re-register the complaint routes for the
    dgfFinancialAssets procurementMethodType via the @opresource decorator.
    """
    pass
|
{
"content_hash": "a2cc8b2eeb55a3768725c821c3fb9c3c",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 68,
"avg_line_length": 37,
"alnum_prop": 0.7441441441441441,
"repo_name": "openprocurement/openprocurement.auctions.dgf",
"id": "7ad32091c4f29c779e31ff07e55e3100562193d4",
"size": "580",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openprocurement/auctions/dgf/views/financial/complaint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "464817"
}
],
"symlink_target": ""
}
|
import argparse, os, cPickle, sys, numpy, ntpath
from pyAudioAnalysis import audioFeatureExtraction as aF
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import audioTrainTest as aT
from pyAudioAnalysis import audioSegmentation as aS
import matplotlib.pyplot as plt
import io
import os
import shutil
import ntpath
import numpy
import cPickle
import glob
from scipy.fftpack import fft
def parseArguments():
    """Parse the command-line options for the fold-evaluation script."""
    ap = argparse.ArgumentParser(prog='PROG')
    ap.add_argument('-f' , '--foldPath', nargs=1, required=True, help="path to the root of the folds")
    ap.add_argument('-m' , '--modeltype', nargs=1, required=True, help="model type")
    ap.add_argument("-p", "--classifierParam", type=float, default=1, help="classifier parameter")
    return ap.parse_args()
def computePreRec(CM, classNames):  # recall and precision computation from confusion matrix
    """Compute per-class Recall, Precision and F1 from confusion matrix CM.

    CM[i, j] counts samples of true class i predicted as class j.  Returns
    (Recall, Precision, F1) lists aligned with classNames, or None (after
    printing an error) when the sizes disagree.
    """
    numOfClasses = CM.shape[0]
    if len(classNames) != numOfClasses:
        print("Error in computePreRec! Confusion matrix and classNames list must be of the same size!")
        return
    Precision = []
    Recall = []
    F1 = []
    for i, c in enumerate(classNames):
        # float() cast fixes silent truncation under Python 2 when CM has
        # an integer dtype (int / int was floor division there).
        tp = float(CM[i, i])
        Precision.append(tp / (numpy.sum(CM[:, i]) + 0.001))
        Recall.append(tp / (numpy.sum(CM[i, :]) + 0.001))
        # +0.001 terms guard against division by zero for empty classes.
        F1.append(2 * Precision[-1] * Recall[-1] / (Precision[-1] + Recall[-1] + 0.001))
    return Recall, Precision, F1
def spectralCentroid(X):
    """Computes spectral centroid of frame (given abs(FFT)).

    Returns the amplitude-weighted mean bin position of X after peak
    normalization.  (Removed: an unused local ``L = X.shape[0]``.)
    """
    # Bin index scale; the 100/(2.0*len(X)) factor presumably maps bins to
    # a fixed 100-sample-rate Nyquist range -- TODO confirm with callers.
    ind = (numpy.arange(1, len(X) + 1)) * (100 / (2.0 * len(X)))
    Xt = X.copy()
    Xt = Xt / Xt.max()  # peak-normalize so the centroid is scale-invariant
    NUM = numpy.sum(ind * Xt)
    DEN = numpy.sum(Xt) + 0.000000001  # epsilon guards all-zero frames
    # Centroid:
    C = (NUM / DEN)
    return C
def stSpectralRollOff(X, c):
    """Computes spectral roll-off: the normalized frequency position below
    which a fraction c of the total spectral energy is contained."""
    totalEnergy = numpy.sum(X ** 2)
    fftLength = len(X)
    Thres = c * totalEnergy
    # Find the roll-off as the first position where the cumulative spectral
    # energy exceeds c * totalEnergy.
    CumSum = numpy.cumsum(X ** 2) + 0.00000001
    [a, ] = numpy.nonzero(CumSum > Thres)
    if len(a) == 0:
        return (0.0)
    return (numpy.float64(a[0]) / (float(fftLength)))
def fileFeatureExtraction(fileName, signal_type):  # feature extraction from file
    """Build one feature vector from a single .npz file.

    Loads array `signal_type` from the archive (rows = time samples,
    columns = channels; shape assumed (T, n_channels) -- TODO confirm
    against the data files), then concatenates per-channel mean, std,
    max, min, spectral centroid and spectral roll-off.
    """
    archive = numpy.load(fileName)
    raw = archive[signal_type].astype("float64")
    # Per-channel summary statistics over the time axis:
    means = raw.mean(axis=0)
    stds = raw.std(axis=0)
    maxs = raw.max(axis=0)
    mins = raw.min(axis=0)
    centroid = []
    rolloff = []
    rolloff_c = 0.9999  # roll-off energy fraction (constant, hoisted)
    for ch in range(raw.shape[1]):
        spectrum = abs(fft(raw[:, ch]))            # magnitude spectrum
        spectrum = spectrum[0:int(spectrum.shape[0] / 2)]  # first (symmetric) half
        centroid.append(spectralCentroid(spectrum))
        rolloff.append(stSpectralRollOff(spectrum, rolloff_c))
    # Final feature vector: all statistics concatenated channel-wise.
    return numpy.concatenate((means, stds, maxs, mins, centroid, rolloff))
def dirFeatureExtraction(dirNames, signal_type):  # extract features from a list of directories
    """Extract a feature matrix per directory.

    Args:
        dirNames: list of directories; each directory is one class and its
            name (last path component) becomes the class label.
        signal_type: key of the array to read from each .npz file.

    Returns:
        (classNames, features): parallel lists -- class label per directory
        and a 2-D numpy array (n_files_kept x n_features) per directory.
        Files whose feature vector contains NaN are skipped.

    Bug fixed: the original initialized `allFeatures` only when the FIRST
    file was valid (i == 0); if that file contained NaNs, stacking the next
    file raised NameError. Rows are now collected in a list and vstacked,
    which also yields a consistent 2-D shape for single-file directories.
    """
    features = []
    classNames = []
    for d in dirNames:  # for each directory
        types = ('*.npz',)
        filesList = []
        for pattern in types:
            filesList.extend(glob.glob(os.path.join(d, pattern)))
        filesList = sorted(filesList)
        featureRows = []
        for file in filesList:  # for each npz file
            fv = fileFeatureExtraction(file, signal_type)
            if numpy.isnan(fv).any():
                continue  # skip files with undefined features
            featureRows.append(fv)
        # Empty array for directories with no usable files instead of an
        # undefined name.
        allFeatures = numpy.vstack(featureRows) if featureRows else numpy.array([])
        features.append(allFeatures)
        classNames.append(d.split(os.sep)[-1])
    return classNames, features
def main(rootName,modelType,classifierParam,signal_type):
    """Run a 10-fold train/test evaluation of a 2-class (fail/success) classifier.

    For each fold directory `rootName/fold_<i>` this extracts features from
    the train/ and test/ subfolders, selects a parameter via internal
    cross-validation, trains the requested model, and accumulates a 2x2
    confusion matrix.  Prints per-fold and aggregate accuracy/F1 and
    returns (CMall, Acc, Recall, Precision, F1).

    NOTE(review): this function uses Python 2 `print` statements and will
    not run under Python 3 as written.
    """
    CMall = numpy.zeros((2,2))  # aggregate 2x2 confusion matrix over all folds
    # SVM variants take the parameter as a float (e.g. C); tree ensembles
    # expect an integer (e.g. number of estimators).
    if modelType != "svm" and modelType != "svm_rbf":
        C = [int(classifierParam)]
    else:
        C = [(classifierParam)]
    F1s = []
    Accs = []
    for ifold in range(0, 10): # for each fold
        dirName = rootName + os.sep + "fold_{0:d}".format(ifold) # get fold path name
        classNamesTrain, featuresTrain = dirFeatureExtraction([os.path.join(dirName, "train", "fail"), os.path.join(dirName, "train", "success")],signal_type) # TRAINING data feature extraction
        bestParam = aT.evaluateClassifier(featuresTrain, classNamesTrain, 2, modelType, C, 0, 0.90) # internal cross-validation (for param selection)
        classNamesTest, featuresTest = dirFeatureExtraction([os.path.join(dirName, "test", "fail"), os.path.join(dirName, "test", "success")],signal_type) # trainGradientBoosting data feature extraction
        [featuresTrainNew, MEAN, STD] = aT.normalizeFeatures(featuresTrain) # training features NORMALIZATION
        if modelType == "svm": # classifier training
            Classifier = aT.trainSVM(featuresTrainNew, bestParam)
        elif modelType == "svm_rbf":
            Classifier = aT.trainSVM_RBF(featuresTrainNew, bestParam)
        elif modelType == "randomforest":
            Classifier = aT.trainRandomForest(featuresTrainNew, bestParam)
        elif modelType == "gradientboosting":
            Classifier = aT.trainGradientBoosting(featuresTrainNew, bestParam)
        elif modelType == "extratrees":
            Classifier = aT.trainExtraTrees(featuresTrainNew, bestParam)
        # NOTE(review): an unknown modelType leaves `Classifier` undefined
        # and raises NameError below.
        CM = numpy.zeros((2,2)) # evaluation on testing data
        for iC,f in enumerate(featuresTest): # for each class
            for i in range(f.shape[0]): # for each testing sample (feature vector)
                curF = f[i,:] # get feature vector
                curF = (curF - MEAN) / STD # normalize test feature vector
                winnerClass = classNamesTrain[int(aT.classifierWrapper(Classifier, modelType, curF)[0])] # classify and get winner class
                trueClass = classNamesTest[iC] # get groundtruth class
                CM[classNamesTrain.index(trueClass)][classNamesTrain.index(winnerClass)] += 1 # update confusion matrix
        CMall += CM # update overall confusion matrix
        Recall, Precision, F1 = computePreRec(CM, classNamesTrain) # get recall, precision and F1 (per class)
        Acc = numpy.diagonal(CM).sum() / CM.sum() # get overall accuracy
        F1s.append(numpy.mean(F1)) # append average F1
        Accs.append(Acc) # append clasification accuracy
    # Report per-fold results, then averages over folds and over the
    # aggregate confusion matrix (Python 2 print statements).
    print
    print "FINAL RESULTS"
    print
    print "----------------------------------"
    print "fold\tacc\tf1"
    print "----------------------------------"
    for i in range(len(F1s)):
        print "{0:d}\t{1:.1f}\t{2:.1f}".format(i, 100*Accs[i], 100*F1s[i])
    Acc = numpy.diagonal(CMall).sum() / CMall.sum()
    Recall, Precision, F1 = computePreRec(CMall, classNamesTrain)
    print "----------------------------------"
    print "{0:s}\t{1:.1f}\t{2:.1f}".format("Avg", 100*numpy.mean(Accs), 100*numpy.mean(F1s))
    print "{0:s}\t{1:.1f}\t{2:.1f}".format("Av CM", 100*Acc, 100*numpy.mean(F1))
    print "----------------------------------"
    print
    print "Overal Confusion matrix:"
    aT.printConfusionMatrix(CMall, classNamesTrain)
    print
    print "FAIL Recall = {0:.1f}".format(100*Recall[classNamesTrain.index("fail")])
    print "FAIL Precision = {0:.1f}".format(100*Precision[classNamesTrain.index("fail")])
    print "SUCCESS Recall = {0:.1f}".format(100*Recall[classNamesTrain.index("success")])
    print "SUCCESS Precision = {0:.1f}".format(100*Precision[classNamesTrain.index("success")])
    return CMall,Acc,Recall,Precision,F1
if __name__ == '__main__':
    # Script entry point: parse CLI options and run the 10-fold evaluation.
    args = parseArguments()
    rootName = args.foldPath[0]
    modelType = args.modeltype[0]
    classifierParam = args.classifierParam
    # NOTE(review): this call is broken as written.
    #   1. main() is defined as main(rootName, modelType, classifierParam,
    #      signal_type) -- the 4th argument is missing here (TypeError).
    #   2. main() returns FIVE values (CMall, Acc, Recall, Precision, F1)
    #      but only four are unpacked (ValueError).
    # TODO: pass the intended signal_type (e.g. via a new CLI option) and
    # unpack CMall as well.
    Acc,Recall,Precision,F1 = main(rootName,modelType,classifierParam)
|
{
"content_hash": "605ddbcae9a06b64397fbb2ad6f38bb4",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 217,
"avg_line_length": 67.94915254237289,
"alnum_prop": 0.43452232476926916,
"repo_name": "MikeMpapa/EEG-Sequence-Learning",
"id": "48d50d5ea5b6c95ed63a462b3c6d1eba63883195",
"size": "12027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27721"
}
],
"symlink_target": ""
}
|
"""Keras convolution layers and image transformation layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
# imports for backwards namespace compatibility
# pylint: disable=unused-import
from tensorflow.python.keras.layers.pooling import AveragePooling1D
from tensorflow.python.keras.layers.pooling import AveragePooling2D
from tensorflow.python.keras.layers.pooling import AveragePooling3D
from tensorflow.python.keras.layers.pooling import MaxPooling1D
from tensorflow.python.keras.layers.pooling import MaxPooling2D
from tensorflow.python.keras.layers.pooling import MaxPooling3D
# pylint: enable=unused-import
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.util.tf_export import keras_export
class Conv(Layer):
  """Abstract nD convolution layer (private, used as implementation base).

  This layer creates a convolution kernel that is convolved
  (actually cross-correlated) with the layer input to produce a tensor of
  outputs. If `use_bias` is True (and a `bias_initializer` is provided),
  a bias vector is created and added to the outputs. Finally, if
  `activation` is not `None`, it is applied to the outputs as well.

  Arguments:
    rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: An integer or tuple/list of n integers, specifying the
      length of the convolution window.
    strides: An integer or tuple/list of n integers,
      specifying the stride length of the convolution.
      Specifying any stride value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, ..., channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, ...)`.
    dilation_rate: An integer or tuple/list of n integers, specifying
      the dilation rate to use for dilated convolution.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any `strides` value != 1.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: An initializer for the convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer will be used.
    kernel_regularizer: Optional regularizer for the convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
  """

  def __init__(self, rank,
               filters,
               kernel_size,
               strides=1,
               padding='valid',
               data_format=None,
               dilation_rate=1,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               trainable=True,
               name=None,
               **kwargs):
    super(Conv, self).__init__(
        trainable=trainable,
        name=name,
        activity_regularizer=regularizers.get(activity_regularizer),
        **kwargs)
    self.rank = rank
    self.filters = filters
    # Normalize scalar-or-tuple arguments to rank-length tuples.
    self.kernel_size = conv_utils.normalize_tuple(
        kernel_size, rank, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    if (self.padding == 'causal' and not isinstance(self,
                                                    (Conv1D, SeparableConv1D))):
      # Fixed message: the original concatenation produced
      # "...`Conv1D`and ``SeparableConv1D`." (missing space, stray backtick).
      raise ValueError('Causal padding is only supported for `Conv1D` '
                       'and `SeparableConv1D`.')
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(
        dilation_rate, rank, 'dilation_rate')
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.input_spec = InputSpec(ndim=self.rank + 2)

  def build(self, input_shape):
    """Create the kernel/bias variables and the convolution op."""
    input_shape = tensor_shape.TensorShape(input_shape)
    if self.data_format == 'channels_first':
      channel_axis = 1
    else:
      channel_axis = -1
    if input_shape.dims[channel_axis].value is None:
      raise ValueError('The channel dimension of the inputs '
                       'should be defined. Found `None`.')
    input_dim = int(input_shape[channel_axis])
    kernel_shape = self.kernel_size + (input_dim, self.filters)
    self.kernel = self.add_weight(
        name='kernel',
        shape=kernel_shape,
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint,
        trainable=True,
        dtype=self.dtype)
    if self.use_bias:
      self.bias = self.add_weight(
          name='bias',
          shape=(self.filters,),
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint,
          trainable=True,
          dtype=self.dtype)
    else:
      self.bias = None
    self.input_spec = InputSpec(ndim=self.rank + 2,
                                axes={channel_axis: input_dim})
    # 'causal' padding is handled by subclasses (explicit input padding),
    # so the underlying op always runs with 'valid'.
    if self.padding == 'causal':
      op_padding = 'valid'
    else:
      op_padding = self.padding
    if not isinstance(op_padding, (list, tuple)):
      op_padding = op_padding.upper()
    self._convolution_op = nn_ops.Convolution(
        input_shape,
        filter_shape=self.kernel.get_shape(),
        dilation_rate=self.dilation_rate,
        strides=self.strides,
        padding=op_padding,
        data_format=conv_utils.convert_data_format(self.data_format,
                                                   self.rank + 2))
    self.built = True

  def call(self, inputs):
    """Apply convolution, optional bias add, and optional activation."""
    outputs = self._convolution_op(inputs, self.kernel)
    if self.use_bias:
      if self.data_format == 'channels_first':
        if self.rank == 1:
          # nn.bias_add does not accept a 1D input tensor.
          bias = array_ops.reshape(self.bias, (1, self.filters, 1))
          outputs += bias
        else:
          outputs = nn.bias_add(outputs, self.bias, data_format='NCHW')
      else:
        outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')
    if self.activation is not None:
      return self.activation(outputs)
    return outputs

  def compute_output_shape(self, input_shape):
    """Return the output shape for a given input shape (spatial dims only change)."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    if self.data_format == 'channels_last':
      space = input_shape[1:-1]
      new_space = []
      for i in range(len(space)):
        new_dim = conv_utils.conv_output_length(
            space[i],
            self.kernel_size[i],
            padding=self.padding,
            stride=self.strides[i],
            dilation=self.dilation_rate[i])
        new_space.append(new_dim)
      return tensor_shape.TensorShape([input_shape[0]] + new_space +
                                      [self.filters])
    else:
      space = input_shape[2:]
      new_space = []
      for i in range(len(space)):
        new_dim = conv_utils.conv_output_length(
            space[i],
            self.kernel_size[i],
            padding=self.padding,
            stride=self.strides[i],
            dilation=self.dilation_rate[i])
        new_space.append(new_dim)
      return tensor_shape.TensorShape([input_shape[0], self.filters] +
                                      new_space)

  def get_config(self):
    """Serialize constructor arguments for layer (de)serialization."""
    config = {
        'filters': self.filters,
        'kernel_size': self.kernel_size,
        'strides': self.strides,
        'padding': self.padding,
        'data_format': self.data_format,
        'dilation_rate': self.dilation_rate,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint)
    }
    base_config = super(Conv, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def _compute_causal_padding(self):
    """Calculates padding for 'causal' option for 1-d conv layers."""
    left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)
    if self.data_format == 'channels_last':
      causal_padding = [[0, 0], [left_pad, 0], [0, 0]]
    else:
      causal_padding = [[0, 0], [0, 0], [left_pad, 0]]
    return causal_padding
@keras_export('keras.layers.Conv1D', 'keras.layers.Convolution1D')
class Conv1D(Conv):
  """1D convolution layer (e.g. temporal convolution).

  Convolves `filters` kernels of length `kernel_size` over the single
  spatial (or temporal) dimension of the input. If `use_bias` is True a
  bias vector is added to the outputs, and `activation` (if any) is then
  applied. As the first layer of a model, pass `input_shape`, e.g.
  `(10, 128)` for sequences of 10 vectors of 128 dimensions, or
  `(None, 128)` for variable-length sequences.

  Arguments:
    filters: Integer, number of output filters.
    kernel_size: Integer or tuple/list of one integer, the window length.
    strides: Integer or tuple/list of one integer, the stride. Any stride
      != 1 is incompatible with any `dilation_rate` != 1.
    padding: `"valid"`, `"causal"` or `"same"` (case-insensitive).
      `"causal"` yields dilated causal convolutions: output[t] does not
      depend on input[t+1:], useful for temporal data
      (see [WaveNet, section 2.1](https://arxiv.org/abs/1609.03499)).
    data_format: `channels_last` (default) or `channels_first`.
    dilation_rate: Integer or tuple/list of one integer, the dilation rate.
      Any value != 1 is incompatible with any `strides` value != 1.
    activation: Activation function; `None` means linear (`a(x) = x`).
    use_bias: Boolean, whether to add a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix.
    bias_initializer: Initializer for the bias vector.
    kernel_regularizer: Regularizer for the `kernel` weights matrix.
    bias_regularizer: Regularizer for the bias vector.
    activity_regularizer: Regularizer for the layer output.
    kernel_constraint: Constraint for the kernel matrix.
    bias_constraint: Constraint for the bias vector.

  Input shape:
    3D tensor with shape `(batch_size, steps, input_dim)`.

  Output shape:
    3D tensor with shape `(batch_size, new_steps, filters)`; `steps` may
    have changed due to padding or strides.
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=1,
               padding='valid',
               data_format='channels_last',
               dilation_rate=1,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    # Resolve string/dict specs into objects before delegating to Conv.
    activation_fn = activations.get(activation)
    kernel_init = initializers.get(kernel_initializer)
    bias_init = initializers.get(bias_initializer)
    kernel_reg = regularizers.get(kernel_regularizer)
    bias_reg = regularizers.get(bias_regularizer)
    activity_reg = regularizers.get(activity_regularizer)
    kernel_con = constraints.get(kernel_constraint)
    bias_con = constraints.get(bias_constraint)
    super(Conv1D, self).__init__(
        rank=1,
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activation_fn,
        use_bias=use_bias,
        kernel_initializer=kernel_init,
        bias_initializer=bias_init,
        kernel_regularizer=kernel_reg,
        bias_regularizer=bias_reg,
        activity_regularizer=activity_reg,
        kernel_constraint=kernel_con,
        bias_constraint=bias_con,
        **kwargs)

  def call(self, inputs):
    # Causal padding is applied here by left-padding the inputs; the base
    # class then convolves with 'valid' padding.
    if self.padding != 'causal':
      return super(Conv1D, self).call(inputs)
    padded = array_ops.pad(inputs, self._compute_causal_padding())
    return super(Conv1D, self).call(padded)
@keras_export('keras.layers.Conv2D', 'keras.layers.Convolution2D')
class Conv2D(Conv):
  """2D convolution layer (e.g. spatial convolution over images).

  Convolves `filters` 2D kernels with the input. If `use_bias` is True a
  bias vector is added to the outputs, and `activation` (if any) is then
  applied. As the first layer of a model, pass `input_shape` (without the
  sample axis), e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
  in `data_format="channels_last"`.

  Arguments:
    filters: Integer, number of output filters.
    kernel_size: Integer or tuple/list of 2 integers, the height and width
      of the convolution window. A single integer applies to both.
    strides: Integer or tuple/list of 2 integers, the strides along height
      and width. A single integer applies to both. Any stride != 1 is
      incompatible with any `dilation_rate` != 1.
    padding: `"valid"` or `"same"` (case-insensitive).
    data_format: `channels_last` (default) or `channels_first`.
      `channels_last` means inputs shaped `(batch, height, width, channels)`,
      `channels_first` means `(batch, channels, height, width)`. Defaults to
      the `image_data_format` value in `~/.keras/keras.json`, or
      "channels_last" if never set.
    dilation_rate: Integer or tuple/list of 2 integers, the dilation rate.
      A single integer applies to both dimensions. Any value != 1 is
      incompatible with any stride value != 1.
    activation: Activation function; `None` means linear (`a(x) = x`).
    use_bias: Boolean, whether to add a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix.
    bias_initializer: Initializer for the bias vector.
    kernel_regularizer: Regularizer for the `kernel` weights matrix.
    bias_regularizer: Regularizer for the bias vector.
    activity_regularizer: Regularizer for the layer output.
    kernel_constraint: Constraint for the kernel matrix.
    bias_constraint: Constraint for the bias vector.

  Input shape:
    4D tensor with shape `(samples, channels, rows, cols)` if
    data_format='channels_first', or `(samples, rows, cols, channels)` if
    data_format='channels_last'.

  Output shape:
    4D tensor with shape `(samples, filters, new_rows, new_cols)` if
    data_format='channels_first', or `(samples, new_rows, new_cols,
    filters)` if data_format='channels_last'. `rows` and `cols` may have
    changed due to padding.
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format=None,
               dilation_rate=(1, 1),
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    # Resolve string/dict specs into objects before delegating to Conv.
    activation_fn = activations.get(activation)
    kernel_init = initializers.get(kernel_initializer)
    bias_init = initializers.get(bias_initializer)
    kernel_reg = regularizers.get(kernel_regularizer)
    bias_reg = regularizers.get(bias_regularizer)
    activity_reg = regularizers.get(activity_regularizer)
    kernel_con = constraints.get(kernel_constraint)
    bias_con = constraints.get(bias_constraint)
    super(Conv2D, self).__init__(
        rank=2,
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activation_fn,
        use_bias=use_bias,
        kernel_initializer=kernel_init,
        bias_initializer=bias_init,
        kernel_regularizer=kernel_reg,
        bias_regularizer=bias_reg,
        activity_regularizer=activity_reg,
        kernel_constraint=kernel_con,
        bias_constraint=bias_con,
        **kwargs)
@keras_export('keras.layers.Conv3D', 'keras.layers.Convolution3D')
class Conv3D(Conv):
  """3D convolution layer (e.g. spatial convolution over volumes).

  Convolves `filters` 3D kernels with the input. If `use_bias` is True a
  bias vector is added to the outputs, and `activation` (if any) is then
  applied. As the first layer of a model, pass `input_shape` (without the
  sample axis), e.g. `input_shape=(128, 128, 128, 1)` for single-channel
  128x128x128 volumes in `data_format="channels_last"`.

  Arguments:
    filters: Integer, number of output filters.
    kernel_size: Integer or tuple/list of 3 integers, the depth, height
      and width of the convolution window. A single integer applies to all.
    strides: Integer or tuple/list of 3 integers, the stride along each
      spatial dimension. A single integer applies to all. Any stride != 1
      is incompatible with any `dilation_rate` != 1.
    padding: `"valid"` or `"same"` (case-insensitive).
    data_format: `channels_last` (default) or `channels_first`.
      `channels_last` means inputs shaped
      `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`,
      `channels_first` means
      `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
      Defaults to the `image_data_format` value in `~/.keras/keras.json`,
      or "channels_last" if never set.
    dilation_rate: Integer or tuple/list of 3 integers, the dilation rate.
      A single integer applies to all dimensions. Any value != 1 is
      incompatible with any stride value != 1.
    activation: Activation function; `None` means linear (`a(x) = x`).
    use_bias: Boolean, whether to add a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix.
    bias_initializer: Initializer for the bias vector.
    kernel_regularizer: Regularizer for the `kernel` weights matrix.
    bias_regularizer: Regularizer for the bias vector.
    activity_regularizer: Regularizer for the layer output.
    kernel_constraint: Constraint for the kernel matrix.
    bias_constraint: Constraint for the bias vector.

  Input shape:
    5D tensor with shape `(samples, channels, conv_dim1, conv_dim2,
    conv_dim3)` if data_format='channels_first', or `(samples, conv_dim1,
    conv_dim2, conv_dim3, channels)` if data_format='channels_last'.

  Output shape:
    5D tensor with shape `(samples, filters, new_conv_dim1, new_conv_dim2,
    new_conv_dim3)` if data_format='channels_first', or `(samples,
    new_conv_dim1, new_conv_dim2, new_conv_dim3, filters)` if
    data_format='channels_last'. The `new_conv_dim*` values may have
    changed due to padding.
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1, 1),
               padding='valid',
               data_format=None,
               dilation_rate=(1, 1, 1),
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    # Resolve string/dict specs into objects before delegating to Conv.
    activation_fn = activations.get(activation)
    kernel_init = initializers.get(kernel_initializer)
    bias_init = initializers.get(bias_initializer)
    kernel_reg = regularizers.get(kernel_regularizer)
    bias_reg = regularizers.get(bias_regularizer)
    activity_reg = regularizers.get(activity_regularizer)
    kernel_con = constraints.get(kernel_constraint)
    bias_con = constraints.get(bias_constraint)
    super(Conv3D, self).__init__(
        rank=3,
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activation_fn,
        use_bias=use_bias,
        kernel_initializer=kernel_init,
        bias_initializer=bias_init,
        kernel_regularizer=kernel_reg,
        bias_regularizer=bias_reg,
        activity_regularizer=activity_reg,
        kernel_constraint=kernel_con,
        bias_constraint=bias_con,
        **kwargs)
@keras_export('keras.layers.Conv2DTranspose',
'keras.layers.Convolution2DTranspose')
class Conv2DTranspose(Conv2D):
"""Transposed convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
in `data_format="channels_last"`.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
output_padding: An integer or tuple/list of 2 integers,
specifying the amount of padding along the height and width
of the output tensor.
Can be a single integer to specify the same value for all
spatial dimensions.
The amount of output padding along a given dimension must be
lower than the stride along that same dimension.
If set to `None` (default), the output shape is inferred.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation")..
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
References:
- [A guide to convolution arithmetic for deep
learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional
Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf)
"""
def __init__(self,
             filters,
             kernel_size,
             strides=(1, 1),
             padding='valid',
             output_padding=None,
             data_format=None,
             dilation_rate=(1, 1),
             activation=None,
             use_bias=True,
             kernel_initializer='glorot_uniform',
             bias_initializer='zeros',
             kernel_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None,
             bias_constraint=None,
             **kwargs):
  # All arguments are documented in the class docstring.  String/identifier
  # arguments (e.g. 'glorot_uniform', 'relu') are resolved to concrete
  # objects via the respective `get()` helpers before being forwarded to
  # the Conv2D base class.
  super(Conv2DTranspose, self).__init__(
      filters=filters,
      kernel_size=kernel_size,
      strides=strides,
      padding=padding,
      data_format=data_format,
      dilation_rate=dilation_rate,
      activation=activations.get(activation),
      use_bias=use_bias,
      kernel_initializer=initializers.get(kernel_initializer),
      bias_initializer=initializers.get(bias_initializer),
      kernel_regularizer=regularizers.get(kernel_regularizer),
      bias_regularizer=regularizers.get(bias_regularizer),
      activity_regularizer=regularizers.get(activity_regularizer),
      kernel_constraint=constraints.get(kernel_constraint),
      bias_constraint=constraints.get(bias_constraint),
      **kwargs)
  # `output_padding` is specific to transposed convolution.  When given,
  # normalize it to a 2-tuple and reject any entry that is >= the stride
  # along the same dimension, since such a value cannot be realized by the
  # underlying conv2d_transpose op.
  self.output_padding = output_padding
  if self.output_padding is not None:
    self.output_padding = conv_utils.normalize_tuple(
        self.output_padding, 2, 'output_padding')
    for stride, out_pad in zip(self.strides, self.output_padding):
      if out_pad >= stride:
        raise ValueError('Stride ' + str(self.strides) + ' must be '
                         'greater than output padding ' +
                         str(self.output_padding))
def build(self, input_shape):
  """Creates the layer's `kernel` (and optional `bias`) variables.

  Args:
    input_shape: Shape of the input; must be rank 4 with a statically
      known channel dimension.

  Raises:
    ValueError: If the input is not rank 4, or if the channel dimension
      is `None`.
  """
  input_shape = tensor_shape.TensorShape(input_shape)
  if len(input_shape) != 4:
    raise ValueError('Inputs should have rank 4. Received input shape: ' +
                     str(input_shape))
  if self.data_format == 'channels_first':
    channel_axis = 1
  else:
    channel_axis = -1
  if input_shape.dims[channel_axis].value is None:
    raise ValueError('The channel dimension of the inputs '
                     'should be defined. Found `None`.')
  input_dim = int(input_shape[channel_axis])
  # Pin the channel dimension so subsequent calls are shape-checked.
  self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
  # Note the transposed kernel layout: (kh, kw, filters, input_dim), i.e.
  # output and input channel dimensions are swapped relative to Conv2D.
  kernel_shape = self.kernel_size + (self.filters, input_dim)
  self.kernel = self.add_weight(
      name='kernel',
      shape=kernel_shape,
      initializer=self.kernel_initializer,
      regularizer=self.kernel_regularizer,
      constraint=self.kernel_constraint,
      trainable=True,
      dtype=self.dtype)
  if self.use_bias:
    self.bias = self.add_weight(
        name='bias',
        shape=(self.filters,),
        initializer=self.bias_initializer,
        regularizer=self.bias_regularizer,
        constraint=self.bias_constraint,
        trainable=True,
        dtype=self.dtype)
  else:
    self.bias = None
  self.built = True
def call(self, inputs):
  """Applies the transposed convolution to `inputs`.

  Computes the (possibly dynamic) output spatial shape from the input
  shape, kernel size, strides, padding and output_padding, runs
  `backend.conv2d_transpose`, then adds the bias and activation if
  configured.
  """
  # Use the dynamic shape so the layer works with unknown batch/spatial
  # dimensions at graph-construction time.
  inputs_shape = array_ops.shape(inputs)
  batch_size = inputs_shape[0]
  if self.data_format == 'channels_first':
    h_axis, w_axis = 2, 3
  else:
    h_axis, w_axis = 1, 2
  height, width = inputs_shape[h_axis], inputs_shape[w_axis]
  kernel_h, kernel_w = self.kernel_size
  stride_h, stride_w = self.strides
  if self.output_padding is None:
    out_pad_h = out_pad_w = None
  else:
    out_pad_h, out_pad_w = self.output_padding
  # Infer the dynamic output shape:
  out_height = conv_utils.deconv_output_length(height,
                                               kernel_h,
                                               padding=self.padding,
                                               output_padding=out_pad_h,
                                               stride=stride_h,
                                               dilation=self.dilation_rate[0])
  out_width = conv_utils.deconv_output_length(width,
                                              kernel_w,
                                              padding=self.padding,
                                              output_padding=out_pad_w,
                                              stride=stride_w,
                                              dilation=self.dilation_rate[1])
  if self.data_format == 'channels_first':
    output_shape = (batch_size, self.filters, out_height, out_width)
  else:
    output_shape = (batch_size, out_height, out_width, self.filters)
  # conv2d_transpose requires the full output shape as a tensor argument.
  output_shape_tensor = array_ops.stack(output_shape)
  outputs = backend.conv2d_transpose(
      inputs,
      self.kernel,
      output_shape_tensor,
      strides=self.strides,
      padding=self.padding,
      data_format=self.data_format,
      dilation_rate=self.dilation_rate)
  if not context.executing_eagerly():
    # Infer the static output shape:
    out_shape = self.compute_output_shape(inputs.shape)
    outputs.set_shape(out_shape)
  if self.use_bias:
    outputs = nn.bias_add(
        outputs,
        self.bias,
        data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
  if self.activation is not None:
    return self.activation(outputs)
  return outputs
def compute_output_shape(self, input_shape):
  """Computes the static output shape for a given static input shape.

  Mirrors the dynamic-shape arithmetic in `call`: the channel axis becomes
  `filters` and each spatial axis is mapped through
  `conv_utils.deconv_output_length`.

  Args:
    input_shape: `TensorShape` (or compatible) of rank 4.

  Returns:
    A `TensorShape` of rank 4.
  """
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  output_shape = list(input_shape)
  if self.data_format == 'channels_first':
    c_axis, h_axis, w_axis = 1, 2, 3
  else:
    c_axis, h_axis, w_axis = 3, 1, 2
  kernel_h, kernel_w = self.kernel_size
  stride_h, stride_w = self.strides
  if self.output_padding is None:
    out_pad_h = out_pad_w = None
  else:
    out_pad_h, out_pad_w = self.output_padding
  output_shape[c_axis] = self.filters
  output_shape[h_axis] = conv_utils.deconv_output_length(
      output_shape[h_axis],
      kernel_h,
      padding=self.padding,
      output_padding=out_pad_h,
      stride=stride_h,
      dilation=self.dilation_rate[0])
  output_shape[w_axis] = conv_utils.deconv_output_length(
      output_shape[w_axis],
      kernel_w,
      padding=self.padding,
      output_padding=out_pad_w,
      stride=stride_w,
      dilation=self.dilation_rate[1])
  return tensor_shape.TensorShape(output_shape)
def get_config(self):
  """Returns the layer config, extended with `output_padding`."""
  cfg = super(Conv2DTranspose, self).get_config()
  cfg.update(output_padding=self.output_padding)
  return cfg
@keras_export('keras.layers.Conv3DTranspose',
              'keras.layers.Convolution3DTranspose')
class Conv3DTranspose(Conv3D):
  """Transposed convolution layer (sometimes called Deconvolution).
  The need for transposed convolutions generally arises
  from the desire to use a transformation going in the opposite direction
  of a normal convolution, i.e., from something that has the shape of the
  output of some convolution to something that has the shape of its input
  while maintaining a connectivity pattern that is compatible with
  said convolution.
  When using this layer as the first layer in a model,
  provide the keyword argument `input_shape`
  (tuple of integers, does not include the sample axis),
  e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3 channels
  if `data_format="channels_last"`.
  Arguments:
    filters: Integer, the dimensionality of the output space
      (i.e. the number of output filters in the convolution).
    kernel_size: An integer or tuple/list of 3 integers, specifying the
      depth, height and width of the 3D convolution window.
      Can be a single integer to specify the same value for
      all spatial dimensions.
    strides: An integer or tuple/list of 3 integers,
      specifying the strides of the convolution along the depth, height
      and width.
      Can be a single integer to specify the same value for
      all spatial dimensions.
    padding: one of `"valid"` or `"same"` (case-insensitive).
    output_padding: An integer or tuple/list of 3 integers,
      specifying the amount of padding along the depth, height, and
      width.
      Can be a single integer to specify the same value for all
      spatial dimensions.
      The amount of output padding along a given dimension must be
      lower than the stride along that same dimension.
      If set to `None` (default), the output shape is inferred.
    data_format: A string,
      one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, depth, height, width, channels)` while `channels_first`
      corresponds to inputs with shape
      `(batch, channels, depth, height, width)`.
      It defaults to the `image_data_format` value found in your
      Keras config file at `~/.keras/keras.json`.
      If you never set it, then it will be "channels_last".
    activation: Activation function to use
      (see [activations](../activations.md)).
      If you don't specify anything, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix
      (see [initializers](../initializers.md)).
    bias_initializer: Initializer for the bias vector
      (see [initializers](../initializers.md)).
    kernel_regularizer: Regularizer function applied to
      the `kernel` weights matrix
      (see [regularizer](../regularizers.md)).
    bias_regularizer: Regularizer function applied to the bias vector
      (see [regularizer](../regularizers.md)).
    activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
      (see [regularizer](../regularizers.md)).
    kernel_constraint: Constraint function applied to the kernel matrix
      (see [constraints](../constraints.md)).
    bias_constraint: Constraint function applied to the bias vector
      (see [constraints](../constraints.md)).
  Input shape:
    5D tensor with shape:
    `(batch, channels, depth, rows, cols)` if data_format='channels_first'
    or 5D tensor with shape:
    `(batch, depth, rows, cols, channels)` if data_format='channels_last'.
  Output shape:
    5D tensor with shape:
    `(batch, filters, new_depth, new_rows, new_cols)` if
      data_format='channels_first'
    or 5D tensor with shape:
    `(batch, new_depth, new_rows, new_cols, filters)` if
      data_format='channels_last'.
    `depth` and `rows` and `cols` values might have changed due to padding.
  References:
    - [A guide to convolution arithmetic for deep
      learning](https://arxiv.org/abs/1603.07285v1)
    - [Deconvolutional
      Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf)
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1, 1),
               padding='valid',
               output_padding=None,
               data_format=None,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    # Unlike Conv2DTranspose, this layer does not accept a `dilation_rate`
    # argument; the Conv3D base still stores one (defaulting to 1s), and
    # get_config() removes it from the serialized config.
    super(Conv3DTranspose, self).__init__(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation=activations.get(activation),
        use_bias=use_bias,
        kernel_initializer=initializers.get(kernel_initializer),
        bias_initializer=initializers.get(bias_initializer),
        kernel_regularizer=regularizers.get(kernel_regularizer),
        bias_regularizer=regularizers.get(bias_regularizer),
        activity_regularizer=regularizers.get(activity_regularizer),
        kernel_constraint=constraints.get(kernel_constraint),
        bias_constraint=constraints.get(bias_constraint),
        **kwargs)
    # Normalize and validate output_padding: each entry must be strictly
    # smaller than the stride along the same dimension.
    self.output_padding = output_padding
    if self.output_padding is not None:
      self.output_padding = conv_utils.normalize_tuple(
          self.output_padding, 3, 'output_padding')
      for stride, out_pad in zip(self.strides, self.output_padding):
        if out_pad >= stride:
          raise ValueError('Stride ' + str(self.strides) + ' must be '
                           'greater than output padding ' +
                           str(self.output_padding))

  def build(self, input_shape):
    """Creates the layer's `kernel` (and optional `bias`) variables.

    Raises:
      ValueError: If the input is not rank 5, or if the channel dimension
        is `None`.
    """
    input_shape = tensor_shape.TensorShape(input_shape)
    if len(input_shape) != 5:
      # Concatenate into a single message (a comma here would create a
      # two-argument ValueError), matching Conv2DTranspose.build.
      raise ValueError('Inputs should have rank 5. Received input shape: ' +
                       str(input_shape))
    if self.data_format == 'channels_first':
      channel_axis = 1
    else:
      channel_axis = -1
    if input_shape.dims[channel_axis].value is None:
      raise ValueError('The channel dimension of the inputs '
                       'should be defined, found None: ' + str(input_shape))
    input_dim = int(input_shape[channel_axis])
    # Transposed kernel layout: (kd, kh, kw, filters, input_dim), i.e.
    # output and input channel dimensions are swapped relative to Conv3D.
    kernel_shape = self.kernel_size + (self.filters, input_dim)
    self.input_spec = InputSpec(ndim=5, axes={channel_axis: input_dim})
    self.kernel = self.add_weight(
        'kernel',
        shape=kernel_shape,
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint,
        trainable=True,
        dtype=self.dtype)
    if self.use_bias:
      self.bias = self.add_weight(
          'bias',
          shape=(self.filters,),
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint,
          trainable=True,
          dtype=self.dtype)
    else:
      self.bias = None
    self.built = True

  def call(self, inputs):
    """Applies the 3D transposed convolution, bias and activation."""
    # Use the dynamic shape so unknown batch/spatial dims are supported.
    inputs_shape = array_ops.shape(inputs)
    batch_size = inputs_shape[0]
    if self.data_format == 'channels_first':
      d_axis, h_axis, w_axis = 2, 3, 4
    else:
      d_axis, h_axis, w_axis = 1, 2, 3
    depth = inputs_shape[d_axis]
    height = inputs_shape[h_axis]
    width = inputs_shape[w_axis]
    kernel_d, kernel_h, kernel_w = self.kernel_size
    stride_d, stride_h, stride_w = self.strides
    if self.output_padding is None:
      out_pad_d = out_pad_h = out_pad_w = None
    else:
      out_pad_d, out_pad_h, out_pad_w = self.output_padding
    # Infer the dynamic output shape:
    out_depth = conv_utils.deconv_output_length(depth,
                                                kernel_d,
                                                padding=self.padding,
                                                output_padding=out_pad_d,
                                                stride=stride_d)
    out_height = conv_utils.deconv_output_length(height,
                                                 kernel_h,
                                                 padding=self.padding,
                                                 output_padding=out_pad_h,
                                                 stride=stride_h)
    out_width = conv_utils.deconv_output_length(width,
                                                kernel_w,
                                                padding=self.padding,
                                                output_padding=out_pad_w,
                                                stride=stride_w)
    if self.data_format == 'channels_first':
      output_shape = (batch_size, self.filters, out_depth, out_height,
                      out_width)
      strides = (1, 1, stride_d, stride_h, stride_w)
    else:
      output_shape = (batch_size, out_depth, out_height, out_width,
                      self.filters)
      strides = (1, stride_d, stride_h, stride_w, 1)
    output_shape_tensor = array_ops.stack(output_shape)
    outputs = nn.conv3d_transpose(
        inputs,
        self.kernel,
        output_shape_tensor,
        strides,
        data_format=conv_utils.convert_data_format(self.data_format, ndim=5),
        padding=self.padding.upper())
    if not context.executing_eagerly():
      # Infer the static output shape:
      out_shape = self.compute_output_shape(inputs.shape)
      outputs.set_shape(out_shape)
    if self.use_bias:
      # NOTE(review): `ndim=4` on a 5-D output — presumably deliberate since
      # nn.bias_add accepts only the 'NHWC'/'NCHW' format strings and applies
      # the bias to the channel dimension regardless of rank; TODO confirm.
      outputs = nn.bias_add(
          outputs,
          self.bias,
          data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
    if self.activation is not None:
      return self.activation(outputs)
    return outputs

  def compute_output_shape(self, input_shape):
    """Computes the static output shape (mirrors the logic in `call`)."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    output_shape = list(input_shape)
    if self.data_format == 'channels_first':
      c_axis, d_axis, h_axis, w_axis = 1, 2, 3, 4
    else:
      c_axis, d_axis, h_axis, w_axis = 4, 1, 2, 3
    kernel_d, kernel_h, kernel_w = self.kernel_size
    stride_d, stride_h, stride_w = self.strides
    if self.output_padding is None:
      out_pad_d = out_pad_h = out_pad_w = None
    else:
      out_pad_d, out_pad_h, out_pad_w = self.output_padding
    output_shape[c_axis] = self.filters
    output_shape[d_axis] = conv_utils.deconv_output_length(
        output_shape[d_axis],
        kernel_d,
        padding=self.padding,
        output_padding=out_pad_d,
        stride=stride_d)
    output_shape[h_axis] = conv_utils.deconv_output_length(
        output_shape[h_axis],
        kernel_h,
        padding=self.padding,
        output_padding=out_pad_h,
        stride=stride_h)
    output_shape[w_axis] = conv_utils.deconv_output_length(
        output_shape[w_axis],
        kernel_w,
        padding=self.padding,
        output_padding=out_pad_w,
        stride=stride_w)
    return tensor_shape.TensorShape(output_shape)

  def get_config(self):
    """Returns the layer config without the unsupported `dilation_rate`."""
    config = super(Conv3DTranspose, self).get_config()
    # Conv3D's config includes 'dilation_rate', which this layer's
    # constructor does not accept; drop it so the config round-trips.
    config.pop('dilation_rate')
    config['output_padding'] = self.output_padding
    return config
class SeparableConv(Conv):
  """Abstract base layer for separable nD convolution.
  This layer performs a depthwise convolution that acts separately on
  channels, followed by a pointwise convolution that mixes channels.
  If `use_bias` is True and a bias initializer is provided,
  it adds a bias vector to the output.
  It then optionally applies an activation function to produce the final output.
  Arguments:
    rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: A tuple or list of integers specifying the spatial
      dimensions of the filters. Can be a single integer to specify the same
      value for all spatial dimensions.
    strides: A tuple or list of integers specifying the strides
      of the convolution. Can be a single integer to specify the same value for
      all spatial dimensions.
      Specifying any `stride` value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, ..., channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, ...)`.
    dilation_rate: An integer or tuple/list of 2 integers, specifying
      the dilation rate to use for dilated convolution.
      Can be a single integer to specify the same value for
      all spatial dimensions.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any stride value != 1.
    depth_multiplier: The number of depthwise convolution output channels for
      each input channel. The total number of depthwise convolution output
      channels will be equal to `num_filters_in * depth_multiplier`.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    depthwise_initializer: An initializer for the depthwise convolution kernel.
    pointwise_initializer: An initializer for the pointwise convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer will be used.
    depthwise_regularizer: Optional regularizer for the depthwise
      convolution kernel.
    pointwise_regularizer: Optional regularizer for the pointwise
      convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    depthwise_constraint: Optional projection function to be applied to the
      depthwise kernel after being updated by an `Optimizer` (e.g. used for
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    pointwise_constraint: Optional projection function to be applied to the
      pointwise kernel after being updated by an `Optimizer`.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
  """

  def __init__(self,
               rank,
               filters,
               kernel_size,
               strides=1,
               padding='valid',
               data_format=None,
               dilation_rate=1,
               depth_multiplier=1,
               activation=None,
               use_bias=True,
               depthwise_initializer='glorot_uniform',
               pointwise_initializer='glorot_uniform',
               bias_initializer='zeros',
               depthwise_regularizer=None,
               pointwise_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               depthwise_constraint=None,
               pointwise_constraint=None,
               bias_constraint=None,
               trainable=True,
               name=None,
               **kwargs):
    # The depthwise/pointwise kernel arguments replace the base class's
    # single `kernel`; only the bias/activity arguments are forwarded.
    super(SeparableConv, self).__init__(
        rank=rank,
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activations.get(activation),
        use_bias=use_bias,
        bias_initializer=initializers.get(bias_initializer),
        bias_regularizer=regularizers.get(bias_regularizer),
        activity_regularizer=regularizers.get(activity_regularizer),
        # Resolve via constraints.get for consistency with the other
        # constraint/regularizer/initializer arguments (idempotent for
        # already-constructed constraint objects).
        bias_constraint=constraints.get(bias_constraint),
        trainable=trainable,
        name=name,
        **kwargs)
    self.depth_multiplier = depth_multiplier
    self.depthwise_initializer = initializers.get(depthwise_initializer)
    self.pointwise_initializer = initializers.get(pointwise_initializer)
    self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
    self.pointwise_regularizer = regularizers.get(pointwise_regularizer)
    self.depthwise_constraint = constraints.get(depthwise_constraint)
    self.pointwise_constraint = constraints.get(pointwise_constraint)

  def build(self, input_shape):
    """Creates the depthwise kernel, pointwise kernel and optional bias.

    Raises:
      ValueError: If the channel dimension of the input is undefined.
    """
    input_shape = tensor_shape.TensorShape(input_shape)
    if self.data_format == 'channels_first':
      channel_axis = 1
    else:
      channel_axis = -1
    if input_shape.dims[channel_axis].value is None:
      raise ValueError('The channel dimension of the inputs '
                       'should be defined. Found `None`.')
    input_dim = int(input_shape[channel_axis])
    self.input_spec = InputSpec(ndim=self.rank + 2,
                                axes={channel_axis: input_dim})
    # Depthwise kernel maps each input channel to `depth_multiplier`
    # intermediate channels; pointwise kernel (a 1x...x1 conv) then mixes
    # all `input_dim * depth_multiplier` channels down to `filters`.
    depthwise_kernel_shape = self.kernel_size + (input_dim,
                                                 self.depth_multiplier)
    pointwise_kernel_shape = (
        1,) * self.rank + (self.depth_multiplier * input_dim, self.filters)
    self.depthwise_kernel = self.add_weight(
        name='depthwise_kernel',
        shape=depthwise_kernel_shape,
        initializer=self.depthwise_initializer,
        regularizer=self.depthwise_regularizer,
        constraint=self.depthwise_constraint,
        trainable=True,
        dtype=self.dtype)
    self.pointwise_kernel = self.add_weight(
        name='pointwise_kernel',
        shape=pointwise_kernel_shape,
        initializer=self.pointwise_initializer,
        regularizer=self.pointwise_regularizer,
        constraint=self.pointwise_constraint,
        trainable=True,
        dtype=self.dtype)
    if self.use_bias:
      self.bias = self.add_weight(
          name='bias',
          shape=(self.filters,),
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint,
          trainable=True,
          dtype=self.dtype)
    else:
      self.bias = None
    self.built = True

  def call(self, inputs):
    # Abstract: rank-specific subclasses implement the actual op.
    raise NotImplementedError

  def get_config(self):
    """Returns the full serializable configuration of the layer."""
    config = {
        'filters':
            self.filters,
        'kernel_size':
            self.kernel_size,
        'strides':
            self.strides,
        'padding':
            self.padding,
        'data_format':
            self.data_format,
        'depth_multiplier':
            self.depth_multiplier,
        'dilation_rate':
            self.dilation_rate,
        'activation':
            activations.serialize(self.activation),
        'use_bias':
            self.use_bias,
        'depthwise_initializer':
            initializers.serialize(self.depthwise_initializer),
        'pointwise_initializer':
            initializers.serialize(self.pointwise_initializer),
        'bias_initializer':
            initializers.serialize(self.bias_initializer),
        'depthwise_regularizer':
            regularizers.serialize(self.depthwise_regularizer),
        'pointwise_regularizer':
            regularizers.serialize(self.pointwise_regularizer),
        'bias_regularizer':
            regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'depthwise_constraint':
            constraints.serialize(self.depthwise_constraint),
        'pointwise_constraint':
            constraints.serialize(self.pointwise_constraint),
        'bias_constraint':
            constraints.serialize(self.bias_constraint)
    }
    base_config = super(SeparableConv, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.SeparableConv1D',
              'keras.layers.SeparableConvolution1D')
class SeparableConv1D(SeparableConv):
  """Depthwise separable 1D convolution.
  This layer performs a depthwise convolution that acts separately on
  channels, followed by a pointwise convolution that mixes channels.
  If `use_bias` is True and a bias initializer is provided,
  it adds a bias vector to the output.
  It then optionally applies an activation function to produce the final output.
  Arguments:
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: A single integer specifying the spatial
      dimensions of the filters.
    strides: A single integer specifying the strides
      of the convolution.
      Specifying any `stride` value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, length, channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, length)`.
    dilation_rate: A single integer, specifying
      the dilation rate to use for dilated convolution.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any stride value != 1.
    depth_multiplier: The number of depthwise convolution output channels for
      each input channel. The total number of depthwise convolution output
      channels will be equal to `num_filters_in * depth_multiplier`.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    depthwise_initializer: An initializer for the depthwise convolution kernel.
    pointwise_initializer: An initializer for the pointwise convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer will be used.
    depthwise_regularizer: Optional regularizer for the depthwise
      convolution kernel.
    pointwise_regularizer: Optional regularizer for the pointwise
      convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    depthwise_constraint: Optional projection function to be applied to the
      depthwise kernel after being updated by an `Optimizer` (e.g. used for
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    pointwise_constraint: Optional projection function to be applied to the
      pointwise kernel after being updated by an `Optimizer`.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=1,
               padding='valid',
               data_format=None,
               dilation_rate=1,
               depth_multiplier=1,
               activation=None,
               use_bias=True,
               depthwise_initializer='glorot_uniform',
               pointwise_initializer='glorot_uniform',
               bias_initializer='zeros',
               depthwise_regularizer=None,
               pointwise_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               depthwise_constraint=None,
               pointwise_constraint=None,
               bias_constraint=None,
               **kwargs):
    # All arguments are documented in the class docstring; string
    # identifiers are resolved to objects before being forwarded.
    super(SeparableConv1D, self).__init__(
        rank=1,
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        depth_multiplier=depth_multiplier,
        activation=activations.get(activation),
        use_bias=use_bias,
        depthwise_initializer=initializers.get(depthwise_initializer),
        pointwise_initializer=initializers.get(pointwise_initializer),
        bias_initializer=initializers.get(bias_initializer),
        depthwise_regularizer=regularizers.get(depthwise_regularizer),
        pointwise_regularizer=regularizers.get(pointwise_regularizer),
        bias_regularizer=regularizers.get(bias_regularizer),
        activity_regularizer=regularizers.get(activity_regularizer),
        depthwise_constraint=constraints.get(depthwise_constraint),
        pointwise_constraint=constraints.get(pointwise_constraint),
        bias_constraint=constraints.get(bias_constraint),
        **kwargs)

  def call(self, inputs):
    """Applies the separable 1D convolution.

    Implemented by inserting a dummy spatial dimension and running the 2D
    separable convolution op, since no native 1D op exists.
    """
    if self.padding == 'causal':
      # Left-pad so each output step only sees current and past inputs.
      inputs = array_ops.pad(inputs, self._compute_causal_padding())
    if self.data_format == 'channels_last':
      # `self.strides` is a 1-tuple; `* 2` repeats it for the dummy
      # height dimension of the expanded 4-D op.
      strides = (1,) + self.strides * 2 + (1,)
      spatial_start_dim = 1
    else:
      strides = (1, 1) + self.strides * 2
      spatial_start_dim = 2
    # Explicitly broadcast inputs and kernels to 4D.
    # TODO(fchollet): refactor when a native separable_conv1d op is available.
    inputs = array_ops.expand_dims(inputs, spatial_start_dim)
    depthwise_kernel = array_ops.expand_dims(self.depthwise_kernel, 0)
    pointwise_kernel = array_ops.expand_dims(self.pointwise_kernel, 0)
    dilation_rate = (1,) + self.dilation_rate
    if self.padding == 'causal':
      # Causal padding was already applied above, so the op itself runs
      # with 'valid' padding.
      op_padding = 'valid'
    else:
      op_padding = self.padding
    outputs = nn.separable_conv2d(
        inputs,
        depthwise_kernel,
        pointwise_kernel,
        strides=strides,
        padding=op_padding.upper(),
        rate=dilation_rate,
        data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
    if self.use_bias:
      outputs = nn.bias_add(
          outputs,
          self.bias,
          data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
    # Drop the dummy spatial dimension that was inserted above.
    outputs = array_ops.squeeze(outputs, [spatial_start_dim])
    if self.activation is not None:
      return self.activation(outputs)
    return outputs
@keras_export('keras.layers.SeparableConv2D',
              'keras.layers.SeparableConvolution2D')
class SeparableConv2D(SeparableConv):
  """Depthwise separable 2D convolution.
  Separable convolutions consist in first performing
  a depthwise spatial convolution
  (which acts on each input channel separately)
  followed by a pointwise convolution which mixes together the resulting
  output channels. The `depth_multiplier` argument controls how many
  output channels are generated per input channel in the depthwise step.
  Intuitively, separable convolutions can be understood as
  a way to factorize a convolution kernel into two smaller kernels,
  or as an extreme version of an Inception block.
  Arguments:
    filters: Integer, the dimensionality of the output space
      (i.e. the number of output filters in the convolution).
    kernel_size: An integer or tuple/list of 2 integers, specifying the
      height and width of the 2D convolution window.
      Can be a single integer to specify the same value for
      all spatial dimensions.
    strides: An integer or tuple/list of 2 integers,
      specifying the strides of the convolution along the height and width.
      Can be a single integer to specify the same value for
      all spatial dimensions.
      Specifying any stride value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: one of `"valid"` or `"same"` (case-insensitive).
    data_format: A string,
      one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)` while `channels_first`
      corresponds to inputs with shape
      `(batch, channels, height, width)`.
      It defaults to the `image_data_format` value found in your
      Keras config file at `~/.keras/keras.json`.
      If you never set it, then it will be "channels_last".
    dilation_rate: An integer or tuple/list of 2 integers, specifying
      the dilation rate to use for dilated convolution.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any `strides` value != 1.
    depth_multiplier: The number of depthwise convolution output channels
      for each input channel.
      The total number of depthwise convolution output
      channels will be equal to `filters_in * depth_multiplier`.
    activation: Activation function to use.
      If you don't specify anything, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    depthwise_initializer: Initializer for the depthwise kernel matrix.
    pointwise_initializer: Initializer for the pointwise kernel matrix.
    bias_initializer: Initializer for the bias vector.
    depthwise_regularizer: Regularizer function applied to
      the depthwise kernel matrix.
    pointwise_regularizer: Regularizer function applied to
      the pointwise kernel matrix.
    bias_regularizer: Regularizer function applied to the bias vector.
    activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
    depthwise_constraint: Constraint function applied to
      the depthwise kernel matrix.
    pointwise_constraint: Constraint function applied to
      the pointwise kernel matrix.
    bias_constraint: Constraint function applied to the bias vector.
  Input shape:
    4D tensor with shape:
    `(batch, channels, rows, cols)` if data_format='channels_first'
    or 4D tensor with shape:
    `(batch, rows, cols, channels)` if data_format='channels_last'.
  Output shape:
    4D tensor with shape:
    `(batch, filters, new_rows, new_cols)` if data_format='channels_first'
    or 4D tensor with shape:
    `(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
    `rows` and `cols` values might have changed due to padding.
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format=None,
               dilation_rate=(1, 1),
               depth_multiplier=1,
               activation=None,
               use_bias=True,
               depthwise_initializer='glorot_uniform',
               pointwise_initializer='glorot_uniform',
               bias_initializer='zeros',
               depthwise_regularizer=None,
               pointwise_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               depthwise_constraint=None,
               pointwise_constraint=None,
               bias_constraint=None,
               **kwargs):
    # All arguments are documented in the class docstring; string
    # identifiers are resolved to objects before being forwarded.
    super(SeparableConv2D, self).__init__(
        rank=2,
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        depth_multiplier=depth_multiplier,
        activation=activations.get(activation),
        use_bias=use_bias,
        depthwise_initializer=initializers.get(depthwise_initializer),
        pointwise_initializer=initializers.get(pointwise_initializer),
        bias_initializer=initializers.get(bias_initializer),
        depthwise_regularizer=regularizers.get(depthwise_regularizer),
        pointwise_regularizer=regularizers.get(pointwise_regularizer),
        bias_regularizer=regularizers.get(bias_regularizer),
        activity_regularizer=regularizers.get(activity_regularizer),
        depthwise_constraint=constraints.get(depthwise_constraint),
        pointwise_constraint=constraints.get(pointwise_constraint),
        bias_constraint=constraints.get(bias_constraint),
        **kwargs)

  def call(self, inputs):
    """Applies the separable 2D convolution, bias and activation."""
    # Apply the actual ops.
    if self.data_format == 'channels_last':
      strides = (1,) + self.strides + (1,)
    else:
      strides = (1, 1) + self.strides
    outputs = nn.separable_conv2d(
        inputs,
        self.depthwise_kernel,
        self.pointwise_kernel,
        strides=strides,
        padding=self.padding.upper(),
        rate=self.dilation_rate,
        data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
    if self.use_bias:
      outputs = nn.bias_add(
          outputs,
          self.bias,
          data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
    if self.activation is not None:
      return self.activation(outputs)
    return outputs
@keras_export('keras.layers.DepthwiseConv2D')
class DepthwiseConv2D(Conv2D):
  """Depthwise separable 2D convolution.

  Depthwise Separable convolutions consists in performing
  just the first step in a depthwise spatial convolution
  (which acts on each input channel separately).
  The `depth_multiplier` argument controls how many
  output channels are generated per input channel in the depthwise step.

  Arguments:
    kernel_size: An integer or tuple/list of 2 integers, specifying the
      height and width of the 2D convolution window.
      Can be a single integer to specify the same value for
      all spatial dimensions.
    strides: An integer or tuple/list of 2 integers,
      specifying the strides of the convolution along the height and width.
      Can be a single integer to specify the same value for
      all spatial dimensions.
      Specifying any stride value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: one of `'valid'` or `'same'` (case-insensitive).
    depth_multiplier: The number of depthwise convolution output channels
      for each input channel.
      The total number of depthwise convolution output
      channels will be equal to `filters_in * depth_multiplier`.
    data_format: A string,
      one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)` while `channels_first`
      corresponds to inputs with shape
      `(batch, channels, height, width)`.
      It defaults to the `image_data_format` value found in your
      Keras config file at `~/.keras/keras.json`.
      If you never set it, then it will be 'channels_last'.
    activation: Activation function to use.
      If you don't specify anything, no activation is applied
      (ie. 'linear' activation: `a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    depthwise_initializer: Initializer for the depthwise kernel matrix.
    bias_initializer: Initializer for the bias vector.
    depthwise_regularizer: Regularizer function applied to
      the depthwise kernel matrix.
    bias_regularizer: Regularizer function applied to the bias vector.
    activity_regularizer: Regularizer function applied to
      the output of the layer (its 'activation').
    depthwise_constraint: Constraint function applied to
      the depthwise kernel matrix.
    bias_constraint: Constraint function applied to the bias vector.

  Input shape:
    4D tensor with shape:
    `[batch, channels, rows, cols]` if data_format='channels_first'
    or 4D tensor with shape:
    `[batch, rows, cols, channels]` if data_format='channels_last'.

  Output shape:
    4D tensor with shape:
    `[batch, filters, new_rows, new_cols]` if data_format='channels_first'
    or 4D tensor with shape:
    `[batch, new_rows, new_cols, filters]` if data_format='channels_last'.
    `rows` and `cols` values might have changed due to padding.
  """

  def __init__(self,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               depth_multiplier=1,
               data_format=None,
               activation=None,
               use_bias=True,
               depthwise_initializer='glorot_uniform',
               bias_initializer='zeros',
               depthwise_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               depthwise_constraint=None,
               bias_constraint=None,
               **kwargs):
    # filters=None: there is no pointwise kernel here; the effective number
    # of output channels is derived in build() as
    # input_dim * depth_multiplier.
    super(DepthwiseConv2D, self).__init__(
        filters=None,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation=activation,
        use_bias=use_bias,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        bias_constraint=bias_constraint,
        **kwargs)
    # Depthwise-specific hyperparameters replace Conv2D's kernel_* ones.
    self.depth_multiplier = depth_multiplier
    self.depthwise_initializer = initializers.get(depthwise_initializer)
    self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
    self.depthwise_constraint = constraints.get(depthwise_constraint)
    self.bias_initializer = initializers.get(bias_initializer)

  def build(self, input_shape):
    """Creates the depthwise kernel (and bias) once the input shape is known.

    Raises:
      ValueError: if the input does not have rank 4, or if its channel
        dimension is undefined.
    """
    if len(input_shape) < 4:
      raise ValueError('Inputs to `DepthwiseConv2D` should have rank 4. '
                       'Received input shape:', str(input_shape))
    if self.data_format == 'channels_first':
      channel_axis = 1
    else:
      channel_axis = 3
    if input_shape.dims[channel_axis].value is None:
      raise ValueError('The channel dimension of the inputs to '
                       '`DepthwiseConv2D` '
                       'should be defined. Found `None`.')
    input_dim = int(input_shape[channel_axis])
    # Kernel layout: (kernel_h, kernel_w, in_channels, depth_multiplier).
    depthwise_kernel_shape = (self.kernel_size[0],
                              self.kernel_size[1],
                              input_dim,
                              self.depth_multiplier)

    self.depthwise_kernel = self.add_weight(
        shape=depthwise_kernel_shape,
        initializer=self.depthwise_initializer,
        name='depthwise_kernel',
        regularizer=self.depthwise_regularizer,
        constraint=self.depthwise_constraint)

    if self.use_bias:
      # One bias per output channel, i.e. input_dim * depth_multiplier.
      self.bias = self.add_weight(shape=(input_dim * self.depth_multiplier,),
                                  initializer=self.bias_initializer,
                                  name='bias',
                                  regularizer=self.bias_regularizer,
                                  constraint=self.bias_constraint)
    else:
      self.bias = None
    # Set input spec.
    self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
    self.built = True

  def call(self, inputs, training=None):
    # NOTE(review): `training` is accepted for call-signature compatibility
    # but is not used anywhere in this body.
    outputs = backend.depthwise_conv2d(
        inputs,
        self.depthwise_kernel,
        strides=self.strides,
        padding=self.padding,
        dilation_rate=self.dilation_rate,
        data_format=self.data_format)

    if self.use_bias:
      outputs = backend.bias_add(
          outputs,
          self.bias,
          data_format=self.data_format)

    if self.activation is not None:
      return self.activation(outputs)

    return outputs

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    # data_format is normalized by Conv2D.__init__, so exactly one of the
    # two branches below is taken.
    if self.data_format == 'channels_first':
      rows = input_shape[2]
      cols = input_shape[3]
      out_filters = input_shape[1] * self.depth_multiplier
    elif self.data_format == 'channels_last':
      rows = input_shape[1]
      cols = input_shape[2]
      out_filters = input_shape[3] * self.depth_multiplier

    # Spatial dims shrink (or stay) according to padding and stride.
    rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
                                         self.padding,
                                         self.strides[0])
    cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
                                         self.padding,
                                         self.strides[1])
    if self.data_format == 'channels_first':
      return (input_shape[0], out_filters, rows, cols)
    elif self.data_format == 'channels_last':
      return (input_shape[0], rows, cols, out_filters)

  def get_config(self):
    """Returns the config, swapping Conv2D's kernel_* keys for depthwise ones."""
    config = super(DepthwiseConv2D, self).get_config()
    # These Conv2D entries do not apply to a depthwise-only layer.
    config.pop('filters')
    config.pop('kernel_initializer')
    config.pop('kernel_regularizer')
    config.pop('kernel_constraint')
    config['depth_multiplier'] = self.depth_multiplier
    config['depthwise_initializer'] = initializers.serialize(
        self.depthwise_initializer)
    config['depthwise_regularizer'] = regularizers.serialize(
        self.depthwise_regularizer)
    config['depthwise_constraint'] = constraints.serialize(
        self.depthwise_constraint)
    return config
@keras_export('keras.layers.UpSampling1D')
class UpSampling1D(Layer):
  """Upsampling layer for 1D inputs.

  Each temporal step is repeated `size` times along the time axis (axis 1).

  Arguments:
    size: integer. Upsampling factor.

  Input shape:
    3D tensor with shape: `(batch, steps, features)`.

  Output shape:
    3D tensor with shape: `(batch, upsampled_steps, features)`.
  """

  def __init__(self, size=2, **kwargs):
    super(UpSampling1D, self).__init__(**kwargs)
    self.size = int(size)
    self.input_spec = InputSpec(ndim=3)

  def compute_output_shape(self, input_shape):
    dims = tensor_shape.TensorShape(input_shape).as_list()
    # An unknown (None) time dimension stays unknown.
    if dims[1] is None:
      upsampled = None
    else:
      upsampled = self.size * dims[1]
    return tensor_shape.TensorShape([dims[0], upsampled, dims[2]])

  def call(self, inputs):
    return backend.repeat_elements(inputs, self.size, axis=1)

  def get_config(self):
    base_config = super(UpSampling1D, self).get_config()
    base_config['size'] = self.size
    return base_config
@keras_export('keras.layers.UpSampling2D')
class UpSampling2D(Layer):
  """Upsampling layer for 2D inputs.

  Repeats the rows and columns of the data
  by size[0] and size[1] respectively.

  Arguments:
    size: int, or tuple of 2 integers.
      The upsampling factors for rows and columns.
    data_format: A string,
      one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)` while `channels_first`
      corresponds to inputs with shape
      `(batch, channels, height, width)`.
      It defaults to the `image_data_format` value found in your
      Keras config file at `~/.keras/keras.json`.
      If you never set it, then it will be "channels_last".
    interpolation: A string, one of `nearest` or `bilinear`.

  Input shape:
    4D tensor with shape:
    - If `data_format` is `"channels_last"`:
        `(batch, rows, cols, channels)`
    - If `data_format` is `"channels_first"`:
        `(batch, channels, rows, cols)`

  Output shape:
    4D tensor with shape:
    - If `data_format` is `"channels_last"`:
        `(batch, upsampled_rows, upsampled_cols, channels)`
    - If `data_format` is `"channels_first"`:
        `(batch, channels, upsampled_rows, upsampled_cols)`
  """

  def __init__(self,
               size=(2, 2),
               data_format=None,
               interpolation='nearest',
               **kwargs):
    super(UpSampling2D, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.size = conv_utils.normalize_tuple(size, 2, 'size')
    # backend.resize_images supports only these two algorithms.
    if interpolation not in {'nearest', 'bilinear'}:
      raise ValueError('`interpolation` argument should be one of `"nearest"` '
                       'or `"bilinear"`.')
    self.interpolation = interpolation
    self.input_spec = InputSpec(ndim=4)

  def compute_output_shape(self, input_shape):
    """Computes the upsampled shape; unknown (None) dims stay unknown."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    if self.data_format == 'channels_first':
      height = self.size[0] * input_shape[
          2] if input_shape[2] is not None else None
      width = self.size[1] * input_shape[
          3] if input_shape[3] is not None else None
      return tensor_shape.TensorShape(
          [input_shape[0], input_shape[1], height, width])
    else:
      height = self.size[0] * input_shape[
          1] if input_shape[1] is not None else None
      width = self.size[1] * input_shape[
          2] if input_shape[2] is not None else None
      return tensor_shape.TensorShape(
          [input_shape[0], height, width, input_shape[3]])

  def call(self, inputs):
    return backend.resize_images(
        inputs, self.size[0], self.size[1], self.data_format,
        interpolation=self.interpolation)

  def get_config(self):
    # Fixed: `interpolation` was previously omitted from the config, so a
    # layer saved with interpolation='bilinear' silently deserialized with
    # the default 'nearest'.
    config = {
        'size': self.size,
        'data_format': self.data_format,
        'interpolation': self.interpolation
    }
    base_config = super(UpSampling2D, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.UpSampling3D')
class UpSampling3D(Layer):
  """Upsampling layer for 3D inputs.

  Repeats the 1st, 2nd and 3rd dimensions
  of the data by size[0], size[1] and size[2] respectively.

  Arguments:
    size: int, or tuple of 3 integers.
      The upsampling factors for dim1, dim2 and dim3.
    data_format: A string,
      one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
      while `channels_first` corresponds to inputs with shape
      `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
      It defaults to the `image_data_format` value found in your
      Keras config file at `~/.keras/keras.json`.
      If you never set it, then it will be "channels_last".

  Input shape:
    5D tensor with shape:
    - If `data_format` is `"channels_last"`:
        `(batch, dim1, dim2, dim3, channels)`
    - If `data_format` is `"channels_first"`:
        `(batch, channels, dim1, dim2, dim3)`

  Output shape:
    5D tensor with shape:
    - If `data_format` is `"channels_last"`:
        `(batch, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)`
    - If `data_format` is `"channels_first"`:
        `(batch, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)`
  """

  def __init__(self, size=(2, 2, 2), data_format=None, **kwargs):
    # Consistency fix: initialize the base Layer first, as every other
    # UpSampling/ZeroPadding/Cropping layer in this file does (the original
    # assigned attributes before calling super().__init__()).
    super(UpSampling3D, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.size = conv_utils.normalize_tuple(size, 3, 'size')
    self.input_spec = InputSpec(ndim=5)

  def compute_output_shape(self, input_shape):
    """Computes the upsampled shape; unknown (None) dims stay unknown."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()

    def _scaled(dim, factor):
      # None (unknown) dimensions propagate unchanged.
      return factor * dim if dim is not None else None

    if self.data_format == 'channels_first':
      return tensor_shape.TensorShape(
          [input_shape[0], input_shape[1],
           _scaled(input_shape[2], self.size[0]),
           _scaled(input_shape[3], self.size[1]),
           _scaled(input_shape[4], self.size[2])])
    else:
      return tensor_shape.TensorShape(
          [input_shape[0],
           _scaled(input_shape[1], self.size[0]),
           _scaled(input_shape[2], self.size[1]),
           _scaled(input_shape[3], self.size[2]),
           input_shape[4]])

  def call(self, inputs):
    return backend.resize_volumes(
        inputs, self.size[0], self.size[1], self.size[2], self.data_format)

  def get_config(self):
    config = {'size': self.size, 'data_format': self.data_format}
    base_config = super(UpSampling3D, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ZeroPadding1D')
class ZeroPadding1D(Layer):
  """Zero-padding layer for 1D input (e.g. temporal sequence).

  Arguments:
    padding: int, or tuple of int (length 2), or dictionary.
      - If int:
        How many zeros to add at the beginning and end of
        the padding dimension (axis 1).
      - If tuple of int (length 2):
        How many zeros to add at the beginning and at the end of
        the padding dimension (`(left_pad, right_pad)`).

  Input shape:
    3D tensor with shape `(batch, axis_to_pad, features)`

  Output shape:
    3D tensor with shape `(batch, padded_axis, features)`
  """

  def __init__(self, padding=1, **kwargs):
    super(ZeroPadding1D, self).__init__(**kwargs)
    # Normalize `padding` to a (left_pad, right_pad) pair.
    self.padding = conv_utils.normalize_tuple(padding, 2, 'padding')
    self.input_spec = InputSpec(ndim=3)

  def compute_output_shape(self, input_shape):
    axis_len = input_shape[1]
    # Unknown (None) time dimension stays unknown.
    padded = axis_len + sum(self.padding) if axis_len is not None else None
    return tensor_shape.TensorShape([input_shape[0], padded, input_shape[2]])

  def call(self, inputs):
    return backend.temporal_padding(inputs, padding=self.padding)

  def get_config(self):
    base_config = super(ZeroPadding1D, self).get_config()
    base_config['padding'] = self.padding
    return base_config
@keras_export('keras.layers.ZeroPadding2D')
class ZeroPadding2D(Layer):
  """Zero-padding layer for 2D input (e.g. picture).

  This layer can add rows and columns of zeros
  at the top, bottom, left and right side of an image tensor.

  Arguments:
    padding: int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
      - If int: the same symmetric padding
        is applied to height and width.
      - If tuple of 2 ints:
        interpreted as two different
        symmetric padding values for height and width:
        `(symmetric_height_pad, symmetric_width_pad)`.
      - If tuple of 2 tuples of 2 ints:
        interpreted as
        `((top_pad, bottom_pad), (left_pad, right_pad))`
    data_format: A string,
      one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)` while `channels_first`
      corresponds to inputs with shape
      `(batch, channels, height, width)`.
      It defaults to the `image_data_format` value found in your
      Keras config file at `~/.keras/keras.json`.
      If you never set it, then it will be "channels_last".

  Input shape:
    4D tensor with shape:
    - If `data_format` is `"channels_last"`:
        `(batch, rows, cols, channels)`
    - If `data_format` is `"channels_first"`:
        `(batch, channels, rows, cols)`

  Output shape:
    4D tensor with shape:
    - If `data_format` is `"channels_last"`:
        `(batch, padded_rows, padded_cols, channels)`
    - If `data_format` is `"channels_first"`:
        `(batch, channels, padded_rows, padded_cols)`
  """

  def __init__(self, padding=(1, 1), data_format=None, **kwargs):
    super(ZeroPadding2D, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    # Normalize `padding` to the canonical
    # ((top_pad, bottom_pad), (left_pad, right_pad)) form.
    if isinstance(padding, int):
      self.padding = ((padding, padding), (padding, padding))
    elif hasattr(padding, '__len__'):
      if len(padding) != 2:
        raise ValueError('`padding` should have two elements. '
                         'Found: ' + str(padding))
      height_padding = conv_utils.normalize_tuple(padding[0], 2,
                                                  '1st entry of padding')
      width_padding = conv_utils.normalize_tuple(padding[1], 2,
                                                 '2nd entry of padding')
      self.padding = (height_padding, width_padding)
    else:
      raise ValueError('`padding` should be either an int, '
                       'a tuple of 2 ints '
                       '(symmetric_height_pad, symmetric_width_pad), '
                       'or a tuple of 2 tuples of 2 ints '
                       '((top_pad, bottom_pad), (left_pad, right_pad)). '
                       'Found: ' + str(padding))
    self.input_spec = InputSpec(ndim=4)

  def compute_output_shape(self, input_shape):
    """Computes the padded output shape; unknown (None) dims stay unknown."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    if self.data_format == 'channels_first':
      if input_shape[2] is not None:
        rows = input_shape[2] + self.padding[0][0] + self.padding[0][1]
      else:
        rows = None
      if input_shape[3] is not None:
        cols = input_shape[3] + self.padding[1][0] + self.padding[1][1]
      else:
        cols = None
      return tensor_shape.TensorShape(
          [input_shape[0], input_shape[1], rows, cols])
    elif self.data_format == 'channels_last':
      if input_shape[1] is not None:
        rows = input_shape[1] + self.padding[0][0] + self.padding[0][1]
      else:
        rows = None
      if input_shape[2] is not None:
        cols = input_shape[2] + self.padding[1][0] + self.padding[1][1]
      else:
        cols = None
      return tensor_shape.TensorShape(
          [input_shape[0], rows, cols, input_shape[3]])

  def call(self, inputs):
    return backend.spatial_2d_padding(
        inputs, padding=self.padding, data_format=self.data_format)

  def get_config(self):
    config = {'padding': self.padding, 'data_format': self.data_format}
    base_config = super(ZeroPadding2D, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ZeroPadding3D')
class ZeroPadding3D(Layer):
  """Zero-padding layer for 3D data (spatial or spatio-temporal).

  Arguments:
    padding: int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
      - If int: the same symmetric padding
        is applied to all three spatial dimensions.
      - If tuple of 3 ints:
        interpreted as three different
        symmetric padding values for the three spatial dimensions:
        `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.
      - If tuple of 3 tuples of 2 ints:
        interpreted as
        `((left_dim1_pad, right_dim1_pad), (left_dim2_pad,
          right_dim2_pad), (left_dim3_pad, right_dim3_pad))`
    data_format: A string,
      one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
      while `channels_first` corresponds to inputs with shape
      `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
      It defaults to the `image_data_format` value found in your
      Keras config file at `~/.keras/keras.json`.
      If you never set it, then it will be "channels_last".

  Input shape:
    5D tensor with shape:
    - If `data_format` is `"channels_last"`:
        `(batch, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad,
          depth)`
    - If `data_format` is `"channels_first"`:
        `(batch, depth, first_axis_to_pad, second_axis_to_pad,
          third_axis_to_pad)`

  Output shape:
    5D tensor with shape:
    - If `data_format` is `"channels_last"`:
        `(batch, first_padded_axis, second_padded_axis, third_axis_to_pad,
          depth)`
    - If `data_format` is `"channels_first"`:
        `(batch, depth, first_padded_axis, second_padded_axis,
          third_axis_to_pad)`
  """

  def __init__(self, padding=(1, 1, 1), data_format=None, **kwargs):
    super(ZeroPadding3D, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    # Normalize `padding` to three (begin, end) pairs, one per spatial dim.
    if isinstance(padding, int):
      self.padding = ((padding, padding), (padding, padding), (padding,
                                                               padding))
    elif hasattr(padding, '__len__'):
      if len(padding) != 3:
        raise ValueError('`padding` should have 3 elements. '
                         'Found: ' + str(padding))
      dim1_padding = conv_utils.normalize_tuple(padding[0], 2,
                                                '1st entry of padding')
      dim2_padding = conv_utils.normalize_tuple(padding[1], 2,
                                                '2nd entry of padding')
      dim3_padding = conv_utils.normalize_tuple(padding[2], 2,
                                                '3rd entry of padding')
      self.padding = (dim1_padding, dim2_padding, dim3_padding)
    else:
      # Fixed: message previously named 'right_dim2_pad' for the third pair.
      raise ValueError(
          '`padding` should be either an int, '
          'a tuple of 3 ints '
          '(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad), '
          'or a tuple of 3 tuples of 2 ints '
          '((left_dim1_pad, right_dim1_pad),'
          ' (left_dim2_pad, right_dim2_pad),'
          ' (left_dim3_pad, right_dim3_pad)). '
          'Found: ' + str(padding))
    self.input_spec = InputSpec(ndim=5)

  def compute_output_shape(self, input_shape):
    """Computes the padded output shape; unknown (None) dims stay unknown.

    Fixed: the previous implementation assumed symmetric padding
    (`2 * padding[i][0]` for channels_first, `2 * padding[i][1]` for
    channels_last), which produced wrong static shapes for asymmetric
    padding. Both pad amounts are now summed, matching what
    `backend.spatial_3d_padding` actually does in `call`.
    """
    input_shape = tensor_shape.TensorShape(input_shape).as_list()

    def _padded(dim, pads):
      return dim + pads[0] + pads[1] if dim is not None else None

    if self.data_format == 'channels_first':
      dim1 = _padded(input_shape[2], self.padding[0])
      dim2 = _padded(input_shape[3], self.padding[1])
      dim3 = _padded(input_shape[4], self.padding[2])
      return tensor_shape.TensorShape(
          [input_shape[0], input_shape[1], dim1, dim2, dim3])
    elif self.data_format == 'channels_last':
      dim1 = _padded(input_shape[1], self.padding[0])
      dim2 = _padded(input_shape[2], self.padding[1])
      dim3 = _padded(input_shape[3], self.padding[2])
      return tensor_shape.TensorShape(
          [input_shape[0], dim1, dim2, dim3, input_shape[4]])

  def call(self, inputs):
    return backend.spatial_3d_padding(
        inputs, padding=self.padding, data_format=self.data_format)

  def get_config(self):
    config = {'padding': self.padding, 'data_format': self.data_format}
    base_config = super(ZeroPadding3D, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Cropping1D')
class Cropping1D(Layer):
  """Cropping layer for 1D input (e.g. temporal sequence).

  It crops along the time dimension (axis 1).

  Arguments:
    cropping: int or tuple of int (length 2)
      How many units should be trimmed off at the beginning and end of
      the cropping dimension (axis 1).
      If a single int is provided,
      the same value will be used for both.

  Input shape:
    3D tensor with shape `(batch, axis_to_crop, features)`

  Output shape:
    3D tensor with shape `(batch, cropped_axis, features)`
  """

  def __init__(self, cropping=(1, 1), **kwargs):
    super(Cropping1D, self).__init__(**kwargs)
    # Normalize `cropping` to a (begin_crop, end_crop) pair.
    self.cropping = conv_utils.normalize_tuple(cropping, 2, 'cropping')
    self.input_spec = InputSpec(ndim=3)

  def compute_output_shape(self, input_shape):
    dims = tensor_shape.TensorShape(input_shape).as_list()
    # Unknown (None) time dimension stays unknown.
    cropped = dims[1] - sum(self.cropping) if dims[1] is not None else None
    return tensor_shape.TensorShape([dims[0], cropped, dims[2]])

  def call(self, inputs):
    begin, end = self.cropping
    # A 0 end-crop must become an open-ended slice: `x[a:-0]` would be empty.
    return inputs[:, begin:(-end if end else None), :]

  def get_config(self):
    base_config = super(Cropping1D, self).get_config()
    base_config['cropping'] = self.cropping
    return base_config
@keras_export('keras.layers.Cropping2D')
class Cropping2D(Layer):
  """Cropping layer for 2D input (e.g. picture).

  It crops along spatial dimensions, i.e. height and width.

  Arguments:
    cropping: int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
      - If int: the same symmetric cropping
        is applied to height and width.
      - If tuple of 2 ints:
        interpreted as two different
        symmetric cropping values for height and width:
        `(symmetric_height_crop, symmetric_width_crop)`.
      - If tuple of 2 tuples of 2 ints:
        interpreted as
        `((top_crop, bottom_crop), (left_crop, right_crop))`
    data_format: A string,
      one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)` while `channels_first`
      corresponds to inputs with shape
      `(batch, channels, height, width)`.
      It defaults to the `image_data_format` value found in your
      Keras config file at `~/.keras/keras.json`.
      If you never set it, then it will be "channels_last".

  Input shape:
    4D tensor with shape:
    - If `data_format` is `"channels_last"`:
        `(batch, rows, cols, channels)`
    - If `data_format` is `"channels_first"`:
        `(batch, channels, rows, cols)`

  Output shape:
    4D tensor with shape:
    - If `data_format` is `"channels_last"`:
        `(batch, cropped_rows, cropped_cols, channels)`
    - If `data_format` is `"channels_first"`:
        `(batch, channels, cropped_rows, cropped_cols)`

  Examples:

  ```python
  # Crop the input 2D images or feature maps
  model = Sequential()
  model.add(Cropping2D(cropping=((2, 2), (4, 4)),
                       input_shape=(28, 28, 3)))
  # now model.output_shape == (None, 24, 20, 3)
  model.add(Conv2D(64, (3, 3), padding='same'))
  model.add(Cropping2D(cropping=((2, 2), (2, 2))))
  # now model.output_shape == (None, 20, 16, 64)
  ```
  """

  def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs):
    super(Cropping2D, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    # Normalize `cropping` to ((top_crop, bottom_crop),
    # (left_crop, right_crop)).
    if isinstance(cropping, int):
      self.cropping = ((cropping, cropping), (cropping, cropping))
    elif hasattr(cropping, '__len__'):
      if len(cropping) != 2:
        raise ValueError('`cropping` should have two elements. '
                         'Found: ' + str(cropping))
      height_cropping = conv_utils.normalize_tuple(cropping[0], 2,
                                                   '1st entry of cropping')
      width_cropping = conv_utils.normalize_tuple(cropping[1], 2,
                                                  '2nd entry of cropping')
      self.cropping = (height_cropping, width_cropping)
    else:
      raise ValueError('`cropping` should be either an int, '
                       'a tuple of 2 ints '
                       '(symmetric_height_crop, symmetric_width_crop), '
                       'or a tuple of 2 tuples of 2 ints '
                       '((top_crop, bottom_crop), (left_crop, right_crop)). '
                       'Found: ' + str(cropping))
    self.input_spec = InputSpec(ndim=4)

  def compute_output_shape(self, input_shape):
    """Computes the cropped output shape; unknown (None) dims stay unknown."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    # pylint: disable=invalid-unary-operand-type
    if self.data_format == 'channels_first':
      return tensor_shape.TensorShape([
          input_shape[0], input_shape[1],
          input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
          if input_shape[2] else None,
          input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
          if input_shape[3] else None
      ])
    else:
      return tensor_shape.TensorShape([
          input_shape[0],
          input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
          if input_shape[1] else None,
          input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
          if input_shape[2] else None, input_shape[3]
      ])
    # pylint: enable=invalid-unary-operand-type

  def call(self, inputs):
    # A 0 end-crop cannot be expressed as `x[a:-0]` (that slice is empty),
    # so each combination of zero/non-zero end-crops gets its own slice.
    # pylint: disable=invalid-unary-operand-type
    if self.data_format == 'channels_first':
      if self.cropping[0][1] == self.cropping[1][1] == 0:
        return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:]
      elif self.cropping[0][1] == 0:
        return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:
                      -self.cropping[1][1]]
      elif self.cropping[1][1] == 0:
        return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
                      self.cropping[1][0]:]
      return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
                    self.cropping[1][0]:-self.cropping[1][1]]
    else:
      if self.cropping[0][1] == self.cropping[1][1] == 0:
        return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:, :]
      elif self.cropping[0][1] == 0:
        return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:
                      -self.cropping[1][1], :]
      elif self.cropping[1][1] == 0:
        return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
                      self.cropping[1][0]:, :]
      return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[
          1][0]:-self.cropping[1][1], :]  # pylint: disable=invalid-unary-operand-type
    # pylint: enable=invalid-unary-operand-type

  def get_config(self):
    config = {'cropping': self.cropping, 'data_format': self.data_format}
    base_config = super(Cropping2D, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Cropping3D')
class Cropping3D(Layer):
"""Cropping layer for 3D data (e.g.
spatial or spatio-temporal).
Arguments:
    cropping: int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
- If int: the same symmetric cropping
is applied to depth, height, and width.
      - If tuple of 3 ints:
        interpreted as three different
        symmetric cropping values for depth, height, and width:
        `(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`.
- If tuple of 3 tuples of 2 ints:
interpreted as
`((left_dim1_crop, right_dim1_crop), (left_dim2_crop,
right_dim2_crop), (left_dim3_crop, right_dim3_crop))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop,
depth)`
- If `data_format` is `"channels_first"`:
`(batch, depth, first_axis_to_crop, second_axis_to_crop,
third_axis_to_crop)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, first_cropped_axis, second_cropped_axis, third_cropped_axis,
depth)`
- If `data_format` is `"channels_first"`:
`(batch, depth, first_cropped_axis, second_cropped_axis,
third_cropped_axis)`
"""
def __init__(self,
cropping=((1, 1), (1, 1), (1, 1)),
data_format=None,
**kwargs):
super(Cropping3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(cropping, int):
self.cropping = ((cropping, cropping), (cropping, cropping), (cropping,
cropping))
elif hasattr(cropping, '__len__'):
if len(cropping) != 3:
raise ValueError('`cropping` should have 3 elements. '
'Found: ' + str(cropping))
dim1_cropping = conv_utils.normalize_tuple(cropping[0], 2,
'1st entry of cropping')
dim2_cropping = conv_utils.normalize_tuple(cropping[1], 2,
'2nd entry of cropping')
dim3_cropping = conv_utils.normalize_tuple(cropping[2], 2,
'3rd entry of cropping')
self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping)
else:
raise ValueError(
'`cropping` should be either an int, '
'a tuple of 3 ints '
'(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop), '
'or a tuple of 3 tuples of 2 ints '
'((left_dim1_crop, right_dim1_crop),'
' (left_dim2_crop, right_dim2_crop),'
' (left_dim3_crop, right_dim2_crop)). '
'Found: ' + str(cropping))
self.input_spec = InputSpec(ndim=5)
  def compute_output_shape(self, input_shape):
    """Computes the cropped output shape; unknown (None) dims stay unknown."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    # pylint: disable=invalid-unary-operand-type
    if self.data_format == 'channels_first':
      # Spatial dims are axes 2..4; each shrinks by begin + end crop.
      if input_shape[2] is not None:
        dim1 = input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
      else:
        dim1 = None
      if input_shape[3] is not None:
        dim2 = input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
      else:
        dim2 = None
      if input_shape[4] is not None:
        dim3 = input_shape[4] - self.cropping[2][0] - self.cropping[2][1]
      else:
        dim3 = None
      return tensor_shape.TensorShape(
          [input_shape[0], input_shape[1], dim1, dim2, dim3])
    elif self.data_format == 'channels_last':
      # Spatial dims are axes 1..3; channels (axis 4) pass through.
      if input_shape[1] is not None:
        dim1 = input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
      else:
        dim1 = None
      if input_shape[2] is not None:
        dim2 = input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
      else:
        dim2 = None
      if input_shape[3] is not None:
        dim3 = input_shape[3] - self.cropping[2][0] - self.cropping[2][1]
      else:
        dim3 = None
      return tensor_shape.TensorShape(
          [input_shape[0], dim1, dim2, dim3, input_shape[4]])
    # pylint: enable=invalid-unary-operand-type
def call(self, inputs):
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, self.cropping[2][0]:]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], self.cropping[2][0]:]
elif self.cropping[0][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][
0]:-self.cropping[1][1], self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.
cropping[1][0]:, self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.
cropping[1][0]:-self.cropping[1][1], self.cropping[2][0]:]
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1], self.cropping[2][
0]:-self.cropping[2][1]]
else:
if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:, :]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:-self.cropping[2][1], :]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, self.cropping[2][0]:, :]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], self.cropping[2][0]:, :]
elif self.cropping[0][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][
0]:-self.cropping[1][1], self.cropping[2][0]:
-self.cropping[2][1], :]
elif self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][
0]:-self.cropping[0][1], self.cropping[1][0]:, self.cropping[2][0]:
-self.cropping[2][1], :]
elif self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1], self.cropping[
2][0]:, :]
return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[
1][0]:-self.cropping[1][1], self.cropping[2][0]: # pylint: disable=invalid-unary-operand-type
-self.cropping[2][1], :] # pylint: disable=invalid-unary-operand-type
# pylint: enable=invalid-unary-operand-type
def get_config(self):
config = {'cropping': self.cropping, 'data_format': self.data_format}
base_config = super(Cropping3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Aliases
# Backwards-compatible names: older Keras code imported these layers under
# the long "Convolution*" / "Deconvolution*" (and short "Deconv*") spellings.
# Binding them to the same class objects keeps that code working unchanged.
Convolution1D = Conv1D
Convolution2D = Conv2D
Convolution3D = Conv3D
SeparableConvolution1D = SeparableConv1D
SeparableConvolution2D = SeparableConv2D
Convolution2DTranspose = Conv2DTranspose
Convolution3DTranspose = Conv3DTranspose
Deconvolution2D = Deconv2D = Conv2DTranspose
Deconvolution3D = Deconv3D = Conv3DTranspose
|
{
"content_hash": "340968f6b91425448abd3be4e079870c",
"timestamp": "",
"source": "github",
"line_count": 2673,
"max_line_length": 104,
"avg_line_length": 42.61391694725028,
"alnum_prop": 0.626976393022378,
"repo_name": "apark263/tensorflow",
"id": "30b919cc0a9038cf0eeb10a240105fbabd591efa",
"size": "114596",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/layers/convolutional.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "2867"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "561314"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "54581021"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "39024"
},
{
"name": "Go",
"bytes": "1373561"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "899393"
},
{
"name": "Jupyter Notebook",
"bytes": "2618454"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "75994"
},
{
"name": "Objective-C",
"bytes": "16140"
},
{
"name": "Objective-C++",
"bytes": "102889"
},
{
"name": "PHP",
"bytes": "14340"
},
{
"name": "Pascal",
"bytes": "399"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "44616385"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "504099"
},
{
"name": "Smarty",
"bytes": "10072"
}
],
"symlink_target": ""
}
|
def index():
    """List accepted projects; fall back to the flat page when none exist."""
    projects = db((db.activity.type == 'project') &
                  (db.activity.status == 'accepted')).select()
    if not projects:
        return plugin_flatpage()
    return dict(projects=projects)
@auth.requires_login()
def apply():
    """Join, or update participation in, the project given by request.args(1).

    Shows an insert form when the user is not yet a partaker of the
    activity, otherwise an update form bound to the existing record.
    """
    project = db.activity[request.args(1)]
    partaker = db((db.partaker.activity == request.args(1)) & (db.partaker.user_id == auth.user_id)).select().first()
    # Preset and hide the ownership fields so the form can only act for the
    # logged-in user on this specific activity.
    db.partaker.user_id.default = auth.user_id
    db.partaker.user_id.writable = False
    db.partaker.user_id.readable = False
    db.partaker.activity.default = request.args(1)
    db.partaker.activity.writable = False
    db.partaker.activity.readable = False
    if partaker is None:
        # First visit: offer a "join" form (formname keeps CSRF state apart
        # from the update form below).
        form = SQLFORM(db.partaker)
        if form.accepts(request.vars, session, formname="new"):
            if not form.vars.activity in (None, ""):
                db.partaker.insert(user_id=auth.user_id, activity=form.vars.activity)
            session.flash = T("Thanks for joining the partakers list")
            redirect(URL(c="projects", f="index"))
    else:
        # Already a partaker: edit the existing record in place.
        db.partaker.id.readable = False
        form = SQLFORM(db.partaker, partaker.id)
        if form.accepts(request.vars, session, formname="update"):
            session.flash = T("Your project's info was updated")
            redirect(URL(c="projects", f="index"))
    return dict(form=form, partaker=partaker, project=project)
@auth.requires_login()
def dismiss():
    """Delete the current user's partaker record and redirect to the index.

    Expects request.args(1) to be the partaker row id.
    """
    partaker = db.partaker[request.args(1)]
    project = partaker.activity
    partaker.delete_record()
    # Keep the translation key constant and interpolate the title with %s:
    # concatenating the title into the T() argument made every message a
    # distinct, untranslatable key.
    session.flash = T("You dismissed the project %s") % project.title
    redirect(URL(c="projects", f="index"))
@auth.requires(user_is_author_or_manager(activity_id=request.args(1)))
def partakers():
    """Show everyone signed up for the project (author/manager only)."""
    project = db.activity[request.args(1)]
    rows = db(db.partaker.activity == project.id).select()
    return dict(partakers=rows, project=project)
|
{
"content_hash": "68878ce0be2b6aac082b76366c9a6504",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 117,
"avg_line_length": 39.265306122448976,
"alnum_prop": 0.6481288981288982,
"repo_name": "tectronics/web2conf",
"id": "bee74c1f191a3599ac8754ec67bfd97da72ad32f",
"size": "1960",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "controllers/projects.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52089"
},
{
"name": "Groff",
"bytes": "264"
},
{
"name": "HTML",
"bytes": "92282"
},
{
"name": "JavaScript",
"bytes": "714541"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Python",
"bytes": "1164017"
},
{
"name": "Shell",
"bytes": "267"
}
],
"symlink_target": ""
}
|
from .stable_dar import StableDAR
from .dar import DAR, AR
from .har import HAR
from .preprocess import extract_driver
# Public API of the dar_model subpackage: the (driven) autoregressive model
# family plus the driver-extraction preprocessing helper.
__all__ = ['DAR',
           'HAR',
           'AR',
           'StableDAR',
           'extract_driver',
           ]
|
{
"content_hash": "20516e4afead16ff92f73fffe2f167f3",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 38,
"avg_line_length": 20,
"alnum_prop": 0.5333333333333333,
"repo_name": "RPGOne/Skynet",
"id": "1ababe31615a2b39832606e03b7005a8f1b59811",
"size": "240",
"binary": false,
"copies": "1",
"ref": "refs/heads/Miho",
"path": "pactools-master/pactools/dar_model/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "1C Enterprise",
"bytes": "36"
},
{
"name": "Ada",
"bytes": "89079"
},
{
"name": "Assembly",
"bytes": "11425802"
},
{
"name": "Batchfile",
"bytes": "123467"
},
{
"name": "C",
"bytes": "34703955"
},
{
"name": "C#",
"bytes": "55955"
},
{
"name": "C++",
"bytes": "84647314"
},
{
"name": "CMake",
"bytes": "220849"
},
{
"name": "CSS",
"bytes": "39257"
},
{
"name": "Cuda",
"bytes": "1344541"
},
{
"name": "DIGITAL Command Language",
"bytes": "349320"
},
{
"name": "DTrace",
"bytes": "37428"
},
{
"name": "Emacs Lisp",
"bytes": "19654"
},
{
"name": "Erlang",
"bytes": "39438"
},
{
"name": "Fortran",
"bytes": "16914"
},
{
"name": "HTML",
"bytes": "929759"
},
{
"name": "Java",
"bytes": "112658"
},
{
"name": "JavaScript",
"bytes": "32806873"
},
{
"name": "Jupyter Notebook",
"bytes": "1616334"
},
{
"name": "Lua",
"bytes": "22549"
},
{
"name": "M4",
"bytes": "64967"
},
{
"name": "Makefile",
"bytes": "1046428"
},
{
"name": "Matlab",
"bytes": "888"
},
{
"name": "Module Management System",
"bytes": "1545"
},
{
"name": "NSIS",
"bytes": "2860"
},
{
"name": "Objective-C",
"bytes": "131433"
},
{
"name": "PHP",
"bytes": "750783"
},
{
"name": "Pascal",
"bytes": "75208"
},
{
"name": "Perl",
"bytes": "626627"
},
{
"name": "Perl 6",
"bytes": "2495926"
},
{
"name": "PowerShell",
"bytes": "38374"
},
{
"name": "Prolog",
"bytes": "300018"
},
{
"name": "Python",
"bytes": "26363074"
},
{
"name": "R",
"bytes": "236175"
},
{
"name": "Rebol",
"bytes": "217"
},
{
"name": "Roff",
"bytes": "328366"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Scala",
"bytes": "248902"
},
{
"name": "Scheme",
"bytes": "14853"
},
{
"name": "Shell",
"bytes": "360815"
},
{
"name": "TeX",
"bytes": "105346"
},
{
"name": "Vim script",
"bytes": "6101"
},
{
"name": "XS",
"bytes": "4319"
},
{
"name": "eC",
"bytes": "5158"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.db.models import Q
from django.http import Http404
from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.authentication import BaseAuthentication
from rest_framework.decorators import (api_view, authentication_classes,
permission_classes)
from rest_framework.exceptions import ParseError, PermissionDenied
from rest_framework.fields import BooleanField, CharField
from rest_framework.filters import BaseFilterBackend, OrderingFilter
from rest_framework.mixins import (CreateModelMixin, DestroyModelMixin,
ListModelMixin, RetrieveModelMixin)
from rest_framework.parsers import FormParser, JSONParser
from rest_framework.permissions import BasePermission
from rest_framework.relations import PrimaryKeyRelatedField
from rest_framework.response import Response
from rest_framework.serializers import ModelSerializer, SerializerMethodField
from rest_framework.viewsets import GenericViewSet
from addons.models import Addon
from amo.helpers import absolutify
from amo.urlresolvers import reverse
from users.models import UserProfile
from versions.models import Version
import mkt.comm.forms as forms
import mkt.constants.comm as comm
from mkt.api.authentication import (RestOAuthAuthentication,
RestSharedSecretAuthentication)
from mkt.api.base import CORSMixin, MarketplaceView, SilentListModelMixin
from mkt.comm.models import (CommAttachment, CommunicationNote,
CommunicationNoteRead, CommunicationThread,
user_has_perm_note, user_has_perm_thread)
from mkt.comm.tasks import consume_email, mark_thread_read
from mkt.comm.utils import (create_attachments, create_comm_note,
filter_notes_by_read_status)
class AuthorSerializer(ModelSerializer):
    """Minimal serializer exposing only a note author's display name."""
    name = CharField()

    class Meta:
        model = UserProfile
        fields = ('name',)
class AttachmentSerializer(ModelSerializer):
    """Serializes a CommAttachment with a site-absolute download URL."""
    url = SerializerMethodField('get_absolute_url')
    # NOTE(review): `source` just mirrors the field name on these two;
    # redundant but harmless in this DRF version.
    display_name = CharField(source='display_name')
    is_image = BooleanField(source='is_image')

    def get_absolute_url(self, obj):
        # Absolutify turns the model's relative URL into a full site URL.
        return absolutify(obj.get_absolute_url())

    class Meta:
        model = CommAttachment
        fields = ('id', 'created', 'url', 'display_name', 'is_image')
class NoteSerializer(ModelSerializer):
    """Serializes a CommunicationNote, including per-requesting-user read
    state and the author's display name."""
    body = CharField()
    author_meta = AuthorSerializer(source='author', read_only=True)
    # Optional pointer to the note this one replies to.
    reply_to = PrimaryKeyRelatedField(required=False)
    is_read = SerializerMethodField('is_read_by_user')
    attachments = AttachmentSerializer(source='attachments', read_only=True)

    def is_read_by_user(self, obj):
        # True when the user behind the current request has read this note.
        return obj.read_by_users.filter(
            pk=self.context['request'].amo_user.id).exists()

    class Meta:
        model = CommunicationNote
        fields = ('id', 'created', 'attachments', 'author', 'author_meta',
                  'body', 'is_read', 'note_type', 'reply_to', 'thread')
class AddonSerializer(ModelSerializer):
    """Serializes the app (Addon) a thread is about, for reviewer tooling."""
    name = CharField()
    thumbnail_url = SerializerMethodField('get_icon')
    url = CharField(source='get_absolute_url')
    review_url = SerializerMethodField('get_review_url')

    class Meta:
        model = Addon
        fields = ('name', 'url', 'thumbnail_url', 'app_slug', 'slug',
                  'review_url')

    def get_icon(self, app):
        # 64px icon doubles as the thumbnail.
        return app.get_icon_url(64)

    def get_review_url(self, obj):
        return reverse('reviewers.apps.review', args=[obj.app_slug])
class ThreadSerializer(ModelSerializer):
    """Serializes a CommunicationThread plus app/version convenience data."""
    addon_meta = AddonSerializer(source='addon', read_only=True)
    recent_notes = SerializerMethodField('get_recent_notes')
    notes_count = SerializerMethodField('get_notes_count')
    version_number = SerializerMethodField('get_version_number')
    version_is_obsolete = SerializerMethodField('get_version_is_obsolete')

    class Meta:
        model = CommunicationThread
        fields = ('id', 'addon', 'addon_meta', 'version', 'notes_count',
                  'recent_notes', 'created', 'modified', 'version_number',
                  'version_is_obsolete')
        view_name = 'comm-thread-detail'

    def get_recent_notes(self, obj):
        # The latest five notes the requesting user is allowed to see.
        notes = (obj.notes.with_perms(self.get_request().amo_user, obj)
                 .order_by('-created')[:5])
        return NoteSerializer(
            notes, many=True, context={'request': self.get_request()}).data

    def get_notes_count(self, obj):
        return obj.notes.count()

    def get_version_number(self, obj):
        # Deleted versions are included so old threads keep their label;
        # an entirely missing version serializes as ''.
        try:
            return Version.with_deleted.get(id=obj.version_id).version
        except Version.DoesNotExist:
            return ''

    def get_version_is_obsolete(self, obj):
        # A missing version record is treated as obsolete.
        try:
            return Version.with_deleted.get(id=obj.version_id).deleted
        except Version.DoesNotExist:
            return True
class ThreadPermission(BasePermission):
    """
    Permission wrapper for checking if the authenticated user has the
    permission to view the thread.
    """

    def has_permission(self, request, view):
        # Retrieval defers entirely to has_object_permission; every other
        # action requires an authenticated user.
        if view.action != 'retrieve' and not request.user.is_authenticated():
            raise PermissionDenied()
        return True

    def has_object_permission(self, request, view, obj):
        """
        Make sure we give correct permissions to read/write the thread.
        """
        if request.user.is_authenticated() and not obj.read_permission_public:
            return user_has_perm_thread(obj, request.amo_user)
        # Anonymous users, and public threads, resolve to the public flag.
        return obj.read_permission_public
class NotePermission(ThreadPermission):
    """Note access is governed by the permission on the enclosing thread."""

    def has_permission(self, request, view):
        # Stash the thread on the view for later handlers; 404 on a bad id.
        view.comm_thread = get_object_or_404(
            CommunicationThread, id=view.kwargs['thread_id'])
        if view.action == 'create' and not request.user.is_authenticated():
            return False
        if view.action in ('list', 'create'):
            # Listing or adding notes follows the thread's own permission.
            return ThreadPermission.has_object_permission(
                self, request, view, view.comm_thread)
        return True

    def has_object_permission(self, request, view, obj):
        # Requires thread-level AND note-level permission.
        thread_ok = ThreadPermission.has_object_permission(
            self, request, view, obj.thread)
        return thread_ok and user_has_perm_note(obj, request.amo_user)
class EmailCreationPermission(object):
    """Permit if client's IP address is whitelisted."""

    def has_permission(self, request, view):
        # NOTE(review): `not in` treats POSTFIX_AUTH_TOKEN as a collection
        # of accepted tokens; if the setting is a single string this is a
        # substring test — confirm the setting's type.
        auth_token = request.META.get('HTTP_POSTFIX_AUTH_TOKEN')
        if auth_token and auth_token not in settings.POSTFIX_AUTH_TOKEN:
            return False
        # The sender's IP must additionally be explicitly whitelisted.
        remote_ip = request.META.get('REMOTE_ADDR')
        return remote_ip and (
            remote_ip in settings.WHITELISTED_CLIENTS_EMAIL_API)
class NoAuthentication(BaseAuthentication):
    """Pass-through authentication: exposes whatever user Django already
    resolved on the underlying request, without further checks."""
    def authenticate(self, request):
        return request._request.user, None
class ReadUnreadFilter(BaseFilterBackend):
    """Filters notes by read state via the `show_read` query parameter."""
    filter_param = 'show_read'

    def filter_queryset(self, request, queryset, view):
        """
        Return only read notes if `show_read=true` and only unread notes
        if `show_read=false`; leave the queryset untouched when the
        parameter is absent.
        """
        # Use the declared filter_param instead of re-hardcoding the name,
        # so subclasses overriding filter_param actually take effect.
        val = request.GET.get(self.filter_param)
        if val is None:
            return queryset
        show_read = BooleanField().from_native(val)
        return filter_notes_by_read_status(queryset, request.amo_user,
                                           show_read)
class CommViewSet(CORSMixin, MarketplaceView, GenericViewSet):
    """Some overriding and mixin stuff to adapt other viewsets."""
    parser_classes = (FormParser, JSONParser)

    def patched_get_request(self):
        # Returns a callable that ignores its argument and always yields
        # this viewset's current request; serializers call it in place of
        # their own get_request.
        return lambda x: self.request

    def get_serializer_class(self):
        original = super(CommViewSet, self).get_serializer_class()
        # NOTE(review): this mutates the serializer *class* attribute, so
        # the most recently handled request wins across all instances —
        # confirm this is intended under concurrency.
        original.get_request = self.patched_get_request()
        return original

    def partial_update(self, request, *args, **kwargs):
        # Only `is_read: true` PATCHes are supported; anything else is 403.
        val = BooleanField().from_native(request.DATA.get('is_read'))
        if val:
            self.mark_as_read(request.amo_user)
            return Response(status=status.HTTP_204_NO_CONTENT)
        else:
            return Response('Requested update operation not supported',
                            status=status.HTTP_403_FORBIDDEN)
class ThreadViewSet(SilentListModelMixin, RetrieveModelMixin,
                    DestroyModelMixin, CreateModelMixin, CommViewSet):
    """List/retrieve/create/destroy endpoints for communication threads."""
    model = CommunicationThread
    serializer_class = ThreadSerializer
    authentication_classes = (RestOAuthAuthentication,
                              RestSharedSecretAuthentication)
    permission_classes = (ThreadPermission,)
    filter_backends = (OrderingFilter,)
    cors_allowed_methods = ['get', 'post', 'patch']

    def list(self, request):
        """List threads the user posted to, is CC'd on, or may read as a
        developer of the app; optionally scoped to one app via ?app=."""
        self.serializer_class = ThreadSerializer
        profile = request.amo_user
        # We list all the threads the user has posted a note to.
        notes = profile.comm_notes.values_list('thread', flat=True)
        # We list all the threads where the user has been CC'd.
        cc = profile.comm_thread_cc.values_list('thread', flat=True)
        # This gives 404 when an app with given slug/id is not found.
        data = {}
        if 'app' in request.GET:
            form = forms.AppSlugForm(request.GET)
            if not form.is_valid():
                raise Http404()
            notes, cc = list(notes), list(cc)
            # TODO: use CommunicationThread.with_perms once other PR merged in.
            queryset = CommunicationThread.objects.filter(pk__in=notes + cc,
                addon=form.cleaned_data['app'])
            # Thread IDs and version numbers from same app.
            data['app_threads'] = list(queryset.order_by('version__version')
                .values('id', 'version__version'))
        else:
            # We list all the threads which uses an add-on authored by the
            # user and with read permissions for add-on devs.
            notes, cc = list(notes), list(cc)
            addons = list(profile.addons.values_list('pk', flat=True))
            q_dev = Q(addon__in=addons, read_permission_developer=True)
            queryset = CommunicationThread.objects.filter(
                Q(pk__in=notes + cc) | q_dev)
        self.queryset = queryset
        res = SilentListModelMixin.list(self, request)
        if res.data:
            res.data.update(data)
        return res

    def retrieve(self, *args, **kwargs):
        res = super(ThreadViewSet, self).retrieve(*args, **kwargs)
        # Thread IDs and version numbers from same app.
        res.data['app_threads'] = list(
            CommunicationThread.objects.filter(addon_id=res.data['addon'])
            .order_by('version__version').values('id', 'version__version'))
        return res

    def create(self, request, *args, **kwargs):
        """Create a thread together with its first note."""
        form = forms.CreateCommThreadForm(request.DATA)
        if not form.is_valid():
            return Response(
                form.errors, status=status.HTTP_400_BAD_REQUEST)
        app = form.cleaned_data['app']
        version = form.cleaned_data['version']
        thread, note = create_comm_note(
            app, version, request.amo_user, form.cleaned_data['body'],
            note_type=form.cleaned_data['note_type'])
        return Response(
            NoteSerializer(note, context={'request': self.request}).data,
            status=status.HTTP_201_CREATED)

    def mark_as_read(self, profile):
        # Invoked by CommViewSet.partial_update on PATCH {is_read: true}.
        mark_thread_read(self.get_object(), profile)
class NoteViewSet(ListModelMixin, CreateModelMixin, RetrieveModelMixin,
                  DestroyModelMixin, CommViewSet):
    """CRUD endpoints for the notes inside one communication thread."""
    model = CommunicationNote
    serializer_class = NoteSerializer
    authentication_classes = (RestOAuthAuthentication,
                              RestSharedSecretAuthentication)
    permission_classes = (NotePermission,)
    cors_allowed_methods = ['get', 'patch', 'post']

    def get_queryset(self):
        # self.comm_thread is stashed by NotePermission.has_permission.
        return CommunicationNote.objects.with_perms(
            self.request.amo_user, self.comm_thread)

    def create(self, request, *args, **kwargs):
        """Create a note on the thread named in the URL kwargs."""
        thread = get_object_or_404(CommunicationThread, id=kwargs['thread_id'])
        # Validate note.
        form = forms.CreateCommNoteForm(request.DATA)
        if not form.is_valid():
            return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)
        # Create notes.
        thread, note = create_comm_note(
            thread.addon, thread.version, self.request.amo_user,
            form.cleaned_data['body'],
            note_type=form.cleaned_data['note_type'])
        self.attach_as_reply(note)
        return Response(
            NoteSerializer(note, context={'request': request}).data,
            status=status.HTTP_201_CREATED)

    def attach_as_reply(self, note):
        # Hook: no-op here; ReplyViewSet overrides it to link the parent.
        pass

    def mark_as_read(self, profile):
        # Record a per-user read receipt for this note.
        CommunicationNoteRead.objects.get_or_create(note=self.get_object(),
                                                    user=profile)
class AttachmentViewSet(CreateModelMixin, CommViewSet):
    """Create endpoint for file attachments on an existing note."""
    model = CommAttachment
    authentication_classes = (RestOAuthAuthentication,
                              RestSharedSecretAuthentication)
    permission_classes = (NotePermission,)
    cors_allowed_methods = ['post']

    def create(self, request, note_id, *args, **kwargs):
        """Attach uploaded files to the note; only its author may do so."""
        note = get_object_or_404(CommunicationNote, id=note_id)
        if not note.author.id == request.amo_user.id:
            return Response(
                [{'non_field_errors':
                  'You must be owner of the note to attach a file.'}],
                status=status.HTTP_403_FORBIDDEN)
        # Validate attachment.
        attachment_formset = None
        if request.FILES:
            data = request.POST.copy()
            data.update({
                'form-TOTAL_FORMS': len([k for k in request.FILES if
                                         k.endswith('-attachment')]),
                'form-INITIAL_FORMS': 0,
                'form-MAX_NUM_FORMS': comm.MAX_ATTACH
            })
            if data['form-TOTAL_FORMS'] > comm.MAX_ATTACH:
                # TODO: use formset validate_max=True in Django 1.6.
                # Fix: interpolate the limit — the original string shipped a
                # literal '%s' to the client.
                return Response(
                    [{'non_field_errors':
                      'Maximum of %s files can be attached.'
                      % comm.MAX_ATTACH}],
                    status=status.HTTP_400_BAD_REQUEST)
            attachment_formset = forms.CommAttachmentFormSet(
                data=data, files=request.FILES or None)
            if not attachment_formset.is_valid():
                return Response(attachment_formset.errors,
                                status=status.HTTP_400_BAD_REQUEST)
        else:
            return Response([{'non_field_errors': 'No files were attached.'}],
                            status=status.HTTP_400_BAD_REQUEST)
        # Create attachment.
        if attachment_formset:
            create_attachments(note, attachment_formset)
        return Response(
            NoteSerializer(note, context={'request': request}).data,
            status=status.HTTP_201_CREATED)

    def attach_as_reply(self, note):
        # No-op hook kept for interface parity with NoteViewSet; nothing in
        # this viewset calls it.
        pass

    def mark_as_read(self, profile):
        # Invoked by CommViewSet.partial_update on PATCH {is_read: true}.
        CommunicationNoteRead.objects.get_or_create(note=self.get_object(),
                                                    user=profile)
class ReplyViewSet(NoteViewSet):
    """A note, but a reply to another note."""
    cors_allowed_methods = ['get', 'post']

    def initialize_request(self, request, *args, **kwargs):
        # Resolve the parent note up front (404 on a bad id) so both
        # get_queryset and attach_as_reply can rely on it.
        self.parent_note = get_object_or_404(CommunicationNote,
                                             id=kwargs['note_id'])
        return super(ReplyViewSet, self).initialize_request(request, *args,
                                                            **kwargs)

    def get_queryset(self):
        # Only the direct replies of the parent note.
        return self.parent_note.replies.all()

    def attach_as_reply(self, obj):
        # Link the freshly created note back to its parent.
        obj.update(reply_to=self.parent_note)
@api_view(['POST'])
@authentication_classes((NoAuthentication,))
@permission_classes((EmailCreationPermission,))
def post_email(request):
    """Accept a raw inbound email body and queue it for async consumption."""
    body = request.POST.get('body')
    if not body:
        raise ParseError(
            detail='email_body not present in the POST data.')
    consume_email.apply_async((body,))
    return Response(status=status.HTTP_201_CREATED)
|
{
"content_hash": "9469b6e662e55840c0eac04c30d6ca1b",
"timestamp": "",
"source": "github",
"line_count": 454,
"max_line_length": 79,
"avg_line_length": 37.28854625550661,
"alnum_prop": 0.6295705593951207,
"repo_name": "spasovski/zamboni",
"id": "ed11d6e6ad871ec47d5eb14d3ae1417847d2c7df",
"size": "16929",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mkt/comm/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4145"
},
{
"name": "CSS",
"bytes": "885279"
},
{
"name": "JavaScript",
"bytes": "1677601"
},
{
"name": "Puppet",
"bytes": "13808"
},
{
"name": "Python",
"bytes": "6279560"
},
{
"name": "Shell",
"bytes": "19774"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import os
import mopidy
from mopidy import config, exceptions, ext
# Extension version as reported to Mopidy.
__version__ = '0.1'
class GMusicExtension(ext.Extension):
    """Mopidy extension wiring up the Google Music backend."""

    dist_name = 'Mopidy-GMusic'
    ext_name = 'gmusic'
    version = __version__

    def get_default_config(self):
        """Read the bundled ext.conf that ships next to this module."""
        conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
        return config.read(conf_file)

    def get_config_schema(self):
        """Extend the base schema with the Google account credentials."""
        schema = super(GMusicExtension, self).get_config_schema()
        schema['username'] = config.String()
        schema['password'] = config.Secret()
        return schema

    def validate_environment(self):
        """Fail early with an ExtensionError when gmusicapi is missing."""
        # (Removed a stray `pass` that followed the try/except.)
        try:
            import gmusicapi  # noqa: F401 - imported only to probe presence
        except ImportError as e:
            raise exceptions.ExtensionError('gmusicapi library not found', e)

    def get_backend_classes(self):
        # Imported lazily so the extension can be inspected without the
        # backend's dependencies.
        from .actor import GMusicBackend
        return [GMusicBackend]
|
{
"content_hash": "a0e0a1befaaf93466cd83ad44f35b48e",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 77,
"avg_line_length": 24.91891891891892,
"alnum_prop": 0.6366594360086768,
"repo_name": "shapr/mopidy-gmusic",
"id": "fed5dec71038f746b427f61d89bf5f30b617fed4",
"size": "922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mopidy_gmusic/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10246"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from flask import abort
from werkzeug.exceptions import ServiceUnavailable
from r5d4.analytics import Analytics
from r5d4.flask_redis import get_conf_db, get_data_db
from r5d4.mapping_functions import DIMENSION_EXPANSION_MAP
from r5d4.utility import construct_key
def combinatorial_keys(rem_range):
    """Yield flattened (dim, value, dim, value, ...) tuples for every
    combination of values across the given (dimension, range) pairs.

    >>> list(combinatorial_keys([("d1", [1,2]), ("d2", [3,4])])) == \\
    [('d1', 1, 'd2', 3), ('d1', 1, 'd2', 4), ('d1', 2, 'd2', 3), \\
    ('d1', 2, 'd2', 4)]
    True
    """
    if not rem_range:
        # Base case: a single empty key.
        yield ()
        return
    (dimension, dim_range), tail_range = rem_range[0], rem_range[1:]
    for dim_value in dim_range:
        prefix = (dimension, dim_value)
        for suffix in combinatorial_keys(tail_range):
            yield prefix + suffix
def browse_analytics(a_name, slice_args):
    """Evaluate the active analytics `a_name` over the requested slice.

    Loads the analytics definition from the config DB, expands the slice
    parameters into per-dimension value ranges, then aggregates each
    measure over every combination of query-dimension values, reading the
    stored counters/sets from the data DB.  Returns a JSON-able dict
    {"status": "OK", "data": [...]}.

    Aborts with 404 for an unknown/inactive analytics, 400 for bad or
    missing slice parameters, 503 when the definition fails validation.

    NOTE(review): written for Python 2 — `get_range` relies on `map`
    returning a list (the result is sliced and truth-tested inside
    combinatorial_keys), so it must be wrapped in list() under Python 3.
    """
    conf_db = get_conf_db()
    if not conf_db.sismember("Analytics:Active", a_name):
        abort(404)
    analytics_definition = conf_db.get("Analytics:ByName:%s" % a_name)
    if analytics_definition is None:
        abort(404)
    try:
        analytics = Analytics(analytics_definition)
    except (ValueError, AssertionError) as e:
        raise ServiceUnavailable(e.args)
    data_db = get_data_db(analytics["data_db"])
    mapping = analytics["mapping"]
    measures = analytics["measures"]
    query_dimensions = set(analytics["query_dimensions"])
    slice_dimensions = set(analytics["slice_dimensions"])
    # Expand each slice dimension's raw request argument into a value set.
    d_range = []
    for d in slice_dimensions:
        expand = DIMENSION_EXPANSION_MAP[mapping[d]["type"]]
        try:
            value_set = expand(slice_args[d])
            d_range.append((d, value_set))
        except ValueError as e:
            abort(400, e.args)
        except KeyError as e:
            abort(400, ("Missing slice parameter", str(e.args[0])))
    d_range_dict = dict(d_range)
    def get_range(dimensions):
        # Sorted (dimension, sorted-values) pairs give deterministic keys.
        d_range = map(lambda d: (d, sorted(list(d_range_dict[d]))),
                      sorted(list(dimensions)))
        return d_range
    qnos_dimensions = query_dimensions - slice_dimensions
    snoq_dimensions = slice_dimensions - query_dimensions
    s_range = get_range(slice_dimensions)
    snoq_range = get_range(snoq_dimensions)
    # For query dimensions absent from the slice, discover their values
    # from the RefCount hashes stored in the data DB.
    for qnos in qnos_dimensions:
        d_range_dict[qnos] = set()
        for s_key in combinatorial_keys(s_range):
            refcount_key_str = construct_key('RefCount', s_key, qnos)
            d_range_dict[qnos] |= set(data_db.hkeys(refcount_key_str))
    q_range = get_range(query_dimensions)
    output = []
    for q_key in combinatorial_keys(q_range):
        # Turn the flat key tuple into a dict of dimension -> value.
        row = {}
        key_is_set = False
        key = None
        for q in q_key:  # q_key=(Date,20110808,Practice,1)
            if not key_is_set:
                key = q
                key_is_set = True
            else:
                row[key] = q
                key_is_set = False
        for measure in measures:
            if mapping[measure]["type"][-5:] == "float":
                is_float = True
            else:
                is_float = False
            row[measure] = 0
            snoq_keys = list(combinatorial_keys(snoq_range))
            if len(snoq_keys) < 2:
                # Zero or one slice-only combination: read a single key.
                if len(snoq_keys) == 1:
                    snoq_key = snoq_keys[0]
                else:
                    snoq_key = None
                val_key = construct_key(measure, q_key, snoq_key)
                if mapping[measure]["type"] == "unique":
                    val = data_db.scard(val_key)
                else:
                    val = data_db.get(val_key)
                if val:
                    if is_float:
                        row[measure] = float(val)
                    else:
                        row[measure] = int(val)
            else:
                # Several slice-only combinations: sum per-key values;
                # 'unique' measures cannot be summed across keys.
                for snoq_key in snoq_keys:
                    val_key = construct_key(measure, q_key, snoq_key)
                    if mapping[measure]["type"] == "unique":
                        abort(400, (
                            "Measure type 'unique' cannot be aggregated"))
                    else:
                        val = data_db.get(val_key)
                        if val:
                            if is_float:
                                row[measure] += float(val)
                            else:
                                row[measure] += int(val)
        output.append(row)
    output_response = {
        "status": "OK",
        "data": output
    }
    return output_response
|
{
"content_hash": "13a4f949ef95e6648f894804d05edac6",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 74,
"avg_line_length": 34.565891472868216,
"alnum_prop": 0.52164162368244,
"repo_name": "practo/r5d4",
"id": "3fe0df42eafcdf30b1cf7c9a92659bdad4ddf8a6",
"size": "4459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "r5d4/analytics_browser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52496"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make hub.room optional (null/blank) while keeping CASCADE deletes."""

    dependencies = [
        ('hubs', '0002_auto_20160929_0301'),
    ]

    operations = [
        migrations.AlterField(
            model_name='hub',
            name='room',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='rooms.Room'),
        ),
    ]
|
{
"content_hash": "38a66ae3d0c46ffe65f36d13b3b2d374",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 121,
"avg_line_length": 24.94736842105263,
"alnum_prop": 0.6286919831223629,
"repo_name": "j-windsor/iRiot-WebApp",
"id": "498ccb93a7e6f845d85c34ed3bb85983187d44c3",
"size": "546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hubs/migrations/0003_auto_20160929_0302.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "23695"
},
{
"name": "Python",
"bytes": "46591"
},
{
"name": "Shell",
"bytes": "3258"
}
],
"symlink_target": ""
}
|
import leancloud
#from leancloud import User
from leancloud import Object
# WARNING(review): hard-coded application credentials; move these to
# environment variables or a config file before sharing or deploying.
leancloud.init('73b6c6p6lgs8s07m6yaq5jeu7e19j3i3x7fdt234ufxw9ity', 'h5lu7ils6mutvirgrxeodo6xfuqcgxh4ny0bdar3utl076cu')
class Mail(Object):
    # LeanCloud Object subclass exposing the 'subject' field as a property
    # (the property is defined but unused by the demo code below).
    @property
    def subject(self):
        # Read the backing 'subject' field.
        return self.get('subject')
    @subject.setter
    def subject(self, value):
        return self.set('subject', value)
# Demo (Python 2 script — note the `print` statement): build one mail
# record, save it to LeanCloud, and show the server-assigned id.
newMail = Mail()
newMail.set('subject','test mail123')
newMail.set('to',[{'email':'philipp.xue@gmail.com','type':'to'}])
newMail.set('html','this is contnet')
newMail.save()
print newMail.id
|
{
"content_hash": "a846fd34061f19023f4b4c8163d3d6f8",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 118,
"avg_line_length": 28.55,
"alnum_prop": 0.7320490367775832,
"repo_name": "iforgotid/webmail",
"id": "b0aa10106452535300a90b75969db8ae3e1253d9",
"size": "571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/leancloudDemo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "684"
},
{
"name": "HTML",
"bytes": "6474"
},
{
"name": "JavaScript",
"bytes": "6143"
},
{
"name": "Python",
"bytes": "11095"
}
],
"symlink_target": ""
}
|
from mock import patch
import unittest
import urllib2
from weblab.experiment.devices.http import HttpDevice, WlHttpDeviceURLErrorError, WlHttpDeviceHTTPErrorError, WlHttpDeviceError
from test.util.fakeobjects import fakeaddinfourl
class HttpDeviceTestCase(unittest.TestCase):
    """Unit tests for HttpDevice.send_message error translation.

    urllib2.urlopen is patched in every test so no real network traffic
    occurs; each case checks that a given urlopen outcome maps to the
    expected WlHttp* exception (or to the plain body on success).
    """
    @patch('urllib2.urlopen')
    def test_ok_response(self, urlopen):
        # Success path: the response body is returned verbatim.
        urlopen.return_value = fakeaddinfourl('OK')
        device = HttpDevice("localhost", 7779)
        resp = device.send_message("command")
        self.assertEquals('OK', resp)
    @patch('urllib2.urlopen')
    def test_http_error_response(self, urlopen):
        # An HTTP-level error (e.g. 401) becomes WlHttpDeviceHTTPErrorError.
        urlopen.side_effect = urllib2.HTTPError('', 401, '', {}, None)
        device = HttpDevice("localhost", 7779)
        self.assertRaises(
            WlHttpDeviceHTTPErrorError,
            device.send_message,
            "command"
        )
    @patch('urllib2.urlopen')
    def test_url_error_response(self, urlopen):
        # A transport-level error becomes WlHttpDeviceURLErrorError.
        urlopen.side_effect = urllib2.URLError('error message')
        device = HttpDevice("localhost", 7779)
        self.assertRaises(
            WlHttpDeviceURLErrorError,
            device.send_message,
            "command"
        )
    @patch('urllib2.urlopen')
    def test_general_error_response(self, urlopen):
        # Any other exception is wrapped in the generic WlHttpDeviceError.
        urlopen.side_effect = Exception('error message')
        device = HttpDevice("localhost", 7779)
        self.assertRaises(
            WlHttpDeviceError,
            device.send_message,
            "command"
        )
def suite():
    # Suite factory used by the project-wide test aggregator.
    return unittest.makeSuite(HttpDeviceTestCase)

if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "f4901fc97ccb004407d2fe64a6125725",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 127,
"avg_line_length": 28.280701754385966,
"alnum_prop": 0.642059553349876,
"repo_name": "ganeshgore/myremolab",
"id": "5f032d4a954be7ce27b345e2f3e709989c45125a",
"size": "1997",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "server/src/test/unit/weblab/experiment/devices/test_http.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP",
"bytes": "4785"
},
{
"name": "C#",
"bytes": "265761"
},
{
"name": "CSS",
"bytes": "39653"
},
{
"name": "Java",
"bytes": "689284"
},
{
"name": "JavaScript",
"bytes": "74198"
},
{
"name": "PHP",
"bytes": "97324"
},
{
"name": "Python",
"bytes": "5335681"
},
{
"name": "Shell",
"bytes": "794"
},
{
"name": "VHDL",
"bytes": "1372"
}
],
"symlink_target": ""
}
|
"""
WSGI config for ExpLosion project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ExpLosion.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
{
"content_hash": "70d670e3744be1c1cfad1b2543ac23c2",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 28.071428571428573,
"alnum_prop": 0.7760814249363868,
"repo_name": "mbatchkarov/ExpLosion",
"id": "4b889248c2b07683a584ae971ed22527fec3d3bb",
"size": "393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ExpLosion/wsgi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "520"
},
{
"name": "CoffeeScript",
"bytes": "1433"
},
{
"name": "HTML",
"bytes": "3041"
},
{
"name": "Python",
"bytes": "50771"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class CustomdataValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``customdata`` data array of ``choropleth`` traces."""

    def __init__(self, plotly_name="customdata", parent_name="choropleth", **kwargs):
        # Callers may override the edit type; "calc" is the default.
        edit_type = kwargs.pop("edit_type", "calc")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
{
"content_hash": "0a01bd65ecef3bcc6ec0cc2874b660bb",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 85,
"avg_line_length": 37.45454545454545,
"alnum_prop": 0.6359223300970874,
"repo_name": "plotly/plotly.py",
"id": "2de9dea3e96ddf815aad0f613c88f4e9ef0a1520",
"size": "412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/choropleth/_customdata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
# Prefer setuptools when available; fall back to distutils on older installs.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Package metadata, kept in one mapping so it can be inspected or reused.
config = {
    'name': 'oxford_language_model',
    'version': '0.1',
    'description': "Python client for Microsoft's Project Oxford web language model",
    'author': 'Will Fitzgerald',
    'author_email': 'Will.Fitzgerald@gmail.com',
    'url': 'https://github.com/willf/oxford_language_model',
    'download_url': 'https://github.com/willf/oxford_language_model',
    'install_requires': ['requests'],
    'packages': ["oxford_language_model"],
    'scripts': [],
}

setup(**config)
|
{
"content_hash": "ac52ef7548c049b5bc63e2aaf839d64f",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 86,
"avg_line_length": 30.68421052631579,
"alnum_prop": 0.6638078902229846,
"repo_name": "willf/oxford_language_model",
"id": "3672fbcd05d82c347b97adb12ccebd4b0efba2dc",
"size": "583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8447"
}
],
"symlink_target": ""
}
|
import numpy as np
from bayesnet.tensor.constant import Constant
from bayesnet.tensor.tensor import Tensor
from bayesnet.function import Function
class Swapaxes(Function):
    """Autodiff function that interchanges two axes of its input."""

    def __init__(self, axis1, axis2):
        # The pair of axes to swap in forward (and to swap back in backward).
        self.axis1 = axis1
        self.axis2 = axis2

    def forward(self, x):
        x = self._convert2tensor(x)
        self.x = x
        swapped = np.swapaxes(x.value, self.axis1, self.axis2)
        if isinstance(x, Constant):
            # Constants carry no gradient, so no function link is recorded.
            return Constant(swapped)
        return Tensor(swapped, function=self)

    def backward(self, delta):
        # Swapping the same pair of axes again restores the original layout.
        self.x.backward(np.swapaxes(delta, self.axis2, self.axis1))
def swapaxes(x, axis1, axis2):
    """
    Interchange two axes of an array.

    Parameters
    ----------
    x : np.ndarray
        input array
    axis1 : int
        first axis
    axis2 : int
        second axis

    Returns
    -------
    output : np.ndarray
        array with ``axis1`` and ``axis2`` interchanged
    """
    op = Swapaxes(axis1, axis2)
    return op.forward(x)
|
{
"content_hash": "f6c596d13e8780342f7874c687f9d2ea",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 82,
"avg_line_length": 23.46511627906977,
"alnum_prop": 0.6164519326065411,
"repo_name": "ctgk/BayesianNetwork",
"id": "ce3434ad4b31d6c7c037a73ad418f3a7903196f2",
"size": "1009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bayesnet/array/swapaxes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "490838"
},
{
"name": "Python",
"bytes": "138141"
}
],
"symlink_target": ""
}
|
from zoo.chronos.autots.deprecated.feature.utils import save_config
from zoo.chronos.autots.deprecated.feature.abstract import BaseFeatureTransformer
from zoo.chronos.utils import deprecated
import sklearn
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
import json
from packaging import version
# Datetime attributes extracted from the dt column as generated features
# (each maps to a pandas Series.dt accessor in _generate_features).
TIME_FEATURE = ("MINUTE", "DAY", "DAYOFYEAR", "HOUR", "WEEKDAY", "WEEKOFYEAR", "MONTH")
# Derived boolean time features: awake hours, rush hours, weekend flag.
ADDITIONAL_TIME_FEATURE = ("IS_AWAKE", "IS_BUSY_HOURS", "IS_WEEKEND")
class TimeSequenceFeatureTransformer(BaseFeatureTransformer):
    """
    TimeSequence feature engineering.

    Generates calendar/time features from the datetime column, standard-scales
    the data, and rolls it into (past sequence, future sequence) samples for
    time-sequence model training and inference.
    """

    def __init__(self, future_seq_len=1,
                 dt_col="datetime",
                 target_col=["value"],
                 extra_features_col=None,
                 drop_missing=True,
                 time_features=True):
        """
        Constructor.

        :param future_seq_len: the future sequence length to be predicted
        :param dt_col: name of datetime column in the input data frame
        :param target_col: name (str) or list of names of the target column(s)
        :param extra_features_col: names of extra feature columns used to predict the targets
        :param drop_missing: whether to drop missing values in the curve. If False,
                             an error is raised when missing values are found; if True,
                             rows with missing values are dropped silently.
        :param time_features: whether to generate time features from dt_col
        """
        # self.scaler = MinMaxScaler()
        self.scaler = StandardScaler()
        self.config = None
        self.dt_col = dt_col
        if isinstance(target_col, str):
            self.target_col = [target_col]
        else:
            # copy, so later mutation of self.target_col can never leak into
            # the caller's list or the shared mutable default argument
            self.target_col = list(target_col)
        self.extra_features_col = extra_features_col
        self.feature_data = None
        self.drop_missing = drop_missing
        self.generate_feature_list = None
        self.past_seq_len = None
        self.future_seq_len = future_seq_len
        self.time_features = time_features

    def _fit_transform(self, input_df):
        """
        Fit the scaler on one data frame and roll it into (x, y) training arrays.

        :param input_df: one input time series data frame with dt_col,
                         target_col(s) and optional extra feature columns
        :return: tuple (x, y) in the format documented on fit_transform
        """
        self._check_input(input_df, mode="train")
        feature_data = self._get_features(input_df, self.config)
        self.scaler.fit(feature_data)
        data_n = self._scale(feature_data)
        # sanity check: after standardisation the first column has ~zero mean
        assert np.mean(data_n[0]) < 1e-5
        (x, y) = self._roll_train(data_n,
                                  past_seq_len=self.past_seq_len,
                                  future_seq_len=self.future_seq_len)
        return x, y

    def fit_transform(self, input_df, **config):
        """
        Fit data and transform the raw data to features. This is used in training for
        hyper parameter searching; it refreshes the scaler parameters (mean/scale).

        :param input_df: one data frame or a list of data frames. Example:
                         datetime    value   "extra feature 1"   "extra feature 2"
                         2019-01-01  1.9     1                   2
                         2019-01-02  2.3     0                   2
        :return: tuple (x, y)
            x: 3-d array (no. of samples, past sequence length, 2+feature length);
               in the last dimension the 1st col is the time index (numpy
               datetime type, e.g. "datetime64"), the 2nd col is the numeric
               target value
            y: 2-d numpy array (no. of samples, future sequence length) if the
               future sequence length > 1, else 1-d array (no. of samples,)
        """
        self.config = self._get_feat_config(**config)
        if isinstance(input_df, list):
            train_x_list = []
            train_y_list = []
            for df in input_df:
                x, y = self._fit_transform(df)
                train_x_list.append(x)
                train_y_list.append(y)
            train_x = np.concatenate(train_x_list, axis=0)
            train_y = np.concatenate(train_y_list, axis=0)
        else:
            train_x, train_y = self._fit_transform(input_df)
        return train_x, train_y

    def _transform(self, input_df, mode):
        """
        Transform one data frame using the configuration set up by fit_transform.

        :param input_df: one input time series data frame
        :param mode: 'val' (returns x and y) or 'test' (returns x and None)
        :return: tuple (x, y) in the format documented on fit_transform;
                 y is None in 'test' mode
        """
        self._check_input(input_df, mode)
        # generate features
        feature_data = self._get_features(input_df, self.config)
        # select and standardize data
        data_n = self._scale(feature_data)
        if mode == 'val':
            (x, y) = self._roll_train(data_n,
                                      past_seq_len=self.past_seq_len,
                                      future_seq_len=self.future_seq_len)
            return x, y
        else:
            x = self._roll_test(data_n, past_seq_len=self.past_seq_len)
            return x, None

    def transform(self, input_df, is_train=True):
        """
        Transform data into features using the configuration from
        fit_transform or restore.

        :param input_df: one data frame or a list of data frames
        :param is_train: whether input_df is for training ('val' mode)
        :return: tuple (x, y) in the format documented on fit_transform;
                 y is None when is_train is False
        :raise Exception: if neither fit_transform nor restore was called first
        """
        if self.config is None or self.past_seq_len is None:
            raise Exception("Needs to call fit_transform or restore first before calling transform")
        mode = "val" if is_train else "test"
        if isinstance(input_df, list):
            output_x_list = []
            output_y_list = []
            for df in input_df:
                if mode == 'val':
                    x, y = self._transform(df, mode)
                    output_x_list.append(x)
                    output_y_list.append(y)
                else:
                    x, _ = self._transform(df, mode)
                    output_x_list.append(x)
            output_x = np.concatenate(output_x_list, axis=0)
            if output_y_list:
                output_y = np.concatenate(output_y_list, axis=0)
            else:
                output_y = None
        else:
            output_x, output_y = self._transform(input_df, mode)
        return output_x, output_y

    def _unscale(self, y):
        """Inverse-transform scaled target values (StandardScaler only)."""
        # for standard scaler: x_orig = x_scaled * scale + mean
        y_unscale = np.zeros(y.shape)
        # NOTE(review): for multiple targets with future_seq_len > 1 these
        # slices overlap; this looks intended for single-target or
        # future_seq_len == 1 -- confirm against callers.
        for i in range(len(self.target_col)):
            value_mean = self.scaler.mean_[i]
            value_scale = self.scaler.scale_[i]
            y_unscale[:, i:i+self.future_seq_len] = \
                y[:, i:i+self.future_seq_len] * value_scale + value_mean
        return y_unscale

    def unscale_uncertainty(self, y_uncertainty):
        """Unscale prediction uncertainties (scale only; no mean shift)."""
        y_uncertainty_unscale = np.zeros(y_uncertainty.shape)
        for i in range(len(self.target_col)):
            value_scale = self.scaler.scale_[i]
            if len(self.target_col) == 1:
                # single target: uncertainties may be any shape, scale directly
                y_uncertainty_unscale = y_uncertainty * value_scale
            else:
                y_uncertainty_unscale[:, :, i] = y_uncertainty[:, :, i] * value_scale
        return y_uncertainty_unscale

    def _get_y_pred_df(self, y_pred_dt_df, y_pred_unscale):
        """
        Get prediction data frame with datetime column and target column(s).

        :param y_pred_dt_df: data frame holding the prediction datetimes
        :param y_pred_unscale: unscaled prediction values (ndarray)
        :return: prediction data frame. If future_seq_len is 1, the columns are
                 datetime | {target_col}. Otherwise they are
                 datetime | {target_col}_0 | {target_col}_1 | ...
        """
        y_pred_df = y_pred_dt_df
        if self.future_seq_len > 1:
            for i in range(self.future_seq_len):
                for j in range(len(self.target_col)):
                    column = self.target_col[j] + "_" + str(i)
                    # NOTE(review): the slice only uses i, so every target j is
                    # assigned the same values -- verify the expected shape of
                    # y_pred_unscale for multi-target prediction.
                    y_pred_df[column] = pd.DataFrame(y_pred_unscale[:, i])
        else:
            y_pred_df[self.target_col] = pd.DataFrame(y_pred_unscale)
        return y_pred_df

    def post_processing(self, input_df, y_pred, is_train):
        """
        Used only in pipeline predict, after calling self.transform(input_df,
        is_train=False). Converts the predicted array into data frame(s) and
        applies the inverse scaling transform.

        :param input_df: one data frame or a list of data frames
        :param y_pred: model prediction result (ndarray)
        :param is_train: indicate the output is used for evaluation or prediction
        :return:
            In validation mode (is_train=True), the unscaled y_pred and rolled input_y.
            In test mode (is_train=False), unscaled data frame(s) in the format of
            {datetime_col} | {target_col(s)}.
        """
        y_pred_unscale = self._unscale(y_pred)
        if is_train:
            # return unscaled y_pred (ndarray) and y (ndarray).
            if isinstance(input_df, list):
                y_unscale_list = []
                for df in input_df:
                    _, y_unscale = self._roll_train(df[self.target_col],
                                                    self.past_seq_len,
                                                    self.future_seq_len)
                    y_unscale_list.append(y_unscale)
                output_y_unscale = np.concatenate(y_unscale_list, axis=0)
            else:
                _, output_y_unscale = self._roll_train(input_df[self.target_col],
                                                       self.past_seq_len,
                                                       self.future_seq_len)
            return output_y_unscale, y_pred_unscale
        else:
            # return data frame or a list of data frames.
            if isinstance(input_df, list):
                y_pred_dt_df_list = self._get_y_pred_dt_df(input_df, self.past_seq_len)
                y_pred_df_list = []
                y_pred_st_loc = 0
                # consume y_pred_unscale sequentially, one slice per input frame
                for y_pred_dt_df in y_pred_dt_df_list:
                    df = self._get_y_pred_df(y_pred_dt_df,
                                             y_pred_unscale[y_pred_st_loc:
                                                            y_pred_st_loc + len(y_pred_dt_df)])
                    y_pred_st_loc = y_pred_st_loc + len(y_pred_dt_df)
                    y_pred_df_list.append(df)
                # every prediction row must have been consumed
                assert y_pred_st_loc == len(y_pred_unscale)
                return y_pred_df_list
            else:
                y_pred_dt_df = self._get_y_pred_dt_df(input_df, self.past_seq_len)
                y_pred_df = self._get_y_pred_df(y_pred_dt_df, y_pred_unscale)
                return y_pred_df

    def save(self, file_path, replace=False):
        """
        Save the feature tool's internal variables as well as the initialization args.
        Some of the variables are derived after fit_transform, so only saving
        config is not enough.

        :param file_path: the file to be saved
        :param replace: overwrite an existing file when True
        :return:
        """
        # for StandardScaler()
        data_to_save = {"mean": self.scaler.mean_.tolist(),
                        "scale": self.scaler.scale_.tolist(),
                        "future_seq_len": self.future_seq_len,
                        "dt_col": self.dt_col,
                        "target_col": self.target_col,
                        "extra_features_col": self.extra_features_col,
                        "drop_missing": self.drop_missing
                        }
        save_config(file_path, data_to_save, replace=replace)

    def restore(self, **config):
        """
        Restore variables from a saved configuration (see save()).

        :return:
        """
        # rebuild the StandardScaler from its persisted statistics
        self.scaler = StandardScaler()
        self.scaler.mean_ = np.asarray(config["mean"])
        self.scaler.scale_ = np.asarray(config["scale"])
        self.config = self._get_feat_config(**config)
        self.future_seq_len = config["future_seq_len"]
        self.dt_col = config["dt_col"]
        self.target_col = config["target_col"]
        self.extra_features_col = config["extra_features_col"]
        self.drop_missing = config["drop_missing"]
        # for MinMaxScaler():
        # self.scaler = MinMaxScaler()
        # self.scaler.min_ = np.asarray(result["min"])
        # self.scaler.scale_ = np.asarray(result["scale"])

    def get_feature_list(self):
        """Return the names of all candidate generated/extra feature columns."""
        feature_list = []
        if self.time_features:
            for feature in (TIME_FEATURE + ADDITIONAL_TIME_FEATURE):
                feature_list.append(feature + "({})".format(self.dt_col))
        if self.extra_features_col:
            feature_list += self.extra_features_col
        return feature_list

    def get_feature_dim(self):
        """Total input feature dimension: generated features plus targets."""
        return len(self.get_feature_list()) + len(self.target_col)

    def get_target_dim(self):
        """Number of target columns."""
        return len(self.target_col)

    def _get_feat_config(self, **config):
        """
        Get feature related arguments from the global hyper parameter config and
        do necessary error checking.

        :param config: the global config (usually from hyper parameter tuning)
        :return: config only for feature engineering
        """
        self._check_config(**config)
        feature_config_names = ["selected_features", "past_seq_len"]
        feat_config = {}
        for name in feature_config_names:
            if name not in config:
                continue
            feat_config[name] = config[name]
        # default past sequence length when the tuner did not supply one
        self.past_seq_len = feat_config.get("past_seq_len", 2)
        return feat_config

    def _check_input(self, input_df, mode="train"):
        """
        Check a data frame for integrity. Requires the time sequence to come in
        uniform sampling intervals.

        :param input_df: one input data frame
        :param mode: "train"/"val"/"test"; controls the minimum-length check
        :return: the reset-index data frame
        :raise ValueError: on wrong dtype, missing datetimes, non-uniform
                           intervals, missing values, or insufficient length
        """
        # check NaT in datetime
        input_df = input_df.reset_index()
        dt = input_df[self.dt_col]
        if not np.issubdtype(dt, np.datetime64):
            raise ValueError("The dtype of datetime column is required to be np.datetime64!")
        is_nat = pd.isna(dt)
        if is_nat.any(axis=None):
            raise ValueError("Missing datetime in input dataframe!")

        # check uniform sampling interval
        interval = dt[1] - dt[0]
        if not all([dt[i] - dt[i - 1] == interval for i in range(1, len(dt))]):
            raise ValueError("Input time sequence intervals are not uniform!")

        # check missing values
        if not self.drop_missing:
            is_nan = pd.isna(input_df)
            if is_nan.any(axis=None):
                raise ValueError("Missing values in input dataframe!")

        # check if the length of input data is smaller than requested.
        if mode == "test":
            min_input_len = self.past_seq_len
            error_msg = "Length of {} data should be larger than " \
                        "the past sequence length selected by automl.\n" \
                        "{} data length: {}\n" \
                        "past sequence length selected: {}\n" \
                .format(mode, mode, len(input_df), self.past_seq_len)
        else:
            min_input_len = self.past_seq_len + self.future_seq_len
            error_msg = "Length of {} data should be larger than " \
                        "the sequence length you want to predict " \
                        "plus the past sequence length selected by automl.\n"\
                        "{} data length: {}\n"\
                        "predict sequence length: {}\n"\
                        "past sequence length selected: {}\n"\
                .format(mode, mode, len(input_df), self.future_seq_len, self.past_seq_len)
        if len(input_df) < min_input_len:
            raise ValueError(error_msg)
        return input_df

    def _roll_data(self, data, seq_len):
        """Slide a window of seq_len over data; mask rows containing NaN."""
        result = []
        mask = []
        for i in range(len(data) - seq_len + 1):
            if seq_len == 1 and len(self.target_col) > 1:
                # multi-target y with future_seq_len == 1: keep samples 1-d
                result.append(data[i])
            else:
                result.append(data[i: i + seq_len])
            # mask == 0 marks windows with any missing value; they are dropped
            if pd.isna(data[i: i + seq_len]).any(axis=None):
                mask.append(0)
            else:
                mask.append(1)
        return np.asarray(result), np.asarray(mask)

    def _roll_train(self, dataframe, past_seq_len, future_seq_len):
        """
        Roll a dataframe into sequence samples for TimeSequencePredictor
        training: split the whole dataset apart to build (x, y).

        :param dataframe: a dataframe which has been resampled in uniform frequency
        :param past_seq_len: the length of the past sequence
        :param future_seq_len: the length of the future sequence
        :return: tuple (x, y) in the format documented on fit_transform
        """
        x = dataframe[0:-future_seq_len].values
        if len(self.target_col) == 1:
            y = dataframe.iloc[past_seq_len:, 0].values
        else:
            y = dataframe.iloc[past_seq_len:, list(range(0, len(self.target_col)))].values
        output_x, mask_x = self._roll_data(x, past_seq_len)
        output_y, mask_y = self._roll_data(y, future_seq_len)
        # drop any sample whose x or y window contains missing values
        mask = (mask_x == 1) & (mask_y == 1)
        return output_x[mask], output_y[mask]

    def _roll_test(self, dataframe, past_seq_len):
        """
        Roll a dataframe into sequence samples for TimeSequencePredictor
        inference: the whole dataframe is regarded as x.

        :param dataframe: a dataframe which has been resampled in uniform frequency
        :param past_seq_len: the length of the past sequence
        :return: x, a 3-d array in the format documented on fit_transform
        """
        x = dataframe.values
        output_x, mask_x = self._roll_data(x, past_seq_len)
        mask = (mask_x == 1)
        return output_x[mask]

    def __get_y_pred_dt_df(self, input_df, past_seq_len):
        """
        Build the datetime column for predictions from one data frame.

        :param input_df: one data frame
        :return: a data frame with prediction datetimes (one step past the end)
        """
        input_df = input_df.reset_index(drop=True)
        input_dt_df = input_df.reset_index(drop=True)[[self.dt_col]].copy()
        # extrapolate one interval beyond the last known timestamp
        time_delta = input_dt_df.iloc[-1] - input_dt_df.iloc[-2]
        last_time = input_dt_df.iloc[-1] + time_delta
        last_df = pd.DataFrame({self.dt_col: last_time})
        pre_pred_dt_df = input_dt_df[past_seq_len:].copy()
        pre_pred_dt_df = pre_pred_dt_df.reset_index(drop=True)
        # NOTE(review): DataFrame.append was removed in pandas 2.x; this code
        # targets the pandas version pinned by the project.
        y_pred_dt_df = pre_pred_dt_df.append(last_df, ignore_index=True)
        return y_pred_dt_df

    def _get_y_pred_dt_df(self, input_df, past_seq_len):
        """
        Build prediction-datetime frame(s) for one data frame or a list of them.

        :param input_df: a data frame or a list of data frames
        :param past_seq_len: the length of the past sequence
        :return: a data frame or a list of data frames with prediction datetimes
        """
        if isinstance(input_df, list):
            y_pred_dt_df_list = []
            for df in input_df:
                y_pred_dt_df = self.__get_y_pred_dt_df(df, past_seq_len)
                y_pred_dt_df_list.append(y_pred_dt_df)
            return y_pred_dt_df_list
        else:
            return self.__get_y_pred_dt_df(input_df, past_seq_len)

    def _scale(self, data):
        """
        Scale the data with the fitted/restored StandardScaler.

        :param data: feature data frame
        :return: scaled data as a pandas DataFrame
        """
        # n_features_in_ only for 0.23 sklearn support; sklearn >= 0.24 does not
        # check this attribute
        if sklearn.__version__[:4] == "0.23":
            self.scaler.n_features_in_ = self.scaler.mean_.shape[0]
        np_scaled = self.scaler.transform(data)
        data_s = pd.DataFrame(np_scaled)
        return data_s

    def _rearrange_data(self, input_df):
        """
        Change the input_df column order into
        [datetime, target(s), feature1, feature2, ...].

        :param input_df: input data frame
        :return: a reordered copy of the data frame
        """
        cols = input_df.columns.tolist()
        new_cols = [self.dt_col] + self.target_col +\
                   [col for col in cols if col != self.dt_col and col not in self.target_col]
        # BUG FIX: ".copy" (without parentheses) returned the bound method
        # instead of a copied DataFrame.
        rearranged_data = input_df[new_cols].copy()
        return rearranged_data

    def _generate_features(self, input_df):
        """Append calendar/time feature columns derived from dt_col."""
        df = input_df.copy()
        df["id"] = df.index + 1
        field = df[self.dt_col]

        # built-in time features
        for attr in TIME_FEATURE:
            if attr == "WEEKOFYEAR" and \
                    version.parse(pd.__version__) >= version.parse("1.1.0"):
                # DatetimeProperties.weekofyear has been deprecated since pandas
                # 1.1.0; convert to DatetimeIndex and use isocalendar() instead,
                # wrapping in pd.Int64Index to keep the original dtype.
                field_datetime = pd.to_datetime(field.values.astype(np.int64))
                df[attr + "({})".format(self.dt_col)] =\
                    pd.Int64Index(field_datetime.isocalendar().week)
            else:
                df[attr + "({})".format(self.dt_col)] = getattr(field.dt, attr.lower())

        # additional (boolean) time features
        hour = field.dt.hour
        weekday = field.dt.weekday
        df["IS_AWAKE" + "({})".format(self.dt_col)] =\
            (((hour >= 6) & (hour <= 23)) | (hour == 0)).astype(int).values
        df["IS_BUSY_HOURS" + "({})".format(self.dt_col)] =\
            (((hour >= 7) & (hour <= 9)) | (hour >= 16) & (hour <= 19)).astype(int).values
        df["IS_WEEKEND" + "({})".format(self.dt_col)] =\
            (weekday >= 5).values
        return df

    def _get_features(self, input_df, config):
        """Generate features and select target + configured feature columns."""
        feature_matrix = self._generate_features(input_df)
        selected_features = config.get("selected_features")
        if selected_features:
            feature_cols = np.asarray(json.loads(selected_features))
        else:
            feature_cols = self.get_feature_list()
        # we do not include the target col in candidates;
        # the first column(s) are the default position of the target column(s).
        target_col = np.array(self.target_col)
        cols = np.concatenate([target_col, feature_cols])
        target_feature_matrix = feature_matrix[cols]
        return target_feature_matrix.astype(float)

    def _get_optional_parameters(self):
        """Hyper parameters this transformer understands but does not require."""
        return {"past_seq_len", "selected_features"}

    def _get_required_parameters(self):
        """No hyper parameters are strictly required."""
        return set()
|
{
"content_hash": "6c5ee89975b03eea1f16d3c8b1d3e945",
"timestamp": "",
"source": "github",
"line_count": 573,
"max_line_length": 100,
"avg_line_length": 44.931937172774866,
"alnum_prop": 0.5584168414510992,
"repo_name": "intel-analytics/analytics-zoo",
"id": "2469da8a0b3c764be26c6db695ae065b35f81cc0",
"size": "26337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyzoo/zoo/chronos/autots/deprecated/feature/time_sequence.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "73165"
},
{
"name": "Groovy",
"bytes": "1613"
},
{
"name": "Java",
"bytes": "209136"
},
{
"name": "Jupyter Notebook",
"bytes": "24437284"
},
{
"name": "Makefile",
"bytes": "11724"
},
{
"name": "PureBasic",
"bytes": "593"
},
{
"name": "Python",
"bytes": "4085490"
},
{
"name": "RobotFramework",
"bytes": "17467"
},
{
"name": "Scala",
"bytes": "3562801"
},
{
"name": "Shell",
"bytes": "413512"
}
],
"symlink_target": ""
}
|
"""Tests the version bumper activity."""
from rever import vcsutils
from rever.logger import current_logger
from rever.main import env_main
# Raw string: the generated rever.xsh below contains regex escapes like ``\s``
# that must reach the file verbatim (a plain string literal would trigger
# "invalid escape sequence" SyntaxWarnings on Python 3.6+).
REVER_XSH = r"""
$ACTIVITIES = ['version_bump']
$DAG['version_bump'].args = [[
('init.py', r'__version__\s*=.*', "__version__ = '$VERSION'"),
('appveyor.yml', r'version:\s*', (lambda ver: 'version: {0}.{{build}}'.format(ver))),
]]
"""

# Initial file contents committed to the test repo before bumping.
INIT_PY = "__version__='42.1.0'\n"
APPVEYOR_YML = "version: 42.1.0\n"
def test_version_bump(gitrepo):
    """End-to-end check of the version_bump activity inside a temporary git
    repository (the ``gitrepo`` fixture provides and cds into a fresh repo)."""
    # write the rever config and the two files whose versions will be bumped
    files = [('rever.xsh', REVER_XSH), ('init.py', INIT_PY),
             ('appveyor.yml', APPVEYOR_YML)]
    for filename, body in files:
        with open(filename, 'w') as f:
            f.write(body)
    vcsutils.track('.')
    vcsutils.commit('Some versioned files')
    env_main(['42.1.1'])
    # now see if this worked
    with open('init.py') as f:
        init = f.read()
    assert "__version__ = '42.1.1'\n" == init
    with open('appveyor.yml') as f:
        appveyor = f.read()
    assert appveyor == "version: 42.1.1.{build}\n"
    # ensure that the updates were committed (rev changed after the activity)
    logger = current_logger()
    entries = logger.load()
    assert entries[-2]['rev'] != entries[-1]['rev']
|
{
"content_hash": "2b22b51728bd3a0483190b09c49c0460",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 89,
"avg_line_length": 31.18421052631579,
"alnum_prop": 0.5873417721518988,
"repo_name": "scopatz/rever",
"id": "d20b2346697401b78c50262e74f2da3979483fed",
"size": "1185",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_version_bump.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "20758"
}
],
"symlink_target": ""
}
|
"""test deprecated module
"""
__revision__ = 0
if __revision__:
import Bastion
print Bastion
# false positive (#10061)
import stringfile
print stringfile
|
{
"content_hash": "e73cf997ca8a4f0be481a3d2de6d5193",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 29,
"avg_line_length": 14.833333333333334,
"alnum_prop": 0.6348314606741573,
"repo_name": "dbbhattacharya/kitsune",
"id": "72fb79502c1c633f1f1457c962c217afd4c86280",
"size": "178",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "vendor/packages/pylint/test/input/func_w0403.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2694"
},
{
"name": "CSS",
"bytes": "276585"
},
{
"name": "HTML",
"bytes": "600145"
},
{
"name": "JavaScript",
"bytes": "800276"
},
{
"name": "Python",
"bytes": "2762831"
},
{
"name": "Shell",
"bytes": "6720"
},
{
"name": "Smarty",
"bytes": "1752"
}
],
"symlink_target": ""
}
|
import sys, os

# Make bundled third-party code, the bot core, the modules and their
# configuration importable without installation.
sys.path.insert(0, "thirdparty/")
sys.path.insert(0, "core/")
sys.path.insert(0, "modules/")
sys.path.insert(0, "modules/config/")

import config
from BotModule import BotModule
from BotCore import FSIBot

# Open config file (Python 2 script: ``file`` is an alias of ``open``)
cfg = config.Config(file("bot.config"))
botcfg = cfg.bot

bot = FSIBot(botcfg) #botcfg.channel, botcfg.name, botcfg.password, botcfg.server, botcfg.port, botcfg.debug)

# Add activated modules to the bot
for mod in botcfg.modules:
    bot.addModule(mod)

# Start :)
bot.start()
|
{
"content_hash": "590049bc840d99b5f8c35654f0abdd5f",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 109,
"avg_line_length": 23.545454545454547,
"alnum_prop": 0.7316602316602316,
"repo_name": "fsi-hska/fsiBot",
"id": "1f8c71a7b3c60c7ec085aad8026968ae0d96ea2d",
"size": "724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45644"
}
],
"symlink_target": ""
}
|
import sys,os,os.path
from zstacklib.utils import log
from zstacklib.utils import linux
import zstacklib.utils.iptables as iptables
import appliancevm
# Module-level logging setup: all appliance VM agent logs go to this file.
log.configure_log('/var/log/zstack/zstack-appliancevm.log')
logger = log.get_logger(__name__)
def main():
    """CLI entry point: start/stop/restart the appliance VM agent daemon.

    Invoked as:
        python -c "from appliancevm import daemon; daemon.main()" start|stop|restart

    Exits 1 on bad usage or any error, 0 on success.
    (Python 2 source: print statement and 0755 octal literal.)
    """
    usage = 'usage: python -c "from appliancevm import daemon; daemon.main()" start|stop|restart'
    if len(sys.argv) != 2 or not sys.argv[1] in ['start', 'stop', 'restart']:
        print usage
        sys.exit(1)

    pidfile = '/var/run/zstack/appliancevm.pid'
    dirname = os.path.dirname(pidfile)
    if not os.path.exists(dirname):
        os.makedirs(dirname, 0755)

    try:
        # Allow inbound connections to the agent's listen port 7759.
        iptables.insert_single_rule_to_filter_table('-A INPUT -i eth0 -p tcp -m tcp --dport 7759 -j ACCEPT')
        cmd = sys.argv[1]
        agentdaemon = appliancevm.ApplianceVmDaemon(pidfile)
        if cmd == 'start':
            agentdaemon.start()
        elif cmd == 'stop':
            agentdaemon.stop()
        elif cmd == 'restart':
            agentdaemon.restart()
        sys.exit(0)
    except Exception:
        # Best effort: log the stack trace and exit non-zero.
        logger.warning(linux.get_exception_stacktrace())
        sys.exit(1)
|
{
"content_hash": "57857a3fd29ab83ab26274200dbbd8ea",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 108,
"avg_line_length": 34.17142857142857,
"alnum_prop": 0.6086956521739131,
"repo_name": "ghxandsky/zstack-utility",
"id": "e9046bda5a318f239e8782acad21d78693283ae6",
"size": "1196",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "appliancevm/appliancevm/daemon.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "1147"
},
{
"name": "HTML",
"bytes": "4277"
},
{
"name": "Puppet",
"bytes": "10604"
},
{
"name": "Python",
"bytes": "1507292"
},
{
"name": "Shell",
"bytes": "188218"
}
],
"symlink_target": ""
}
|
import fnmatch
import itertools
import logging
import os
import requests
from six.moves import urllib
from . import rpcclient, dcos_url_path
from ..clients.rpcclient import verify_ssl
from ..errors import DCOSException
logger = logging.getLogger(__name__)
# Terminal Mesos task states: a task in one of these states will not run
# again. Used by Master.tasks() to decide whether a task counts as completed.
COMPLETED_TASK_STATES = [
    "TASK_FINISHED", "TASK_KILLED", "TASK_FAILED", "TASK_LOST", "TASK_ERROR",
    "TASK_GONE", "TASK_GONE_BY_OPERATOR", "TASK_DROPPED", "TASK_UNREACHABLE",
    "TASK_UNKNOWN"
]
def get_master(dcos_client=None):
    """Build a :class:`Master` model from the cluster's master state.

    Uses the url stored in the 'core.mesos_master_url' property if it
    exists, otherwise the cluster url defined by SHAKEDOWN_DCOS_URL.

    :param dcos_client: client used to fetch state; a fresh DCOSClient
                        is created when omitted
    :type dcos_client: DCOSClient | None
    :returns: master state object
    :rtype: Master
    """
    client = DCOSClient() if not dcos_client else dcos_client
    master_state = client.get_master_state()
    return Master(master_state)
class DCOSClient(object):
    """Client for communicating with DC/OS.

    Wraps an RPC session pointed at the Mesos master and exposes helpers
    for reading master/slave state and files.
    """

    def __init__(self):
        self._mesos_master_url = dcos_url_path('mesos/')
        self._rpc = rpcclient.create_client(self._mesos_master_url)

    def slave_url(self, slave_id, private_url, path):
        """Create a slave URL
        :param slave_id: slave ID
        :type slave_id: str
        :param private_url: The slave's private URL derived from its
                            pid. Used when we're accessing mesos
                            directly, rather than through DC/OS.
        :type private_url: str
        :param path: the path suffix of the desired URL
        :type path: str
        :returns: URL that hits the master
        :rtype: str
        """
        if self._mesos_master_url:
            return urllib.parse.urljoin(private_url, path)
        else:
            return dcos_url_path('slave/{}/{}'.format(slave_id, path))

    def get_master_state(self):
        """Get the Mesos master state json object
        :returns: Mesos' master state json object
        :rtype: dict
        """
        response = self._rpc.session.get('master/state.json')
        return response.json()

    def get_slave_state(self, slave_id, private_url):
        """Get the Mesos slave state json object
        :param slave_id: slave ID
        :type slave_id: str
        :param private_url: The slave's private URL derived from its
                            pid. Used when we're accessing mesos
                            directly, rather than through DC/OS.
        :type private_url: str
        :returns: Mesos' master state json object
        :rtype: dict
        """
        url = self.slave_url(slave_id, private_url, 'state.json')
        # BUGFIX: was `self._rpc.session.timoue` (typo) -- an AttributeError
        # at runtime. `timeout` matches the working calls in metadata()/browse().
        response = requests.get(url,
                                timeout=self._rpc.session.timeout,
                                auth=self._rpc.session.auth,
                                verify=verify_ssl())
        return response.json()

    def get_state_summary(self):
        """Get the Mesos master state summary json object
        :returns: Mesos' master state summary json object
        :rtype: dict
        """
        response = self._rpc.session.get('master/state-summary')
        return response.json()

    def slave_file_read(self, slave_id, private_url, path, offset, length):
        """See the master_file_read() docs
        :param slave_id: slave ID
        :type slave_id: str
        :param path: absolute path to read
        :type path: str
        :param private_url: The slave's private URL derived from its
                            pid. Used when we're accessing mesos
                            directly, rather than through DC/OS.
        :type private_url: str
        :param offset: start byte location, or -1. -1 means read no data, and
                       is used to fetch the size of the file in the response's
                       'offset' parameter.
        :type offset: int
        :param length: number of bytes to read, or -1. -1 means read the whole
                       file
        :type length: int
        :returns: files/read.json response
        :rtype: dict
        """
        url = self.slave_url(slave_id,
                             private_url,
                             'files/read.json')
        params = {'path': path,
                  'length': length,
                  'offset': offset}
        # BUGFIX: was `self._rpc.session.timoue` (typo, AttributeError).
        response = requests.get(url, params=params,
                                timeout=self._rpc.session.timeout,
                                auth=self._rpc.session.auth,
                                verify=verify_ssl())
        return response.json()

    def master_file_read(self, path, length, offset):
        """This endpoint isn't well documented anywhere, so here is the spec
        derived from the mesos source code:
        request format:
        {
            path: absolute path to read
            offset: start byte location, or -1. -1 means read no data, and
                    is used to fetch the size of the file in the response's
                    'offset' parameter.
            length: number of bytes to read, or -1. -1 means read the whole
                    file.
        }
        response format:
        {
            data: file data. Empty if a request.offset=-1. Could be
                  smaller than request.length if EOF was reached, or if (I
                  believe) request.length is larger than the length
                  supported by the server (16 pages I believe).
            offset: the offset value from the request, or the size of the
                    file if the request offset was -1 or >= the file size.
        }
        :param path: absolute path to read
        :type path: str
        :param offset: start byte location, or -1. -1 means read no data, and
                       is used to fetch the size of the file in the response's
                       'offset' parameter.
        :type offset: int
        :param length: number of bytes to read, or -1. -1 means read the whole
                       file
        :type length: int
        :returns: files/read.json response
        :rtype: dict
        """
        params = {'path': path,
                  'length': length,
                  'offset': offset}
        response = self._rpc.session.get('files/read.json', params=params)
        return response.json()

    def shutdown_framework(self, framework_id):
        """Shuts down a Mesos framework
        :param framework_id: ID of the framework to shutdown
        :type framework_id: str
        :returns: None
        """
        logger.info('Shutting down framework {}'.format(framework_id))
        data = 'frameworkId={}'.format(framework_id)
        self._rpc.session.post('master/teardown', data=data)

    def metadata(self):
        """ GET /metadata
        :returns: /metadata content
        :rtype: dict
        """
        url = dcos_url_path('metadata')
        response = requests.get(url,
                                timeout=self._rpc.session.timeout,
                                auth=self._rpc.session.auth,
                                verify=verify_ssl())
        return response.json()

    def browse(self, slave, path):
        """ GET /files/browse.json
        Request
        path:...  # path to run ls on
        Response
        [
            {
                path:  # full path to file
                nlink:
                size:
                mtime:
                mode:
                uid:
                gid:
            }
        ]
        :param slave: slave to issue the request on
        :type slave: Slave
        :returns: /files/browse.json response
        :rtype: dict
        """
        url = self.slave_url(slave['id'],
                             slave.http_url(),
                             'files/browse.json')
        response = requests.get(url, params={'path': path},
                                timeout=self._rpc.session.timeout,
                                auth=self._rpc.session.auth,
                                verify=verify_ssl())
        return response.json()
class MesosDNSClient(object):
    """Thin client for the Mesos-DNS HTTP API.

    :param url: mesos-dns URL; defaults to the cluster's /mesos_dns/ path
    :type url: str
    """

    def __init__(self, url=None):
        if not url:
            url = dcos_url_path('/mesos_dns/')
        self._url = url
        self._rpc = rpcclient.create_client(self._url)

    def hosts(self, host):
        """ GET v1/hosts/<host>
        :param host: host
        :type host: str
        :returns: {'ip', 'host'} dictionary
        :rtype: dict(str, str)
        """
        endpoint = 'v1/hosts/{}'.format(host)
        response = self._rpc.session.get(endpoint, headers={})
        return response.json()

    def masters(self):
        """ Returns ip addresses of all masters
        :returns: {'ip', 'host'} dictionary
        :rtype: dict(str, str)
        """
        return self.hosts('master.mesos')

    def leader(self):
        """ Returns ip addresses of the leader
        :returns: {'ip', 'host'} dictionary
        :rtype: dict(str, str)
        """
        return self.hosts('leader.mesos')
class Master(object):
    """Mesos Master Model

    Caches Slave and Framework wrapper objects per id so repeated lookups
    return the same instances.

    :param state: Mesos master's state.json
    :type state: dict
    """
    def __init__(self, state):
        self._state = state
        # id -> wrapper-object caches, filled lazily by _slave_obj/_framework_obj
        self._frameworks = {}
        self._slaves = {}
    def state(self):
        """Returns master's master/state.json.
        :returns: state.json
        :rtype: dict
        """
        return self._state
    def slave(self, fltr):
        """Returns the slave that has `fltr` in its ID. If any slaves
        are an exact match, returns that slave; if not, raises a
        DCOSException if there is not exactly one such slave.
        :param fltr: filter string
        :type fltr: str
        :returns: the slave that has `fltr` in its ID
        :rtype: Slave
        """
        slaves = self.slaves(fltr)
        if len(slaves) == 0:
            raise DCOSException('No agent found with ID "{}".'.format(fltr))
        elif len(slaves) > 1:
            # ambiguous substring match: accept only a unique exact id match
            exact_matches = [s for s in slaves if s['id'] == fltr]
            if len(exact_matches) == 1:
                return exact_matches[0]
            else:
                matches = ['\t{0}'.format(s['id']) for s in slaves]
                raise DCOSException(
                    "There are multiple agents with that ID. " +
                    "Please choose one:\n{}".format('\n'.join(matches)))
        else:
            return slaves[0]
    def task(self, fltr, completed=False):
        """Returns the task with `fltr` in its ID. Raises a DCOSException if
        there is not exactly one such task.
        :param fltr: filter string
        :type fltr: str
        :returns: the task that has `fltr` in its ID
        :param completed: also include completed tasks
        :type completed: bool
        :rtype: Task
        """
        tasks = self.tasks(fltr, completed)
        if len(tasks) == 0:
            raise DCOSException(
                'Cannot find a task with ID containing "{}"'.format(fltr))
        elif len(tasks) > 1:
            msg = [("There are multiple tasks with ID matching [{}]. " +
                    "Please choose one:").format(fltr)]
            msg += ["\t{0}".format(t["id"]) for t in tasks]
            raise DCOSException('\n'.join(msg))
        else:
            return tasks[0]
    def framework(self, framework_id):
        """Returns a framework by ID
        :param framework_id: the framework's ID
        :type framework_id: str
        :returns: the framework, or None if no framework has that exact id
        :rtype: Framework
        """
        for f in self._framework_dicts(True, True):
            if f['id'] == framework_id:
                return self._framework_obj(f)
        return None
    def slaves(self, fltr=""):
        """Returns those slaves that have `fltr` in their 'id'
        :param fltr: filter string
        :type fltr: str
        :returns: Those slaves that have `fltr` in their 'id'
        :rtype: [Slave]
        """
        return [self._slave_obj(slave)
                for slave in self.state()['slaves']
                if fltr in slave['id']]
    def tasks(self, fltr=None, completed=False, all_=False):
        """Returns tasks running under the master
        :param fltr: May be None, a substring or regex. None returns all tasks,
                     else return tasks whose 'id' matches `fltr`.
        :type fltr: str | None
        :param completed: completed tasks only
        :type completed: bool
        :param all_: If True, include all tasks
        :type all_: bool
        :returns: a list of tasks
        :rtype: [Task]
        """
        keys = ['tasks']
        show_completed = completed or all_
        if show_completed:
            keys.extend(['completed_tasks'])
        tasks = []
        # get all frameworks
        for framework in self._framework_dicts(True, True, True):
            for task in _merge(framework, keys):
                state = task.get("state")
                # `completed` restricts results to terminal task states
                if completed and state not in COMPLETED_TASK_STATES:
                    continue
                # match by substring or by fnmatch glob pattern
                if fltr is None or \
                        fltr in task['id'] or \
                        fnmatch.fnmatchcase(task['id'], fltr):
                    task = self._framework_obj(framework).task(task['id'])
                    tasks.append(task)
        return tasks
    def get_container_id(self, task_id):
        """Returns the container ID for a task ID matching `task_id`
        :param task_id: The task ID which will be mapped to container ID
        :type task_id: str
        :returns: The container ID associated with 'task_id'
        :rtype: str
        """
        def _get_task(task_id):
            # prefix-match the task id across all frameworks' running tasks
            candidates = []
            if 'frameworks' in self.state():
                for framework in self.state()['frameworks']:
                    if 'tasks' in framework:
                        for task in framework['tasks']:
                            if 'id' in task:
                                if task['id'].startswith(task_id):
                                    candidates.append(task)
            if len(candidates) == 1:
                return candidates[0]
            raise DCOSException(
                "More than one task matching '{}' found: {}"
                .format(task_id, candidates))
        def _get_container_status(task):
            # container_status is read from the task's first status entry
            if 'statuses' in task:
                if len(task['statuses']) > 0:
                    if 'container_status' in task['statuses'][0]:
                        return task['statuses'][0]['container_status']
            raise DCOSException(
                "Unable to obtain container status for task '{}'"
                .format(task['id']))
        def _get_container_id(container_status):
            if 'container_id' in container_status:
                if 'value' in container_status['container_id']:
                    return container_status['container_id']
            raise DCOSException(
                "No container found for the specified task."
                " It might still be spinning up."
                " Please try again.")
        if not task_id:
            raise DCOSException("Invalid task ID")
        task = _get_task(task_id)
        container_status = _get_container_status(task)
        return _get_container_id(container_status)
    def frameworks(self, inactive=False, completed=False):
        """Returns a list of all frameworks
        :param inactive: also include inactive frameworks
        :type inactive: bool
        :param completed: also include completed frameworks
        :type completed: bool
        :returns: a list of frameworks
        :rtype: [Framework]
        """
        return [self._framework_obj(framework)
                for framework in self._framework_dicts(inactive, completed)]
    def _slave_obj(self, slave):
        """Returns the Slave object corresponding to the provided `slave`
        dict. Creates it if it doesn't exist already.
        :param slave: slave
        :type slave: dict
        :returns: Slave
        :rtype: Slave
        """
        if slave['id'] not in self._slaves:
            self._slaves[slave['id']] = Slave(slave, None, self)
        return self._slaves[slave['id']]
    def _framework_obj(self, framework):
        """Returns the Framework object corresponding to the provided `framework`
        dict. Creates it if it doesn't exist already.
        :param framework: framework
        :type framework: dict
        :returns: Framework
        :rtype: Framework
        """
        if framework['id'] not in self._frameworks:
            self._frameworks[framework['id']] = Framework(framework, self)
        return self._frameworks[framework['id']]
    def _framework_dicts(self, inactive=False, completed=False, active=True):
        """Returns a list of all frameworks as their raw dictionaries
        :param inactive: include inactive frameworks
        :type inactive: bool
        :param completed: include completed frameworks
        :type completed: bool
        :param active: include active frameworks
        :type active: bool
        :returns: a generator of framework dicts
        """
        if completed:
            for framework in self.state()['completed_frameworks']:
                yield framework
        for framework in self.state()['frameworks']:
            active_state = framework['active']
            if (active_state and active) or (not active_state and inactive):
                yield framework
class Slave(object):
    """Mesos Slave Model

    :param short_state: slave's entry from the master's state.json
    :type short_state: dict
    :param state: slave's state.json
    :type state: dict | None
    :param master: slave's master
    :type master: Master
    """

    def __init__(self, short_state, state, master):
        self._short_state = short_state
        self._state = state
        self._master = master

    def state(self):
        """Get the slave's state.json object, fetching it over HTTP on
        first access and caching it thereafter.

        :returns: this slave's state.json object
        :rtype: dict
        """
        if not self._state:
            fetched = DCOSClient().get_slave_state(self['id'],
                                                   self.http_url())
            self._state = fetched
        return self._state

    def http_url(self):
        """
        :returns: the private HTTP URL of the slave, derived from the
                  `pid` property
        :rtype: str
        """
        _, host, port = parse_pid(self['pid'])
        return 'http://{}:{}'.format(host, port)

    def _framework_dicts(self):
        """Returns the framework dictionaries from the state.json dict

        :returns: frameworks
        :rtype: [dict]
        """
        return _merge(self.state(), ['frameworks', 'completed_frameworks'])

    def executor_dicts(self):
        """Returns the executor dictionaries from the state.json

        :returns: executors
        :rtype: [dict]
        """
        return itertools.chain.from_iterable(
            _merge(framework, ['executors', 'completed_executors'])
            for framework in self._framework_dicts())

    def __getitem__(self, name):
        """Support the slave[attr] syntax

        :param name: attribute to get
        :type name: str
        :returns: the value for this attribute in the underlying
                  slave dictionary
        :rtype: object
        """
        return self._short_state[name]
class Framework(object):
    """ Mesos Framework Model

    :param framework: framework properties
    :type framework: dict
    :param master: framework's master
    :type master: Master
    """

    def __init__(self, framework, master):
        self._framework = framework
        self._master = master
        # cache of Task wrappers, keyed by task id
        self._tasks = {}

    def task(self, task_id):
        """Returns a task by id

        :param task_id: the task's id
        :type task_id: str
        :returns: the task, or None when no task has that id
        :rtype: Task
        """
        candidates = _merge(self._framework, ['tasks', 'completed_tasks'])
        match = next((t for t in candidates if t['id'] == task_id), None)
        if match is None:
            return None
        return self._task_obj(match)

    def _task_obj(self, task):
        """Returns the Task object corresponding to the provided `task`
        dict, creating and caching it on first use.

        :param task: task
        :type task: dict
        :returns: Task
        :rtype: Task
        """
        cached = self._tasks.get(task['id'])
        if cached is None:
            cached = Task(task, self._master)
            self._tasks[task['id']] = cached
        return cached

    def dict(self):
        """:returns: the underlying framework dictionary"""
        return self._framework

    def __getitem__(self, name):
        """Support the framework[attr] syntax

        :param name: attribute to get
        :type name: str
        :returns: the value for this attribute in the underlying
                  framework dictionary
        :rtype: object
        """
        return self._framework[name]
class Task(object):
    """Mesos Task Model.

    :param task: task properties
    :type task: dict
    :param master: mesos master
    :type master: Master
    """

    def __init__(self, task, master):
        self._task = task
        self._master = master

    def dict(self):
        """
        :returns: dictionary representation of this Task
        :rtype: dict
        """
        return self._task

    def framework(self):
        """Returns this task's framework

        :returns: task's framework
        :rtype: Framework
        """
        return self._master.framework(self["framework_id"])

    def slave(self):
        """Returns the task's slave

        :returns: task's slave
        :rtype: Slave
        """
        return self._master.slave(self["slave_id"])

    def user(self):
        """Task owner

        :returns: task owner
        :rtype: str
        """
        return self.framework()['user']

    def executor(self):
        """ Returns this task's executor

        :returns: task's executor, or None if no executor lists this task
        :rtype: dict
        """
        own_id = self['id']
        for candidate in self.slave().executor_dicts():
            candidate_tasks = _merge(candidate,
                                     ['completed_tasks',
                                      'tasks',
                                      'queued_tasks'])
            for entry in candidate_tasks:
                if entry['id'] == own_id:
                    return candidate
        return None

    def directory(self):
        """ Sandbox directory for this task

        :returns: path to task's sandbox
        :rtype: str
        """
        return self.executor()['directory']

    def __getitem__(self, name):
        """Support the task[attr] syntax

        :param name: attribute to get
        :type name: str
        :returns: the value for this attribute in the underlying
                  task dictionary
        :rtype: object
        """
        return self._task[name]

    def __contains__(self, name):
        """Support the `attr in task` syntax

        :param name: attribute to test
        :type name: str
        :returns: True if attribute is present in the underlying dict
        :rtype: bool
        """
        return name in self._task
class MesosFile(object):
    """File-like object that is backed by a remote slave or master file.
    Uses the files/read.json endpoint.
    If `task` is provided, the file host is `task.slave()`. If
    `slave` is provided, the file host is `slave`. It is invalid to
    provide both. If neither is provided, the file host is the
    leading master.
    :param path: file's path, relative to the sandbox if `task` is given
    :type path: str
    :param task: file's task
    :type task: Task | None
    :param slave: slave where the file lives
    :type slave: Slave | None
    :param dcos_client: client to use for network requests
    :type dcos_client: DCOSClient | None
    """
    def __init__(self, path, task=None, slave=None, dcos_client=None):
        if task and slave:
            raise ValueError(
                "You cannot provide both `task` and `slave` " +
                "arguments. `slave` is understood to be `task.slave()`")
        if slave:
            self._slave = slave
        elif task:
            self._slave = task.slave()
        else:
            # no host given: reads go to the leading master
            self._slave = None
        self._task = task
        self._path = path
        self._dcos_client = dcos_client or DCOSClient()
        # read position in bytes; advanced by read()/seek()
        self._cursor = 0
    def size(self):
        """Size of the file
        :returns: size of the file
        :rtype: int
        """
        # offset=-1 asks the endpoint to report the file size in 'offset'
        params = self._params(0, offset=-1)
        return self._fetch(params)["offset"]
    def seek(self, offset, whence=os.SEEK_SET):
        """Seek to the provided location in the file.
        :param offset: location to seek to
        :type offset: int
        :param whence: determines whether `offset` represents a
                       location that is absolute, relative to the
                       beginning of the file, or relative to the end
                       of the file
        :type whence: os.SEEK_SET | os.SEEK_CUR | os.SEEK_END
        :returns: None
        :rtype: None
        """
        if whence == os.SEEK_SET:
            self._cursor = 0 + offset
        elif whence == os.SEEK_CUR:
            self._cursor += offset
        elif whence == os.SEEK_END:
            # SEEK_END costs a network round-trip to learn the file size
            self._cursor = self.size() + offset
        else:
            raise ValueError(
                "Unexpected value for `whence`: {}".format(whence))
    def tell(self):
        """ The current cursor position.
        :returns: the current cursor position
        :rtype: int
        """
        return self._cursor
    def read(self, length=None):
        """Reads up to `length` bytes, or the entire file if `length` is None.
        :param length: number of bytes to read
        :type length: int | None
        :returns: data read
        :rtype: str
        """
        data = ''
        while length is None or length - len(data) > 0:
            # -1 tells the endpoint to return the remainder of the file
            chunk_length = -1 if length is None else length - len(data)
            chunk = self._fetch_chunk(chunk_length)
            if chunk == '':
                # empty chunk signals EOF
                break
            data += chunk
        return data
    def _host_path(self):
        """ The absolute path to the file on slave.
        :returns: the absolute path to the file on slave
        :rtype: str
        """
        if self._task:
            directory = self._task.directory().rstrip('/')
            executor = self._task.executor()
            # executor.type is currently used only by pods. All tasks in a pod
            # share an executor, so if this is a pod, get the task logs instead
            # of the executor logs
            if executor.get('type') == "DEFAULT":
                task_id = self._task.dict().get('id')
                return directory + '/tasks/{}/'.format(task_id) + self._path
            else:
                return directory + '/' + self._path
        else:
            return self._path
    def _params(self, length, offset=None):
        """GET parameters to send to files/read.json. See the MesosFile
        docstring for full information.
        :param length: number of bytes to read
        :type length: int
        :param offset: start location. if None, will use the location
                       of the current file cursor
        :type offset: int
        :returns: GET parameters
        :rtype: dict
        """
        if offset is None:
            offset = self._cursor
        return {
            'path': self._host_path(),
            'offset': offset,
            'length': length
        }
    def _fetch_chunk(self, length, offset=None):
        """Fetch data from files/read.json
        :param length: number of bytes to fetch
        :type length: int
        :param offset: start location. If not None, this file's
                       cursor is set to `offset`
        :type offset: int
        :returns: data read
        :rtype: str
        """
        if offset is not None:
            self.seek(offset, os.SEEK_SET)
        params = self._params(length)
        data = self._fetch(params)["data"]
        # advance the cursor past whatever the server actually returned
        self.seek(len(data), os.SEEK_CUR)
        return data
    def _fetch(self, params):
        """Fetch data from files/read.json
        :param params: GET parameters
        :type params: dict
        :returns: response dict
        :rtype: dict
        """
        if self._slave:
            return self._dcos_client.slave_file_read(self._slave['id'],
                                                     self._slave.http_url(),
                                                     **params)
        else:
            return self._dcos_client.master_file_read(**params)
    def __str__(self):
        """String representation of the file: <task_id:file_path>
        :returns: string representation of the file
        :rtype: str
        """
        if self._task:
            return "task:{0}:{1}".format(self._task['id'], self._path)
        elif self._slave:
            return "slave:{0}:{1}".format(self._slave['id'], self._path)
        else:
            return "master:{0}".format(self._path)
def parse_pid(pid):
    """ Split a mesos pid string into its components.

    :param pid: pid of the form "id@ip:port"
    :type pid: str
    :returns: (id, ip, port)
    :rtype: (str, str, str)
    """
    pid_id, address = pid.split('@')
    host, port = address.split(':')
    return pid_id, host, port
def _merge(d, keys):
""" Merge multiple lists from a dictionary into one iterator.
e.g. _merge({'a': [1, 2], 'b': [3]}, ['a', 'b']) ->
iter(1, 2, 3)
:param d: dictionary
:type d: dict
:param keys: keys to merge
:type keys: [hashable]
:returns: iterator
:rtype: iter
"""
return itertools.chain(*[d[k] for k in keys])
|
{
"content_hash": "50cb992e2bc8fd2b5132d0264be8dff5",
"timestamp": "",
"source": "github",
"line_count": 981,
"max_line_length": 120,
"avg_line_length": 30.14169215086646,
"alnum_prop": 0.5444553417430417,
"repo_name": "mesosphere/marathon",
"id": "7df2e64979213076ab4275c8beb17fe2b49e36d9",
"size": "29569",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/shakedown/shakedown/clients/mesos.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "59278"
},
{
"name": "Dockerfile",
"bytes": "6894"
},
{
"name": "Groovy",
"bytes": "17238"
},
{
"name": "HCL",
"bytes": "310"
},
{
"name": "HTML",
"bytes": "16356"
},
{
"name": "Java",
"bytes": "36549"
},
{
"name": "Liquid",
"bytes": "1484"
},
{
"name": "Makefile",
"bytes": "14608"
},
{
"name": "Python",
"bytes": "428283"
},
{
"name": "RAML",
"bytes": "356"
},
{
"name": "Ruby",
"bytes": "772"
},
{
"name": "Scala",
"bytes": "4902328"
},
{
"name": "Shell",
"bytes": "50550"
}
],
"symlink_target": ""
}
|
import pytest
from formulaic.materializers.types import ScopedFactor
from formulaic.parser.types import Factor
class TestScopedFactor:
    """Unit tests for ScopedFactor's repr/hash/equality/ordering contract."""
    @pytest.fixture
    def scoped_factor(self):
        # plain (non-reduced) scoped factor wrapping Factor "a"
        return ScopedFactor(Factor("a"))
    @pytest.fixture
    def scoped_factor_reduced(self):
        # reduced variant; expected to render with a trailing "-"
        return ScopedFactor(Factor("a"), reduced=True)
    def test_repr(self, scoped_factor, scoped_factor_reduced):
        assert repr(scoped_factor) == "a"
        assert repr(scoped_factor_reduced) == "a-"
    def test_hash(self, scoped_factor, scoped_factor_reduced):
        # hash is expected to match the hash of the string form
        assert hash(scoped_factor) == hash("a")
        assert hash(scoped_factor_reduced) == hash("a-")
    def test_equality(self, scoped_factor, scoped_factor_reduced):
        assert scoped_factor == scoped_factor
        assert scoped_factor != scoped_factor_reduced
        # comparison with a non-ScopedFactor must be unequal, not an error
        assert scoped_factor != 1
    def test_sort(self, scoped_factor, scoped_factor_reduced):
        # reduced sorts before non-reduced for the same factor name
        assert scoped_factor_reduced < scoped_factor
        assert scoped_factor < ScopedFactor(Factor("b"))
        # ordering against unrelated types must raise, not silently compare
        with pytest.raises(TypeError):
            scoped_factor < 1
|
{
"content_hash": "d1fd922956f00431fb1ad753c5a24a48",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 66,
"avg_line_length": 32.73529411764706,
"alnum_prop": 0.6738544474393531,
"repo_name": "matthewwardrop/formulaic",
"id": "715958d941ef457754b8f8e2fafe8db1023f7354",
"size": "1113",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/materializers/types/test_scoped_factor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "372702"
}
],
"symlink_target": ""
}
|
import logging
from solitude.middleware import get_oauth_key, get_transaction_id
def getLogger(name=None):
    """Return a SolitudeAdapter wrapping the stdlib logger for *name*."""
    return SolitudeAdapter(logging.getLogger(name))
# This really should be fulfilled by a logging filter which would remove the
# need to do all this crap. However I've got no idea how to do that and I
# wasted far too long on this.
class SolitudeAdapter(logging.LoggerAdapter):
    """Adds OAuth user and transaction id to every logging message's kwargs."""

    def __init__(self, logger, extra=None):
        logging.LoggerAdapter.__init__(self, logger, extra or {})

    def process(self, msg, kwargs):
        # Inject the per-request context so formatters can reference
        # %(OAUTH_KEY)s and %(TRANSACTION_ID)s.
        context = {
            'OAUTH_KEY': get_oauth_key(),
            'TRANSACTION_ID': get_transaction_id(),
        }
        kwargs['extra'] = context
        return msg, kwargs
class SolitudeFormatter(logging.Formatter):
    """Formatter that tolerates records missing the solitude context fields
    by defaulting them to empty strings before formatting."""

    _CONTEXT_FIELDS = ('OAUTH_KEY', 'TRANSACTION_ID')

    def format(self, record):
        for field in self._CONTEXT_FIELDS:
            # only fill in the default when the adapter did not supply it
            if field not in record.__dict__:
                record.__dict__[field] = ''
        return logging.Formatter.format(self, record)
|
{
"content_hash": "55682ab1201e8f76ef18f948fda419fd",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 79,
"avg_line_length": 33.935483870967744,
"alnum_prop": 0.6539923954372624,
"repo_name": "muffinresearch/solitude",
"id": "52d23672ce9824cbedaa2045948c104ea0b9787a",
"size": "1052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solitude/logger.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Puppet",
"bytes": "6677"
},
{
"name": "Python",
"bytes": "405779"
},
{
"name": "Shell",
"bytes": "3235"
}
],
"symlink_target": ""
}
|
from gi.repository import Gtk
class ButtonWindow(Gtk.Window):
    """Demo window showing three Gtk.Button variants: plain label, stock,
    and mnemonic (underlined accelerator)."""
    def __init__(self):
        Gtk.Window.__init__(self, title="Button Demo")
        self.set_border_width(10)
        hbox = Gtk.Box(spacing=6)
        self.add(hbox)
        # plain label button
        button = Gtk.Button("Click Me")
        button.connect("clicked", self.on_click_me_clicked)
        hbox.pack_start(button, True, True, 0)
        # stock button -- icon and label supplied by GTK
        button = Gtk.Button(stock=Gtk.STOCK_OPEN)
        button.connect("clicked", self.on_open_clicked)
        hbox.pack_start(button, True, True, 0)
        # mnemonic button: the "_C" makes Alt+C activate it
        button = Gtk.Button("_Close", use_underline=True)
        button.connect("clicked", self.on_close_clicked)
        hbox.pack_start(button, True, True, 0)
    def on_click_me_clicked(self, button):
        print("\"Click me\" button was clicked")
    def on_open_clicked(self, button):
        print("\"Open\" button was clicked")
    def on_close_clicked(self, button):
        # quits the GTK main loop, ending the application
        print("Closing application")
        Gtk.main_quit()
# Build the window and hand control to the GTK main loop; closing the
# window (delete-event) also quits the loop.
win = ButtonWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
|
{
"content_hash": "88c6c483590d4fa2bb015ea744576e4b",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 59,
"avg_line_length": 28.945945945945947,
"alnum_prop": 0.6209150326797386,
"repo_name": "lichengshuang/createvhost",
"id": "abd1a9895c0408432cd70b3048994863089cda7e",
"size": "1071",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/others/Preview/test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "84170"
},
{
"name": "C",
"bytes": "25320"
},
{
"name": "CSS",
"bytes": "1323"
},
{
"name": "HTML",
"bytes": "26691"
},
{
"name": "JavaScript",
"bytes": "205981"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "915418"
},
{
"name": "Roff",
"bytes": "6734"
},
{
"name": "Shell",
"bytes": "1548839"
},
{
"name": "Vim script",
"bytes": "56257"
}
],
"symlink_target": ""
}
|
# Generated pyaf test script: build and process one artificial time series
# (N=1024 daily points, MovingAverage trend, cycle length 12, Integration
# transform, no noise, 100 exogenous variables, AR order 0).
# NOTE(review): `tsds` is imported but unused here -- presumably kept for
# parity with the other generated tests.
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 12, transform = "Integration", sigma = 0.0, exog_count = 100, ar_order = 0);
|
{
"content_hash": "0b2a5b56210cf1f5e4f349d805b9bf2d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 173,
"avg_line_length": 39,
"alnum_prop": 0.7142857142857143,
"repo_name": "antoinecarme/pyaf",
"id": "ba3601cc1cd8e9d380a86cdba6657358a78a71c5",
"size": "273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Integration/trend_MovingAverage/cycle_12/ar_/test_artificial_1024_Integration_MovingAverage_12__100.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
import errno
import librosa
import matplotlib, matplotlib.pyplot as plt
import numpy
import os
import os.path
import sklearn
import urllib.request
def init():
    """Configure matplotlib defaults used by the notebooks: muted style,
    grid on, no spines, no axis margins, gray colormap, no image
    interpolation."""
    plt.style.use('seaborn-muted')
    #plt.rcParams['figure.figsize'] = (14, 5)
    plt.rcParams['axes.grid'] = True
    plt.rcParams['axes.spines.left'] = False
    plt.rcParams['axes.spines.right'] = False
    plt.rcParams['axes.spines.bottom'] = False
    plt.rcParams['axes.spines.top'] = False
    plt.rcParams['axes.xmargin'] = 0
    plt.rcParams['axes.ymargin'] = 0
    plt.rcParams['image.cmap'] = 'gray'
    # BUGFIX: the documented rcParams value for disabling interpolation is
    # the *string* 'none'; the Python value None fails rcParams validation
    # on modern matplotlib.
    plt.rcParams['image.interpolation'] = 'none'
def extract_features(signal, features):
    """Compute the requested scalar features of an audio signal.

    Supported feature names are 'zero_crossing_rate' and
    'spectral_centroid'; unrecognized names are silently skipped.

    :param signal: audio signal passed to librosa's feature extractors
    :param features: iterable of feature names
    :returns: list of scalar feature values, in request order
    """
    values = []
    for name in features:
        if name == 'zero_crossing_rate':
            values.append(librosa.feature.zero_crossing_rate(signal)[0, 0])
        elif name == 'spectral_centroid':
            values.append(librosa.feature.spectral_centroid(signal)[0, 0])
    return values
def get_features(collection='drum_samples_train',
                 features=('zero_crossing_rate', 'spectral_centroid'),
                 scaler=None,
                 download=True):
    """Load a drum-sample collection and return scaled features and labels.

    :param collection: 'drum_samples_train' or 'drum_samples_test'
    :param features: feature names passed to extract_features()
    :param scaler: fitted scaler to reuse; when None a MinMaxScaler(-1, 1)
                   is fit on this collection's feature table
    :param download: whether to download the audio files first
    :returns: (features array, labels array, scaler), or None for an
              unknown collection (matching the original behavior)
    """
    # The train and test collections differ only in sample count per class
    # (10 vs 30); the loading/featurizing/scaling pipeline is shared.
    if collection == 'drum_samples_train':
        return _featurize_drum_collection(collection, features, scaler,
                                          download, samples_per_class=10)
    elif collection == 'drum_samples_test':
        return _featurize_drum_collection(collection, features, scaler,
                                          download, samples_per_class=30)

def _featurize_drum_collection(collection, features, scaler, download,
                               samples_per_class):
    """Download, load, featurize, and scale one drum-sample collection.

    Labels are 0.0 for kicks and 1.0 for snares, `samples_per_class` each.
    Returns (scaled feature table, labels, scaler).
    """
    kick_filepaths, snare_filepaths = download_samples(collection,
                                                       download=download)
    kick_signals = [librosa.load(p)[0] for p in kick_filepaths]
    snare_signals = [librosa.load(p)[0] for p in snare_filepaths]
    kick_features = numpy.array([extract_features(x, features)
                                 for x in kick_signals])
    snare_features = numpy.array([extract_features(x, features)
                                  for x in snare_signals])
    feature_table = numpy.vstack((kick_features, snare_features))
    if scaler is None:
        # fit a fresh scaler on this collection's features
        scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(-1, 1))
        scaler.fit(feature_table)
    scaled_features = scaler.transform(feature_table)
    labels = numpy.concatenate((numpy.zeros(samples_per_class),
                                numpy.ones(samples_per_class)))
    return scaled_features, labels, scaler
def download_samples(collection='drum_samples_train', download=True):
    """Download ten kick drum samples and ten snare drum samples.
    `collection`: output directory containing the twenty drum samples
        (also selects which remote sample set is fetched)
    `download`: when False, skip the HTTP requests and only build the
        filepath lists (assumes the files are already on disk)
    Returns:
        `kick_filepaths`: list of kick drum filepaths
        `snare_filepaths`: list of snare drum filepaths
    """
    # Create the output directory; tolerate it already existing.
    try:
        os.makedirs(collection)
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise
    if collection == 'drum_samples_train':
        if download:
            for drum_type in ['kick', 'snare']:
                # Training files are numbered 01..10.
                for i in range(1, 11):
                    filename = '%s_%02d.wav' % (drum_type, i)
                    urllib.request.urlretrieve('http://audio.musicinformationretrieval.com/drum_samples/%s' % filename,
                                               filename=os.path.join(collection, filename))
        kick_filepaths = [os.path.join(collection, 'kick_%02d.wav' % i) for i in range(1, 11)]
        snare_filepaths = [os.path.join(collection, 'snare_%02d.wav' % i) for i in range(1, 11)]
        return kick_filepaths, snare_filepaths
    elif collection == 'drum_samples_test':
        if download:
            for drum_type in ['kick', 'snare']:
                # Test files are numbered 00..29 (zero-based, unlike train).
                for i in range(30):
                    filename = '%s_%02d.wav' % (drum_type, i)
                    urllib.request.urlretrieve('http://audio.musicinformationretrieval.com/drum_samples/test/%s' % filename,
                                               filename=os.path.join(collection, filename))
        kick_filepaths = [os.path.join(collection, 'kick_%02d.wav' % i) for i in range(30)]
        snare_filepaths = [os.path.join(collection, 'snare_%02d.wav' % i) for i in range(30)]
        return kick_filepaths, snare_filepaths
    elif collection == 'violin_samples_train':
        # Fetch an index file listing the per-sample remote paths.
        urllib.request.urlretrieve('http://audio.musicinformationretrieval.com/violin_samples_train/list.txt',
                                   filename=os.path.join(collection, 'list.txt'))
        for line in open(os.path.join(collection, 'list.txt'), 'r'):
            filename = line.strip()
            print(filename)
            if filename.endswith('.wav'):
                # NOTE(review): this saves to `filename` relative to the
                # current working directory, not under `collection` like the
                # other branches — presumably the listed paths start with
                # 'violin_samples_train/'; TODO confirm against list.txt.
                urllib.request.urlretrieve('http://audio.musicinformationretrieval.com/' + filename,
                                           filename=filename)
        # NOTE(review): the returned list includes 'list.txt' as well as the
        # .wav files; callers may need to filter — TODO confirm intent.
        return [os.path.join(collection, f) for f in os.listdir(collection)]
|
{
"content_hash": "3d147df295df27f0075edd07392bf10a",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 124,
"avg_line_length": 41.78947368421053,
"alnum_prop": 0.6178481468154012,
"repo_name": "stevetjoa/stanford-mir",
"id": "641657f97e63667f2464bbdfa363e25898cd18ae",
"size": "5558",
"binary": false,
"copies": "1",
"ref": "refs/heads/gh-pages",
"path": "stanford_mir.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "504"
},
{
"name": "HTML",
"bytes": "81069535"
},
{
"name": "Jupyter Notebook",
"bytes": "67676431"
},
{
"name": "Python",
"bytes": "12770"
}
],
"symlink_target": ""
}
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Tickfont(_BaseLayoutHierarchyType):
    """Font styling for the angular-axis tick labels of a polar layout.

    Auto-generated plotly graph object wrapping the three font properties
    (``color``, ``family``, ``size``) at the property path
    ``layout.polar.angularaxis.tickfont``.
    """
    # class properties
    # --------------------
    _parent_path_str = "layout.polar.angularaxis"
    _path_str = "layout.polar.angularaxis.tickfont"
    _valid_props = {"color", "family", "size"}
    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
        Returns
        -------
        str
        """
        return self["color"]
    @color.setter
    def color(self, val):
        self["color"] = val
    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
        One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
        Narrow", "Raleway", "Times New Roman".
        The 'family' property is a string and must be specified as:
          - A non-empty string
        Returns
        -------
        str
        """
        return self["family"]
    @family.setter
    def family(self, val):
        self["family"] = val
    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]
        Returns
        -------
        int|float
        """
        return self["size"]
    @size.setter
    def size(self, val):
        self["size"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size
        """
    def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
        """
        Construct a new Tickfont object

        Sets the tick font.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.layout.polar.a
            ngularaxis.Tickfont`
        color
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size

        Returns
        -------
        Tickfont
        """
        super(Tickfont, self).__init__("tickfont")
        # Internal fast path: reparenting an existing object skips validation.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.polar.angularaxis.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.polar.angularaxis.Tickfont`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over values in `arg`.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
{
"content_hash": "b877a92bdadcd3103eac366f78282045",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 84,
"avg_line_length": 37.440528634361236,
"alnum_prop": 0.5601835510060007,
"repo_name": "plotly/python-api",
"id": "671860b4c0c90c9d87ff23ecabded6deb3bf2d9c",
"size": "8499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/layout/polar/angularaxis/_tickfont.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
import itertools
import re
import requests
import textwrap
# Rendered SPIR-V specification; scraped for per-instruction documentation.
SPIRV_HTML_SPEC_URL = 'https://www.khronos.org/registry/spir-v/specs/unified1/SPIRV.html'
# Machine-readable SPIR-V grammar from the SPIRV-Headers repository.
SPIRV_JSON_SPEC_URL = 'https://raw.githubusercontent.com/KhronosGroup/SPIRV-Headers/master/include/spirv/unified1/spirv.core.grammar.json'
# Separator emitted between auto-generated op definitions.
AUTOGEN_OP_DEF_SEPARATOR = '\n// -----\n\n'
# Markers delimiting the auto-generated sections inside SPIRVBase.td.
AUTOGEN_ENUM_SECTION_MARKER = 'enum section. Generated from SPIR-V spec; DO NOT MODIFY!'
AUTOGEN_OPCODE_SECTION_MARKER = (
    'opcode section. Generated from SPIR-V spec; DO NOT MODIFY!')
def get_spirv_doc_from_html_spec():
  """Scrapes per-instruction documentation from the SPIR-V HTML spec.
  Returns:
    - A dict mapping from instruction opcode to documentation.
  """
  from bs4 import BeautifulSoup
  html = requests.get(SPIRV_HTML_SPEC_URL).content
  parsed = BeautifulSoup(html, 'html.parser')
  # The per-instruction tables all live under this anchor's parent section.
  anchor = parsed.find('h3', {'id': '_a_id_instructions_a_instructions'})
  docs = {}
  for section in anchor.parent.find_all('div', {'class': 'sect3'}):
    for table in section.find_all('table'):
      cell = table.tbody.tr.td.p
      # The first line of the cell is the opname itself; everything after
      # it is the actual documentation text.
      docs[cell.a['id']] = cell.text.split('\n', 1)[1].strip()
  return docs
def get_spirv_grammar_from_json_spec():
  """Fetches operand-kind and instruction grammar from the SPIR-V JSON spec.
  Returns:
    - A list containing all operand kinds' grammar
    - A list containing all instructions' grammar
  """
  import json
  grammar = json.loads(requests.get(SPIRV_JSON_SPEC_URL).content)
  return grammar['operand_kinds'], grammar['instructions']
def split_list_into_sublists(items, offset):
  """Split the list of items into multiple sublists.

  This is to make sure the string composed from each sublist won't exceed
  80 characters.

  Arguments:
    - items: a list of strings
    - offset: the number of columns already consumed on each output line
      (e.g. leading indentation), counted against the 80-column budget

  Returns:
    - A list of sublists such that joining any sublist with ', ' and
      prepending `offset` columns stays within 80 columns; a single item
      longer than the budget occupies a sublist by itself.
  """
  chunks = []
  chunk = []
  chunk_len = 0
  for item in items:
    # Each item costs its own length plus room for a ', ' separator.
    chunk_len += len(item) + 2
    # Fix: `offset` was previously accepted but ignored, so emitted lines
    # could overflow 80 columns by the indentation width. The `and chunk`
    # guard also prevents appending an empty sublist when the very first
    # item alone exceeds the budget.
    if chunk_len + offset > 80 and chunk:
      chunks.append(chunk)
      chunk = []
      chunk_len = len(item) + 2
    chunk.append(item)
  if chunk:
    chunks.append(chunk)
  return chunks
def uniquify_enum_cases(lst):
  """Prunes duplicate enum cases from the list.

  Arguments:
    - lst: List whose elements are to be uniqued. Assumes each element is a
      (symbol, value) pair. The list is sorted in place by value.

  Returns:
    - A list with all duplicates removed, sorted by value and, per value,
      uniqued by symbol.
    - A map from each deduplicated symbol to the symbol kept for it.
  """
  uniqued_cases = []
  duplicated_cases = {}
  # Sort in place (matching the historical side effect on the caller's list)
  # so that groupby sees equal values adjacently.
  lst.sort(key=lambda pair: pair[1])
  for _, group in itertools.groupby(lst, key=lambda pair: pair[1]):
    # Within one value, order by symbol; the lexicographically smallest is
    # typically the variant without a vendor suffix.
    candidates = sorted(group, key=lambda pair: pair[0])
    keeper = candidates[0]
    for extra in candidates[1:]:
      duplicated_cases[extra[0]] = keeper[0]
    if keeper[0] == 'HlslSemanticGOOGLE':
      # Special case: prefer the other spelling over HlslSemanticGOOGLE.
      assert len(candidates) == 2, 'unexpected new variant for HlslSemantic'
      keeper = candidates[1]
      duplicated_cases[candidates[0][0]] = keeper[0]
    uniqued_cases.append(keeper)
  return uniqued_cases, duplicated_cases
def toposort(dag, sort_fn):
  """Topologically sorts the given dag.
  Arguments:
    - dag: a dict mapping from a node to its incoming nodes.
    - sort_fn: a function for sorting nodes in the same batch.
  Returns:
    A list containing topologically sorted nodes.
  """
  remaining = dag
  ordered = []
  while remaining:
    # Nodes whose predecessors have all been emitted form the next batch.
    ready = set(node for node, preds in remaining.items() if not preds)
    assert ready, 'found cyclic dependency'
    ordered.extend(sorted(ready, key=sort_fn))
    # Drop the emitted nodes and erase them from the remaining edges.
    remaining = {
        node: preds - ready
        for node, preds in remaining.items()
        if node not in ready
    }
  return ordered
def toposort_capabilities(all_cases, capability_mapping):
  """Returns topologically sorted capability (symbol, value) pairs.
  Arguments:
    - all_cases: all capability cases (containing symbol, value, and implied
      capabilities).
    - capability_mapping: mapping from duplicated capability symbols to the
      canonicalized symbol chosen for SPIRVBase.td.
  Returns:
    A list containing topologically sorted capability (symbol, value) pairs.
  """
  dag = {}
  value_of = {}
  for case in all_cases:
    symbol = case['enumerant']
    value_of[symbol] = case['value']
    # Duplicated symbols are folded into their canonical spelling and get
    # no node of their own.
    if symbol in capability_mapping:
      continue
    # Edges point at the capabilities this one implies, canonicalized
    # through the mapping.
    implied = case.get('capabilities', [])
    dag[symbol] = set(capability_mapping.get(c, c) for c in implied)
  ordered = toposort(dag, lambda symbol: value_of[symbol])
  # Attach each capability's value as the second component of the pair.
  return [(symbol, value_of[symbol]) for symbol in ordered]
def get_capability_mapping(operand_kinds):
  """Returns the capability mapping from duplicated cases to canonicalized ones.
  Arguments:
    - operand_kinds: all operand kinds' grammar spec
  Returns:
    - A map mapping from duplicated capability symbols to the canonicalized
      symbol chosen for SPIRVBase.td.
  """
  # Locate the 'Capability' operand kind (last match wins, as before).
  capability_kind = {}
  for kind in operand_kinds:
    if kind['kind'] == 'Capability':
      capability_kind = kind
  pairs = [(case['enumerant'], case['value'])
           for case in capability_kind['enumerants']]
  _, mapping = uniquify_enum_cases(pairs)
  return mapping
def get_availability_spec(enum_case, capability_mapping, for_op, for_cap):
  """Returns the availability specification string for the given enum case.
  Arguments:
    - enum_case: the enum case to generate availability spec for. It may contain
      'version', 'lastVersion', 'extensions', or 'capabilities'.
    - capability_mapping: mapping from duplicated capability symbols to the
      canonicalized symbol chosen for SPIRVBase.td.
    - for_op: bool value indicating whether this is the availability spec for an
      op itself.
    - for_cap: bool value indicating whether this is the availability spec for
      capabilities themselves.
  Returns:
    - A `let availability = [...];` string if with availability spec or
      empty string if without availability spec
  """
  assert not (for_op and for_cap), 'cannot set both for_op and for_cap'
  # Defaults emitted for ops so that every op carries an explicit spec.
  DEFAULT_MIN_VERSION = 'MinVersion<SPV_V_1_0>'
  DEFAULT_MAX_VERSION = 'MaxVersion<SPV_V_1_5>'
  DEFAULT_CAP = 'Capability<[]>'
  DEFAULT_EXT = 'Extension<[]>'
  # Minimal version: 'None' in the grammar means no requirement.
  min_version = enum_case.get('version', '')
  if min_version == 'None':
    min_version = ''
  elif min_version:
    min_version = 'MinVersion<SPV_V_{}>'.format(min_version.replace('.', '_'))
  # TODO(antiagainst): delete this once ODS can support dialect-specific content
  # and we can use omission to mean no requirements.
  if for_op and not min_version:
    min_version = DEFAULT_MIN_VERSION
  # Maximal version.
  max_version = enum_case.get('lastVersion', '')
  if max_version:
    max_version = 'MaxVersion<SPV_V_{}>'.format(max_version.replace('.', '_'))
  # TODO(antiagainst): delete this once ODS can support dialect-specific content
  # and we can use omission to mean no requirements.
  if for_op and not max_version:
    max_version = DEFAULT_MAX_VERSION
  # Extensions, deduplicated and sorted for stable output.
  exts = enum_case.get('extensions', [])
  if exts:
    exts = 'Extension<[{}]>'.format(', '.join(sorted(set(exts))))
    # We need to strip the minimal version requirement if this symbol is
    # available via an extension, which means *any* SPIR-V version can support
    # it as long as the extension is provided. The grammar's 'version' field
    # under such case should be interpreted as this symbol is introduced as
    # a core symbol since the given version, rather than a minimal version
    # requirement.
    min_version = DEFAULT_MIN_VERSION if for_op else ''
  # TODO(antiagainst): delete this once ODS can support dialect-specific content
  # and we can use omission to mean no requirements.
  if for_op and not exts:
    exts = DEFAULT_EXT
  # Capabilities, canonicalized through the duplicate mapping.
  caps = enum_case.get('capabilities', [])
  implies = ''
  if caps:
    canonicalized_caps = []
    for c in caps:
      if c in capability_mapping:
        canonicalized_caps.append(capability_mapping[c])
      else:
        canonicalized_caps.append(c)
    prefixed_caps = [
        'SPV_C_{}'.format(c) for c in sorted(set(canonicalized_caps))
    ]
    if for_cap:
      # If this is generating the availability for capabilities, we need to
      # put the capability "requirements" in implies field because now
      # the "capabilities" field in the source grammar means so.
      caps = ''
      implies = 'list<I32EnumAttrCase> implies = [{}];'.format(
          ', '.join(prefixed_caps))
    else:
      caps = 'Capability<[{}]>'.format(', '.join(prefixed_caps))
      implies = ''
  # TODO(antiagainst): delete this once ODS can support dialect-specific content
  # and we can use omission to mean no requirements.
  if for_op and not caps:
    caps = DEFAULT_CAP
  avail = ''
  # Compose availability spec if any of the requirements is not empty.
  # For ops, because we have a default in SPV_Op class, omit if the spec
  # is the same.
  if (min_version or max_version or caps or exts) and not (
      for_op and min_version == DEFAULT_MIN_VERSION and
      max_version == DEFAULT_MAX_VERSION and caps == DEFAULT_CAP and
      exts == DEFAULT_EXT):
    joined_spec = ',\n    '.join(
        [e for e in [min_version, max_version, exts, caps] if e])
    avail = '{} availability = [\n    {}\n  ];'.format(
        'let' if for_op else 'list<Availability>', joined_spec)
  return '{}{}{}'.format(implies, '\n  ' if implies and avail else '', avail)
def gen_operand_kind_enum_attr(operand_kind, capability_mapping):
  """Generates the TableGen EnumAttr definition for the given operand kind.
  Arguments:
    - operand_kind: one operand kind's grammar spec
    - capability_mapping: mapping from duplicated capability symbols to the
      canonicalized symbol chosen for SPIRVBase.td.
  Returns:
    - The operand kind's name
    - A string containing the TableGen EnumAttr definition
  """
  if 'enumerants' not in operand_kind:
    return '', ''
  # Returns a symbol for the given case in the given kind. This function
  # handles Dim specially to avoid having numbers as the start of symbols,
  # which does not play well with C++ and the MLIR parser.
  def get_case_symbol(kind_name, case_name):
    if kind_name == 'Dim':
      if case_name == '1D' or case_name == '2D' or case_name == '3D':
        return 'Dim{}'.format(case_name)
    return case_name
  kind_name = operand_kind['kind']
  is_bit_enum = operand_kind['category'] == 'BitEnum'
  kind_category = 'Bit' if is_bit_enum else 'I32'
  # Acronym: the uppercase letters of the kind name (e.g. 'MemoryAccess' -> 'MA').
  kind_acronym = ''.join([c for c in kind_name if c >= 'A' and c <= 'Z'])
  name_to_case_dict = {}
  for case in operand_kind['enumerants']:
    name_to_case_dict[case['enumerant']] = case
  if kind_name == 'Capability':
    # Special treatment for capability cases: we need to sort them topologically
    # because a capability can refer to another via the 'implies' field.
    kind_cases = toposort_capabilities(operand_kind['enumerants'],
                                       capability_mapping)
  else:
    kind_cases = [(case['enumerant'], case['value'])
                  for case in operand_kind['enumerants']]
    kind_cases, _ = uniquify_enum_cases(kind_cases)
  # Used to right-align the ':' in each case definition.
  max_len = max([len(symbol) for (symbol, _) in kind_cases])
  # Generate the definition for each enum case
  fmt_str = 'def SPV_{acronym}_{case} {colon:>{offset}} '\
            '{category}EnumAttrCase<"{symbol}", {value}>{avail}'
  case_defs = []
  for case in kind_cases:
    avail = get_availability_spec(name_to_case_dict[case[0]],
                                  capability_mapping,
                                  False, kind_name == 'Capability')
    case_def = fmt_str.format(
        category=kind_category,
        acronym=kind_acronym,
        case=case[0],
        symbol=get_case_symbol(kind_name, case[0]),
        value=case[1],
        avail=' {{\n  {}\n}}'.format(avail) if avail else ';',
        colon=':',
        offset=(max_len + 1 - len(case[0])))
    case_defs.append(case_def)
  case_defs = '\n'.join(case_defs)
  # Generate the list of enum case names
  fmt_str = 'SPV_{acronym}_{symbol}';
  case_names = [fmt_str.format(acronym=kind_acronym,symbol=case[0])
                for case in kind_cases]
  # Split them into sublists and concatenate into multiple lines
  case_names = split_list_into_sublists(case_names, 6)
  case_names = ['{:6}'.format('') + ', '.join(sublist)
                for sublist in case_names]
  case_names = ',\n'.join(case_names)
  # Generate the enum attribute definition
  enum_attr = '''def SPV_{name}Attr :
    SPV_{category}EnumAttr<"{name}", "valid SPIR-V {name}", [
{cases}
    ]>;'''.format(
        name=kind_name, category=kind_category, cases=case_names)
  return kind_name, case_defs + '\n\n' + enum_attr
def gen_opcode(instructions):
  """Generates the TableGen definition to map opname to opcode.
  Arguments:
    - instructions: a list of instruction grammar dicts with 'opname' and
      'opcode' entries.
  Returns:
    - A string containing the TableGen SPV_OpCode definition
  """
  # Right-align the ':' in each case definition for readability.
  widest = max(len(inst['opname']) for inst in instructions)
  case_fmt = 'def SPV_OC_{name} {colon:>{offset}} '\
             'I32EnumAttrCase<"{name}", {value}>;'
  case_lines = []
  for inst in instructions:
    case_lines.append(
        case_fmt.format(
            name=inst['opname'],
            value=inst['opcode'],
            colon=':',
            offset=(widest + 1 - len(inst['opname']))))
  opcode_str = '\n'.join(case_lines)
  # Build the comma-separated case-name list, wrapped to 80 columns with a
  # six-space indent.
  names = ['SPV_OC_{name}'.format(name=inst['opname'])
           for inst in instructions]
  wrapped = split_list_into_sublists(names, 6)
  indented = ['{:6}'.format('') + ', '.join(sub) for sub in wrapped]
  opcode_list = ',\n'.join(indented)
  enum_attr = 'def SPV_OpcodeAttr :\n'\
              '    SPV_I32EnumAttr<"{name}", "valid SPIR-V instructions", [\n'\
              '{lst}\n'\
              '    ]>;'.format(name='Opcode', lst=opcode_list)
  return opcode_str + '\n\n' + enum_attr
def update_td_opcodes(path, instructions, filter_list):
  """Updates SPIRBase.td with new generated opcode cases.
  Arguments:
    - path: the path to SPIRBase.td
    - instructions: a list containing all SPIR-V instructions' grammar
    - filter_list: a list containing new opnames to add
  """
  with open(path, 'r') as f:
    content = f.read()
  # The marker appears twice (begin/end), splitting the file into three parts;
  # part [1] is the auto-generated section to be regenerated.
  content = content.split(AUTOGEN_OPCODE_SECTION_MARKER)
  assert len(content) == 3
  # Extend opcode list with existing list
  existing_opcodes = [k[11:] for k in re.findall('def SPV_OC_\w+', content[1])]
  filter_list.extend(existing_opcodes)
  filter_list = list(set(filter_list))
  # Generate the opcode for all instructions in SPIR-V
  filter_instrs = list(
      filter(lambda inst: (inst['opname'] in filter_list), instructions))
  # Sort instruction based on opcode
  filter_instrs.sort(key=lambda inst: inst['opcode'])
  opcode = gen_opcode(filter_instrs)
  # Substitute the opcode
  content = content[0] + AUTOGEN_OPCODE_SECTION_MARKER + '\n\n' + \
      opcode + '\n\n// End ' + AUTOGEN_OPCODE_SECTION_MARKER \
      + content[2]
  with open(path, 'w') as f:
    f.write(content)
def update_td_enum_attrs(path, operand_kinds, filter_list):
  """Updates SPIRBase.td with new generated enum definitions.
  Arguments:
    - path: the path to SPIRBase.td
    - operand_kinds: a list containing all operand kinds' grammar
    - filter_list: a list containing new enums to add
  """
  with open(path, 'r') as f:
    content = f.read()
  # The marker appears twice (begin/end); part [1] is the auto-generated
  # enum section to be regenerated.
  content = content.split(AUTOGEN_ENUM_SECTION_MARKER)
  assert len(content) == 3
  # Extend filter list with existing enum definitions
  existing_kinds = [
      k[8:-4] for k in re.findall('def SPV_\w+Attr', content[1])]
  filter_list.extend(existing_kinds)
  capability_mapping = get_capability_mapping(operand_kinds)
  # Generate definitions for all enums in filter list
  defs = [
      gen_operand_kind_enum_attr(kind, capability_mapping)
      for kind in operand_kinds
      if kind['kind'] in filter_list
  ]
  # Sort alphabetically according to enum name
  defs.sort(key=lambda enum : enum[0])
  # Only keep the definitions from now on
  # Put Capability's definition at the very beginning because capability cases
  # will be referenced later
  defs = [enum[1] for enum in defs if enum[0] == 'Capability'
         ] + [enum[1] for enum in defs if enum[0] != 'Capability']
  # Substitute the old section
  content = content[0] + AUTOGEN_ENUM_SECTION_MARKER + '\n\n' + \
      '\n\n'.join(defs) + "\n\n// End " + AUTOGEN_ENUM_SECTION_MARKER \
      + content[2];
  with open(path, 'w') as f:
    f.write(content)
def snake_casify(name):
  """Turns the given name to follow snake_case convention.

  E.g. "'Component Type'" becomes 'component_type'.
  """
  # Fix: substitute non-word runs with a space instead of deleting them.
  # Deleting them (the previous behavior) erased the word separators, so a
  # multi-word name like 'Memory Access' collapsed to 'memoryaccess' and
  # could never gain the snake_case underscores this function promises.
  words = re.sub(r'\W+', ' ', name).split()
  return '_'.join(word.lower() for word in words)
def map_spec_operand_to_ods_argument(operand):
  """Maps an operand in SPIR-V JSON spec to an op argument in ODS.
  Arguments:
    - operand: a dict containing the operand's kind, quantifier, and name
      (quantifier is '' for exactly-one, '?' for optional, '*' for variadic)
  Returns:
    - A string containing both the type and name for the argument
  """
  kind = operand['kind']
  quantifier = operand.get('quantifier', '')
  # These instruction "operands" are for encoding the results; they should
  # not be handled here.
  assert kind != 'IdResultType', 'unexpected to handle "IdResultType" kind'
  assert kind != 'IdResult', 'unexpected to handle "IdResult" kind'
  if kind == 'IdRef':
    # Plain SSA value reference: map the quantifier onto ODS wrappers.
    if quantifier == '':
      arg_type = 'SPV_Type'
    elif quantifier == '?':
      arg_type = 'Optional<SPV_Type>'
    else:
      arg_type = 'Variadic<SPV_Type>'
  elif kind == 'IdMemorySemantics' or kind == 'IdScope':
    # TODO(antiagainst): Need to further constrain 'IdMemorySemantics'
    # and 'IdScope' given that they should be generated from OpConstant.
    assert quantifier == '', ('unexpected to have optional/variadic memory '
                              'semantics or scope <id>')
    arg_type = 'SPV_' + kind[2:] + 'Attr'
  elif kind == 'LiteralInteger':
    if quantifier == '':
      arg_type = 'I32Attr'
    elif quantifier == '?':
      arg_type = 'OptionalAttr<I32Attr>'
    else:
      arg_type = 'OptionalAttr<I32ArrayAttr>'
  elif kind == 'LiteralString' or \
      kind == 'LiteralContextDependentNumber' or \
      kind == 'LiteralExtInstInteger' or \
      kind == 'LiteralSpecConstantOpInteger' or \
      kind == 'PairLiteralIntegerIdRef' or \
      kind == 'PairIdRefLiteralInteger' or \
      kind == 'PairIdRefIdRef':
    assert False, '"{}" kind unimplemented'.format(kind)
  else:
    # The rest are all enum operands that we represent with op attributes.
    assert quantifier != '*', 'unexpected to have variadic enum attribute'
    arg_type = 'SPV_{}Attr'.format(kind)
    if quantifier == '?':
      arg_type = 'OptionalAttr<{}>'.format(arg_type)
  # Fall back to the lowercased kind when the grammar gives no operand name.
  name = operand.get('name', '')
  name = snake_casify(name) if name else kind.lower()
  return '{}:${}'.format(arg_type, name)
def get_description(text, appendix):
  """Generates the description for the given SPIR-V instruction.
  Arguments:
    - text: Textual description of the operation as string.
    - appendix: Additional contents to attach in description as string,
      including IR examples, and others.
  Returns:
    - A string that corresponds to the description of the Tablegen op.
  """
  # The HTML comment marks where the auto-generated spec text ends and the
  # hand-written appendix begins.
  template = '{text}\n\n    <!-- End of AutoGen section -->\n{appendix}\n  '
  return template.format(text=text, appendix=appendix)
def get_op_definition(instruction, doc, existing_info, capability_mapping):
"""Generates the TableGen op definition for the given SPIR-V instruction.
Arguments:
- instruction: the instruction's SPIR-V JSON grammar
- doc: the instruction's SPIR-V HTML doc
- existing_info: a dict containing potential manually specified sections for
this instruction
- capability_mapping: mapping from duplicated capability symbols to the
canonicalized symbol chosen for SPIRVBase.td
Returns:
- A string containing the TableGen op definition
"""
fmt_str = ('def SPV_{opname}Op : '
'SPV_{inst_category}<"{opname}"{category_args}[{traits}]> '
'{{\n let summary = {summary};\n\n let description = '
'[{{\n{description}}}];{availability}\n')
inst_category = existing_info.get('inst_category', 'Op')
if inst_category == 'Op':
fmt_str +='\n let arguments = (ins{args});\n\n'\
' let results = (outs{results});\n'
fmt_str +='{extras}'\
'}}\n'
opname = instruction['opname'][2:]
category_args = existing_info.get('category_args', '')
if '\n' in doc:
summary, text = doc.split('\n', 1)
else:
summary = doc
text = ''
wrapper = textwrap.TextWrapper(
width=76, initial_indent=' ', subsequent_indent=' ')
# Format summary. If the summary can fit in the same line, we print it out
# as a "-quoted string; otherwise, wrap the lines using "[{...}]".
summary = summary.strip();
if len(summary) + len(' let summary = "";') <= 80:
summary = '"{}"'.format(summary)
else:
summary = '[{{\n{}\n }}]'.format(wrapper.fill(summary))
# Wrap text
text = text.split('\n')
text = [wrapper.fill(line) for line in text if line]
text = '\n\n'.join(text)
operands = instruction.get('operands', [])
# Op availability
avail = ''
# We assume other instruction categories has a base availability spec, so
# only add this if this is directly using SPV_Op as the base.
if inst_category == 'Op':
avail = get_availability_spec(instruction, capability_mapping, True, False)
if avail:
avail = '\n\n {0}'.format(avail)
# Set op's result
results = ''
if len(operands) > 0 and operands[0]['kind'] == 'IdResultType':
results = '\n SPV_Type:$result\n '
operands = operands[1:]
if 'results' in existing_info:
results = existing_info['results']
# Ignore the operand standing for the result <id>
if len(operands) > 0 and operands[0]['kind'] == 'IdResult':
operands = operands[1:]
# Set op' argument
arguments = existing_info.get('arguments', None)
if arguments is None:
arguments = [map_spec_operand_to_ods_argument(o) for o in operands]
arguments = ',\n '.join(arguments)
if arguments:
# Prepend and append whitespace for formatting
arguments = '\n {}\n '.format(arguments)
description = existing_info.get('description', None)
if description is None:
assembly = '\n ```\n'\
' [TODO]\n'\
' ```mlir\n\n'\
' #### Example:\n\n'\
' ```\n'\
' [TODO]\n' \
' ```'
description = get_description(text, assembly)
return fmt_str.format(
opname=opname,
category_args=category_args,
inst_category=inst_category,
traits=existing_info.get('traits', ''),
summary=summary,
description=description,
availability=avail,
args=arguments,
results=results,
extras=existing_info.get('extras', ''))
def get_string_between(base, start, end):
  """Extracts a substring with a specified start and end from a string.
  Arguments:
    - base: string to extract from.
    - start: string to use as the start of the substring.
    - end: string to use as the end of the substring.
  Returns:
    - The substring if found
    - The part of the base after end of the substring. Is the base string
      itself if the substring wasn't found.
  """
  prefix, found, tail = base.partition(start)
  if not found:
    # `start` never occurs: report no match and hand back the whole base.
    return '', prefix
  body, found_end, remainder = tail.partition(end)
  assert found_end, \
      'cannot find end "{end}" while extracting substring '\
      'starting with {start}'.format(start=start, end=end)
  # Note: rstrip strips trailing *characters* of `end`, preserving the
  # historical behavior callers rely on.
  return body.rstrip(end), remainder
def get_string_between_nested(base, start, end):
    """Extract the first substring delimited by `start` and `end`, honoring
    nested delimiter pairs.

    Arguments:
      - base: string to extract from.
      - start: string marking the beginning of the substring.
      - end: string marking the end of the substring.

    Returns:
      - The delimited substring ('' when `start` is absent).
      - The part of `base` after the end of the substring; this is `base`
        itself when the substring wasn't found.
    """
    before, found_start, rest = base.partition(start)
    if not found_start:
        # No opening delimiter anywhere in the input.
        return '', before
    # Scan forward, tracking how many opening delimiters are still unmatched.
    depth = 1
    pos = 0
    total = len(rest)
    while depth > 0 and pos < total:
        if rest.startswith(end, pos):
            depth -= 1
            if depth == 0:
                break
            pos += len(end)
        elif rest.startswith(start, pos):
            depth += 1
            pos += len(start)
        else:
            pos += 1
    assert pos < total, \
        'cannot find end "{end}" while extracting substring '\
        'starting with "{start}"'.format(start=start, end=end)
    return rest[:pos], rest[pos + len(end):]
def extract_td_op_info(op_def):
    """Extracts potentially manually specified sections in an op's definition.

    Arguments:
      - op_def: a string containing the op's TableGen definition.

    Returns:
      - A dict containing the manually specified sections, keyed by 'opname',
        'inst_category', 'category_args', 'traits', 'description',
        'arguments', 'results', and 'extras'.
    """
    # Get opname. Raw strings keep '\w' from being parsed as an (invalid)
    # Python string escape.
    opname = [o[8:-2] for o in re.findall(r'def SPV_\w+Op', op_def)]
    assert len(opname) == 1, 'more than one ops in the same section!'
    opname = opname[0]
    # Get instruction category: the TableGen base class named after ':'.
    inst_category = [
        o[4:] for o in re.findall(r'SPV_\w+Op',
                                  op_def.split(':', 1)[1])
    ]
    assert len(inst_category) <= 1, 'more than one ops in the same section!'
    inst_category = inst_category[0] if len(inst_category) == 1 else 'Op'
    # Get category_args: whatever sits between the op string name and the
    # trait list inside the template parameter list.
    op_tmpl_params, _ = get_string_between_nested(op_def, '<', '>')
    opstringname, rest = get_string_between(op_tmpl_params, '"', '"')
    category_args = rest.split('[', 1)[0]
    # Get traits
    traits, _ = get_string_between_nested(rest, '[', ']')
    # Get description
    description, rest = get_string_between(op_def, 'let description = [{\n',
                                           '}];\n')
    # Get arguments
    args, rest = get_string_between(rest, ' let arguments = (ins', ');\n')
    # Get results
    results, rest = get_string_between(rest, ' let results = (outs', ');\n')
    # Anything left over (custom builders, verifiers, ...) is kept verbatim.
    extras = rest.strip(' }\n')
    if extras:
        extras = '\n {}\n'.format(extras)
    return {
        # Prefix with 'Op' to make it consistent with SPIR-V spec
        'opname': 'Op{}'.format(opname),
        'inst_category': inst_category,
        'category_args': category_args,
        'traits': traits,
        'description': description,
        'arguments': args,
        'results': results,
        'extras': extras
    }
def update_td_op_definitions(path, instructions, docs, filter_list,
                             inst_category, capability_mapping):
    """Updates SPIRVOps.td with newly generated op definitions.

    Arguments:
      - path: path to SPIRVOps.td
      - instructions: SPIR-V JSON grammar for all instructions
      - docs: SPIR-V HTML doc for all instructions
      - filter_list: a list containing new opnames to include
      - inst_category: default instruction category for ops without an
        existing definition
      - capability_mapping: mapping from duplicated capability symbols to the
        canonicalized symbol chosen for SPIRVBase.td.
    """
    with open(path, 'r') as f:
        content = f.read()

    # Each autogen separator delimits exactly one op definition; everything
    # before the first and after the last separator is kept untouched.
    chunks = content.split(AUTOGEN_OP_DEF_SEPARATOR)
    header, existing_ops, footer = chunks[0], chunks[1:-1], chunks[-1]

    # Remember each existing op's manually-written sections so they survive
    # regeneration, and make sure existing ops stay in the output.
    name_op_map = {}    # opname -> existing ODS definition text
    op_info_dict = {}   # opname -> extracted manual sections
    for op_text in existing_ops:
        info = extract_td_op_info(op_text)
        name = info['opname']
        name_op_map[name] = op_text
        op_info_dict[name] = info
        filter_list.append(name)
    filter_list = sorted(set(filter_list))

    # Index the grammar by opname; keep the first entry on duplicates to
    # match a linear first-match search.
    inst_by_name = {}
    for inst in instructions:
        inst_by_name.setdefault(inst['opname'], inst)

    op_defs = []
    for name in filter_list:
        instruction = inst_by_name.get(name)
        if instruction is None:
            # This is an op added by us; use the existing ODS definition.
            op_defs.append(name_op_map[name])
        else:
            op_defs.append(
                get_op_definition(
                    instruction, docs[name],
                    op_info_dict.get(name, {'inst_category': inst_category}),
                    capability_mapping))

    # Substitute the old op definitions
    new_content = AUTOGEN_OP_DEF_SEPARATOR.join([header] + op_defs + [footer])
    with open(path, 'w') as f:
        f.write(new_content)
if __name__ == '__main__':
    import argparse

    # Command-line driver: regenerates enum attrs, opcodes, and op templates
    # in the SPIR-V dialect TableGen files from the official spec.
    parser = argparse.ArgumentParser(
        description='Update SPIR-V dialect definitions using SPIR-V spec')
    parser.add_argument(
        '--base-td-path',
        dest='base_td_path',
        type=str,
        default=None,
        help='Path to SPIRVBase.td')
    parser.add_argument(
        '--op-td-path',
        dest='op_td_path',
        type=str,
        default=None,
        help='Path to SPIRVOps.td')
    parser.add_argument(
        '--new-enum',
        dest='new_enum',
        type=str,
        default=None,
        help='SPIR-V enum to be added to SPIRVBase.td')
    parser.add_argument(
        '--new-opcodes',
        dest='new_opcodes',
        type=str,
        default=None,
        nargs='*',
        help='update SPIR-V opcodes in SPIRVBase.td')
    parser.add_argument(
        '--new-inst',
        dest='new_inst',
        type=str,
        default=None,
        nargs='*',
        help='SPIR-V instruction to be added to ops file')
    parser.add_argument(
        '--inst-category',
        dest='inst_category',
        type=str,
        default='Op',
        help='SPIR-V instruction category used for choosing '\
             'the TableGen base class to define this op')
    cli_args = parser.parse_args()

    operand_kinds, instructions = get_spirv_grammar_from_json_spec()

    # Define new enum attr
    if cli_args.new_enum is not None:
        assert cli_args.base_td_path is not None
        enum_filter = [cli_args.new_enum] if cli_args.new_enum else []
        update_td_enum_attrs(cli_args.base_td_path, operand_kinds, enum_filter)

    # Define new opcode
    if cli_args.new_opcodes is not None:
        assert cli_args.base_td_path is not None
        update_td_opcodes(cli_args.base_td_path, instructions,
                          cli_args.new_opcodes)

    # Define new op
    if cli_args.new_inst is not None:
        assert cli_args.op_td_path is not None
        docs = get_spirv_doc_from_html_spec()
        capability_mapping = get_capability_mapping(operand_kinds)
        update_td_op_definitions(cli_args.op_td_path, instructions, docs,
                                 cli_args.new_inst, cli_args.inst_category,
                                 capability_mapping)
        # Same message bytes as three chained prints with end='' would emit.
        print('Done. Note that this script just generates a template; '
              'please read the spec and update traits, arguments, and '
              'results accordingly.')
|
{
"content_hash": "a08ce3837c1fe83c4cebb5ef9f1a942e",
"timestamp": "",
"source": "github",
"line_count": 940,
"max_line_length": 138,
"avg_line_length": 33.755319148936174,
"alnum_prop": 0.644500472738733,
"repo_name": "endlessm/chromium-browser",
"id": "5854a74509cdefdca5c9834947016e51080c222a",
"size": "32379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/llvm/mlir/utils/spirv/gen_spirv_dialect.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import logging
from seedbox import config, config_renderer
from seedbox.config_renderer.ignition.base import BaseIgnitionPackage
# Module-level logger for this Ignition package.
log = logging.getLogger(__name__)
class KubeProxyPackage(BaseIgnitionPackage):
    """Ignition package that installs the kube-proxy manifest + kubeconfig."""

    def get_files(self):
        """Return Ignition file entries for kube-proxy, or [] when the
        cluster has no kube-proxy user configured."""
        user = self.cluster.k8s_kube_proxy_user
        if not user:
            log.warning('No user "%s" for kube-proxy in cluster %s',
                        config.k8s_kube_proxy_user_name, self.cluster)
            return []

        manifest = self.render_template('kube-proxy.yaml')
        kubeconfig = config_renderer.kubeconfig.render([user],
                                                       default_user=user)
        manifest_entry = {
            'filesystem': 'root',
            'path': config.k8s_manifests_path + '/kube-proxy.yaml',
            'mode': 0o644,
            'contents': {
                'source': self.to_data_url(manifest),
            },
        }
        kubeconfig_entry = {
            'filesystem': 'root',
            'path': config.k8s_kube_proxy_config_path,
            'mode': 0o644,
            'contents': {
                'source': self.to_data_url(kubeconfig),
            },
        }
        return [manifest_entry, kubeconfig_entry]
|
{
"content_hash": "203ab2127b520876434f673a2eae66e5",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 115,
"avg_line_length": 33.21212121212121,
"alnum_prop": 0.5191605839416058,
"repo_name": "nailgun/seedbox",
"id": "fb4938f1fa15c81ca42584b240cf026c46b33088",
"size": "1096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/seedbox/config_renderer/ignition/kube_proxy/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "13334"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "131526"
},
{
"name": "Shell",
"bytes": "747"
}
],
"symlink_target": ""
}
|
import copy
import mock
import netaddr
from oslo_serialization import jsonutils
import six
from webob import exc
from nova.api.openstack.compute import hypervisors \
as hypervisors_v21
from nova.cells import utils as cells_utils
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests import uuidsentinel as uuids
# Canonical cpu_info JSON blob shared by both fake hypervisors below.
CPU_INFO = """
{"arch": "x86_64",
"vendor": "fake",
"topology": {"cores": 1, "threads": 1, "sockets": 1},
"features": [],
"model": ""}"""
# Raw dict fixtures for two fake compute nodes; turned into ComputeNode
# objects in TEST_HYPERS_OBJ below.
TEST_HYPERS = [
    dict(id=1,
         uuid=uuids.hyper1,
         service_id=1,
         host="compute1",
         vcpus=4,
         memory_mb=10 * 1024,
         local_gb=250,
         vcpus_used=2,
         memory_mb_used=5 * 1024,
         local_gb_used=125,
         hypervisor_type="xen",
         hypervisor_version=3,
         hypervisor_hostname="hyper1",
         free_ram_mb=5 * 1024,
         free_disk_gb=125,
         current_workload=2,
         running_vms=2,
         cpu_info=CPU_INFO,
         disk_available_least=100,
         host_ip=netaddr.IPAddress('1.1.1.1')),
    dict(id=2,
         uuid=uuids.hyper2,
         service_id=2,
         host="compute2",
         vcpus=4,
         memory_mb=10 * 1024,
         local_gb=250,
         vcpus_used=2,
         memory_mb_used=5 * 1024,
         local_gb_used=125,
         hypervisor_type="xen",
         hypervisor_version=3,
         hypervisor_hostname="hyper2",
         free_ram_mb=5 * 1024,
         free_disk_gb=125,
         current_workload=2,
         running_vms=2,
         cpu_info=CPU_INFO,
         disk_available_least=100,
         host_ip=netaddr.IPAddress('2.2.2.2'))]
# One nova-compute Service object per fake compute node.
TEST_SERVICES = [
    objects.Service(id=1,
                    uuid=uuids.service1,
                    host="compute1",
                    binary="nova-compute",
                    topic="compute_topic",
                    report_count=5,
                    disabled=False,
                    disabled_reason=None,
                    availability_zone="nova"),
    objects.Service(id=2,
                    uuid=uuids.service2,
                    host="compute2",
                    binary="nova-compute",
                    topic="compute_topic",
                    report_count=5,
                    disabled=False,
                    disabled_reason=None,
                    availability_zone="nova"),
    ]
# Build the ComputeNode objects BEFORE the 'service' key is injected into
# the raw dicts, since ComputeNode does not take a 'service' field.
TEST_HYPERS_OBJ = [objects.ComputeNode(**hyper_dct)
                   for hyper_dct in TEST_HYPERS]
TEST_HYPERS[0].update({'service': TEST_SERVICES[0]})
TEST_HYPERS[1].update({'service': TEST_SERVICES[1]})
# Fake instances spread across the two fake compute hosts.
TEST_SERVERS = [dict(name="inst1", uuid=uuids.instance_1, host="compute1"),
                dict(name="inst2", uuid=uuids.instance_2, host="compute2"),
                dict(name="inst3", uuid=uuids.instance_3, host="compute1"),
                dict(name="inst4", uuid=uuids.instance_4, host="compute2")]
def fake_compute_node_get_all(context, limit=None, marker=None):
    """Fake ComputeNode listing that honors limit/marker pagination.

    Raises MarkerNotFound for the sentinel invalid markers; otherwise
    returns the fake hypervisors strictly after the marker (all of them
    when marker is None), truncated to ``limit`` entries.
    """
    if marker in ['99999', uuids.invalid_marker]:
        raise exception.MarkerNotFound(marker)
    # No marker means everything is "after the marker" from the start.
    marker_found = marker is None
    output = []
    for hyper in TEST_HYPERS_OBJ:
        # Starting with the 2.53 microversion, the marker is a uuid.
        if not marker_found and marker in (str(hyper.id), hyper.uuid):
            marker_found = True
        elif marker_found:
            if limit is None or len(output) < int(limit):
                output.append(hyper)
    return output
def fake_compute_node_search_by_hypervisor(context, hypervisor_re):
    # Fake search: every hypervisor matches, regardless of the regex.
    return TEST_HYPERS_OBJ
def fake_compute_node_get(context, compute_id):
    """Look up a fake compute node by uuid or integer id.

    Raises ComputeHostNotFound when no fake node matches.
    """
    # Short-circuit keeps int() from running on a matching uuid string,
    # mirroring the id/uuid dual-key lookup the real API performs.
    match = next((hyper for hyper in TEST_HYPERS_OBJ
                  if hyper.uuid == compute_id or
                  hyper.id == int(compute_id)), None)
    if match is not None:
        return match
    raise exception.ComputeHostNotFound(host=compute_id)
def fake_service_get_by_compute_host(context, host):
    """Return the fake Service running on *host*, or None when absent."""
    return next((svc for svc in TEST_SERVICES if svc.host == host), None)
def fake_compute_node_statistics(context):
    """Aggregate resource statistics across all fake compute nodes."""
    summed_keys = ('vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
                   'memory_mb_used', 'local_gb_used', 'free_ram_mb',
                   'free_disk_gb', 'current_workload', 'running_vms',
                   'disk_available_least')
    # 'count' is the number of nodes; every other key is summed over nodes.
    totals = {'count': len(TEST_HYPERS_OBJ)}
    for key in summed_keys:
        totals[key] = 0
    for hyper in TEST_HYPERS_OBJ:
        for key in summed_keys:
            totals[key] += getattr(hyper, key)
    return totals
def fake_instance_get_all_by_host(context, host):
    """Build fake Instance objects for every test server on *host*."""
    return [fake_instance.fake_instance_obj(context, **inst)
            for inst in TEST_SERVERS if inst['host'] == host]
class HypervisorsTestV21(test.NoDBTestCase):
    """Tests for the os-hypervisors API controller at microversion 2.1."""
    api_version = '2.1'
    # Allow subclasses to override if the id value in the response is the
    # compute node primary key integer id or the uuid.
    expect_uuid_for_id = False
    # copying the objects locally so the cells testcases can provide their own
    TEST_HYPERS_OBJ = copy.deepcopy(TEST_HYPERS_OBJ)
    TEST_SERVICES = copy.deepcopy(TEST_SERVICES)
    TEST_SERVERS = copy.deepcopy(TEST_SERVERS)
    # Expected 'detail' responses, derived from the raw fixture dicts by
    # dropping DB-only keys and adding the computed state/status/service.
    DETAIL_HYPERS_DICTS = copy.deepcopy(TEST_HYPERS)
    del DETAIL_HYPERS_DICTS[0]['service_id']
    del DETAIL_HYPERS_DICTS[1]['service_id']
    del DETAIL_HYPERS_DICTS[0]['host']
    del DETAIL_HYPERS_DICTS[1]['host']
    del DETAIL_HYPERS_DICTS[0]['uuid']
    del DETAIL_HYPERS_DICTS[1]['uuid']
    DETAIL_HYPERS_DICTS[0].update({'state': 'up',
                                   'status': 'enabled',
                                   'service': dict(id=1, host='compute1',
                                                   disabled_reason=None)})
    DETAIL_HYPERS_DICTS[1].update({'state': 'up',
                                   'status': 'enabled',
                                   'service': dict(id=2, host='compute2',
                                                   disabled_reason=None)})
    # Expected 'index' (non-detail) responses.
    INDEX_HYPER_DICTS = [
        dict(id=1, hypervisor_hostname="hyper1",
             state='up', status='enabled'),
        dict(id=2, hypervisor_hostname="hyper2",
             state='up', status='enabled')]
    # How null-ish cpu_info inputs are rendered (overridden in 2.28+).
    DETAIL_NULL_CPUINFO_DICT = {'': '', None: None}
    def _get_request(self, use_admin_context, url=''):
        # Build a fake HTTP request pinned to this class's microversion.
        return fakes.HTTPRequest.blank(url,
                                       use_admin_context=use_admin_context,
                                       version=self.api_version)
    def _set_up_controller(self):
        # Controller under test, with the servicegroup API stubbed to report
        # every service as up.
        self.controller = hypervisors_v21.HypervisorsController()
        self.controller.servicegroup_api.service_is_up = mock.MagicMock(
            return_value=True)
    def _get_hyper_id(self):
        """Helper function to get the proper hypervisor id for a request
        :returns: The first hypervisor's uuid for microversions that expect a
            uuid for the id, otherwise the hypervisor's id primary key
        """
        return (self.TEST_HYPERS_OBJ[0].uuid if self.expect_uuid_for_id
                else self.TEST_HYPERS_OBJ[0].id)
    def setUp(self):
        super(HypervisorsTestV21, self).setUp()
        self._set_up_controller()
        self.rule_hyp_show = "os_compute_api:os-hypervisors"
        # Route all host_api calls through the module-level fakes.
        host_api = self.controller.host_api
        host_api.compute_node_get_all = mock.MagicMock(
            side_effect=fake_compute_node_get_all)
        host_api.service_get_by_compute_host = mock.MagicMock(
            side_effect=fake_service_get_by_compute_host)
        host_api.compute_node_search_by_hypervisor = mock.MagicMock(
            side_effect=fake_compute_node_search_by_hypervisor)
        host_api.compute_node_get = mock.MagicMock(
            side_effect=fake_compute_node_get)
        self.stub_out('nova.db.compute_node_statistics',
                      fake_compute_node_statistics)
    def test_view_hypervisor_nodetail_noservers(self):
        req = self._get_request(True)
        result = self.controller._view_hypervisor(
            self.TEST_HYPERS_OBJ[0], self.TEST_SERVICES[0], False, req)
        self.assertEqual(self.INDEX_HYPER_DICTS[0], result)
    def test_view_hypervisor_detail_noservers(self):
        req = self._get_request(True)
        result = self.controller._view_hypervisor(
            self.TEST_HYPERS_OBJ[0], self.TEST_SERVICES[0], True, req)
        self.assertEqual(self.DETAIL_HYPERS_DICTS[0], result)
    def test_view_hypervisor_servers(self):
        req = self._get_request(True)
        result = self.controller._view_hypervisor(self.TEST_HYPERS_OBJ[0],
                                                  self.TEST_SERVICES[0],
                                                  False, req,
                                                  self.TEST_SERVERS)
        expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0])
        expected_dict.update({'servers': [
                                  dict(name="inst1", uuid=uuids.instance_1),
                                  dict(name="inst2", uuid=uuids.instance_2),
                                  dict(name="inst3", uuid=uuids.instance_3),
                                  dict(name="inst4", uuid=uuids.instance_4)]})
        self.assertEqual(expected_dict, result)
    def _test_view_hypervisor_detail_cpuinfo_null(self, cpu_info):
        # Common body for the ''/None cpu_info rendering tests below.
        req = self._get_request(True)
        test_hypervisor_obj = copy.deepcopy(self.TEST_HYPERS_OBJ[0])
        test_hypervisor_obj.cpu_info = cpu_info
        result = self.controller._view_hypervisor(test_hypervisor_obj,
                                                  self.TEST_SERVICES[0],
                                                  True, req)
        expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
        expected_dict.update({'cpu_info':
                              self.DETAIL_NULL_CPUINFO_DICT[cpu_info]})
        self.assertEqual(result, expected_dict)
    def test_view_hypervisor_detail_cpuinfo_empty_string(self):
        self._test_view_hypervisor_detail_cpuinfo_null('')
    def test_view_hypervisor_detail_cpuinfo_none(self):
        self._test_view_hypervisor_detail_cpuinfo_null(None)
    def test_index(self):
        req = self._get_request(True)
        result = self.controller.index(req)
        self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result)
    def test_index_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.index, req)
    def test_index_compute_host_not_found(self):
        """Tests that if a service is deleted but the compute node is not we
        don't fail when listing hypervisors.
        """
        # two computes, a matching service only exists for the first one
        compute_nodes = objects.ComputeNodeList(objects=[
            objects.ComputeNode(**TEST_HYPERS[0]),
            objects.ComputeNode(**TEST_HYPERS[1])
        ])
        def fake_service_get_by_compute_host(context, host):
            if host == TEST_HYPERS[0]['host']:
                return TEST_SERVICES[0]
            raise exception.ComputeHostNotFound(host=host)
        @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
                           return_value=compute_nodes)
        @mock.patch.object(self.controller.host_api,
                           'service_get_by_compute_host',
                           fake_service_get_by_compute_host)
        def _test(self, compute_node_get_all):
            req = self._get_request(True)
            result = self.controller.index(req)
            # Only the node whose service still exists is listed.
            self.assertEqual(1, len(result['hypervisors']))
            expected = {
                'id': compute_nodes[0].uuid if self.expect_uuid_for_id
                      else compute_nodes[0].id,
                'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
                'state': 'up',
                'status': 'enabled',
            }
            self.assertDictEqual(expected, result['hypervisors'][0])
        _test(self)
    def test_index_compute_host_not_mapped(self):
        """Tests that we don't fail index if a host is not mapped."""
        # two computes, a matching service only exists for the first one
        compute_nodes = objects.ComputeNodeList(objects=[
            objects.ComputeNode(**TEST_HYPERS[0]),
            objects.ComputeNode(**TEST_HYPERS[1])
        ])
        def fake_service_get_by_compute_host(context, host):
            if host == TEST_HYPERS[0]['host']:
                return TEST_SERVICES[0]
            raise exception.HostMappingNotFound(name=host)
        @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
                           return_value=compute_nodes)
        @mock.patch.object(self.controller.host_api,
                           'service_get_by_compute_host',
                           fake_service_get_by_compute_host)
        def _test(self, compute_node_get_all):
            req = self._get_request(True)
            result = self.controller.index(req)
            self.assertEqual(1, len(result['hypervisors']))
            expected = {
                'id': compute_nodes[0].uuid if self.expect_uuid_for_id
                      else compute_nodes[0].id,
                'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
                'state': 'up',
                'status': 'enabled',
            }
            self.assertDictEqual(expected, result['hypervisors'][0])
        _test(self)
    def test_detail(self):
        req = self._get_request(True)
        result = self.controller.detail(req)
        self.assertEqual(dict(hypervisors=self.DETAIL_HYPERS_DICTS), result)
    def test_detail_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.detail, req)
    def test_detail_compute_host_not_found(self):
        """Tests that if a service is deleted but the compute node is not we
        don't fail when listing hypervisors.
        """
        # two computes, a matching service only exists for the first one
        compute_nodes = objects.ComputeNodeList(objects=[
            objects.ComputeNode(**TEST_HYPERS[0]),
            objects.ComputeNode(**TEST_HYPERS[1])
        ])
        def fake_service_get_by_compute_host(context, host):
            if host == TEST_HYPERS[0]['host']:
                return TEST_SERVICES[0]
            raise exception.ComputeHostNotFound(host=host)
        @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
                           return_value=compute_nodes)
        @mock.patch.object(self.controller.host_api,
                           'service_get_by_compute_host',
                           fake_service_get_by_compute_host)
        def _test(self, compute_node_get_all):
            req = self._get_request(True)
            result = self.controller.detail(req)
            self.assertEqual(1, len(result['hypervisors']))
            expected = {
                'id': compute_nodes[0].id,
                'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
                'state': 'up',
                'status': 'enabled',
            }
            # we don't care about all of the details, just make sure we get
            # the subset we care about and there are more keys than what index
            # would return
            hypervisor = result['hypervisors'][0]
            self.assertTrue(
                set(expected.keys()).issubset(set(hypervisor.keys())))
            self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
            self.assertEqual(compute_nodes[0].hypervisor_hostname,
                             hypervisor['hypervisor_hostname'])
        _test(self)
    def test_detail_compute_host_not_mapped(self):
        """Tests that if a service is deleted but the compute node is not we
        don't fail when listing hypervisors.
        """
        # two computes, a matching service only exists for the first one
        compute_nodes = objects.ComputeNodeList(objects=[
            objects.ComputeNode(**TEST_HYPERS[0]),
            objects.ComputeNode(**TEST_HYPERS[1])
        ])
        def fake_service_get_by_compute_host(context, host):
            if host == TEST_HYPERS[0]['host']:
                return TEST_SERVICES[0]
            raise exception.HostMappingNotFound(name=host)
        @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
                           return_value=compute_nodes)
        @mock.patch.object(self.controller.host_api,
                           'service_get_by_compute_host',
                           fake_service_get_by_compute_host)
        def _test(self, compute_node_get_all):
            req = self._get_request(True)
            result = self.controller.detail(req)
            self.assertEqual(1, len(result['hypervisors']))
            expected = {
                'id': compute_nodes[0].id,
                'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
                'state': 'up',
                'status': 'enabled',
            }
            # we don't care about all of the details, just make sure we get
            # the subset we care about and there are more keys than what index
            # would return
            hypervisor = result['hypervisors'][0]
            self.assertTrue(
                set(expected.keys()).issubset(set(hypervisor.keys())))
            self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
            self.assertEqual(compute_nodes[0].hypervisor_hostname,
                             hypervisor['hypervisor_hostname'])
        _test(self)
    def test_show_compute_host_not_mapped(self):
        """Tests that if a service is deleted but the compute node is not we
        don't fail when listing hypervisors.
        """
        @mock.patch.object(self.controller.host_api, 'compute_node_get',
                           return_value=self.TEST_HYPERS_OBJ[0])
        @mock.patch.object(self.controller.host_api,
                           'service_get_by_compute_host')
        def _test(self, mock_service, mock_compute_node_get):
            req = self._get_request(True)
            mock_service.side_effect = exception.HostMappingNotFound(
                name='foo')
            hyper_id = self._get_hyper_id()
            self.assertRaises(exc.HTTPNotFound, self.controller.show,
                              req, hyper_id)
            self.assertTrue(mock_service.called)
            mock_compute_node_get.assert_called_once_with(mock.ANY, hyper_id)
        _test(self)
    def test_show_noid(self):
        req = self._get_request(True)
        # hyper3 does not exist in the fixtures, so show must 404.
        hyperid = uuids.hyper3 if self.expect_uuid_for_id else '3'
        self.assertRaises(exc.HTTPNotFound, self.controller.show, req, hyperid)
    def test_show_non_integer_id(self):
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound, self.controller.show, req, 'abc')
    def test_show_withid(self):
        req = self._get_request(True)
        hyper_id = self._get_hyper_id()
        result = self.controller.show(req, hyper_id)
        self.assertEqual(dict(hypervisor=self.DETAIL_HYPERS_DICTS[0]), result)
    def test_show_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.show, req,
                          self._get_hyper_id())
    def test_uptime_noid(self):
        req = self._get_request(True)
        hyper_id = uuids.hyper3 if self.expect_uuid_for_id else '3'
        self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req,
                          hyper_id)
    def test_uptime_notimplemented(self):
        with mock.patch.object(self.controller.host_api, 'get_host_uptime',
                               side_effect=exc.HTTPNotImplemented()
                               ) as mock_get_uptime:
            req = self._get_request(True)
            hyper_id = self._get_hyper_id()
            self.assertRaises(exc.HTTPNotImplemented,
                              self.controller.uptime, req, hyper_id)
            self.assertEqual(1, mock_get_uptime.call_count)
    def test_uptime_implemented(self):
        with mock.patch.object(self.controller.host_api, 'get_host_uptime',
                               return_value="fake uptime"
                               ) as mock_get_uptime:
            req = self._get_request(True)
            hyper_id = self._get_hyper_id()
            result = self.controller.uptime(req, hyper_id)
            expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0])
            expected_dict.update({'uptime': "fake uptime"})
            self.assertEqual(dict(hypervisor=expected_dict), result)
            self.assertEqual(1, mock_get_uptime.call_count)
    def test_uptime_non_integer_id(self):
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req, 'abc')
    def test_uptime_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.uptime, req,
                          self.TEST_HYPERS_OBJ[0].id)
    def test_uptime_hypervisor_down(self):
        with mock.patch.object(self.controller.host_api, 'get_host_uptime',
                side_effect=exception.ComputeServiceUnavailable(host='dummy')
                               ) as mock_get_uptime:
            req = self._get_request(True)
            hyper_id = self._get_hyper_id()
            self.assertRaises(exc.HTTPBadRequest,
                              self.controller.uptime, req, hyper_id)
            mock_get_uptime.assert_called_once_with(
                mock.ANY, self.TEST_HYPERS_OBJ[0].host)
    def test_uptime_hypervisor_not_mapped_service_get(self):
        @mock.patch.object(self.controller.host_api, 'compute_node_get')
        @mock.patch.object(self.controller.host_api, 'get_host_uptime')
        @mock.patch.object(self.controller.host_api,
                           'service_get_by_compute_host',
                           side_effect=exception.HostMappingNotFound(
                               name='dummy'))
        def _test(mock_get, _, __):
            req = self._get_request(True)
            hyper_id = self._get_hyper_id()
            self.assertRaises(exc.HTTPNotFound,
                              self.controller.uptime, req, hyper_id)
            self.assertTrue(mock_get.called)
        _test()
    def test_uptime_hypervisor_not_mapped(self):
        with mock.patch.object(self.controller.host_api, 'get_host_uptime',
                side_effect=exception.HostMappingNotFound(name='dummy')
                               ) as mock_get_uptime:
            req = self._get_request(True)
            hyper_id = self._get_hyper_id()
            self.assertRaises(exc.HTTPNotFound,
                              self.controller.uptime, req, hyper_id)
            mock_get_uptime.assert_called_once_with(
                mock.ANY, self.TEST_HYPERS_OBJ[0].host)
    def test_search(self):
        req = self._get_request(True)
        result = self.controller.search(req, 'hyper')
        self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result)
    def test_search_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.search, req,
                          self.TEST_HYPERS_OBJ[0].id)
    def test_search_non_exist(self):
        with mock.patch.object(self.controller.host_api,
                               'compute_node_search_by_hypervisor',
                               return_value=[]) as mock_node_search:
            req = self._get_request(True)
            self.assertRaises(exc.HTTPNotFound, self.controller.search,
                              req, 'a')
            self.assertEqual(1, mock_node_search.call_count)
    def test_search_unmapped(self):
        @mock.patch.object(self.controller.host_api,
                           'compute_node_search_by_hypervisor')
        @mock.patch.object(self.controller.host_api,
                           'service_get_by_compute_host')
        def _test(mock_service, mock_search):
            mock_search.return_value = [mock.MagicMock()]
            mock_service.side_effect = exception.HostMappingNotFound(
                name='foo')
            req = self._get_request(True)
            self.assertRaises(exc.HTTPNotFound, self.controller.search,
                              req, 'a')
            self.assertTrue(mock_service.called)
        _test()
    @mock.patch.object(objects.InstanceList, 'get_by_host',
                       side_effect=fake_instance_get_all_by_host)
    def test_servers(self, mock_get):
        req = self._get_request(True)
        result = self.controller.servers(req, 'hyper')
        expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS)
        expected_dict[0].update({'servers': [
                                     dict(uuid=uuids.instance_1),
                                     dict(uuid=uuids.instance_3)]})
        expected_dict[1].update({'servers': [
                                     dict(uuid=uuids.instance_2),
                                     dict(uuid=uuids.instance_4)]})
        # Drop the 'name' key before comparing; only uuids are expected.
        for output in result['hypervisors']:
            servers = output['servers']
            for server in servers:
                del server['name']
        self.assertEqual(dict(hypervisors=expected_dict), result)
    def test_servers_not_mapped(self):
        req = self._get_request(True)
        with mock.patch.object(self.controller.host_api,
                               'instance_get_all_by_host') as m:
            m.side_effect = exception.HostMappingNotFound(name='something')
            self.assertRaises(exc.HTTPNotFound,
                              self.controller.servers, req, 'hyper')
    def test_servers_non_id(self):
        with mock.patch.object(self.controller.host_api,
                               'compute_node_search_by_hypervisor',
                               return_value=[]) as mock_node_search:
            req = self._get_request(True)
            self.assertRaises(exc.HTTPNotFound,
                              self.controller.servers,
                              req, '115')
            self.assertEqual(1, mock_node_search.call_count)
    def test_servers_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.servers, req,
                          self.TEST_HYPERS_OBJ[0].id)
    def test_servers_with_non_integer_hypervisor_id(self):
        with mock.patch.object(self.controller.host_api,
                               'compute_node_search_by_hypervisor',
                               return_value=[]) as mock_node_search:
            req = self._get_request(True)
            self.assertRaises(exc.HTTPNotFound,
                              self.controller.servers, req, 'abc')
            self.assertEqual(1, mock_node_search.call_count)
    def test_servers_with_no_server(self):
        with mock.patch.object(self.controller.host_api,
                               'instance_get_all_by_host',
                               return_value=[]) as mock_inst_get_all:
            req = self._get_request(True)
            result = self.controller.servers(req, self.TEST_HYPERS_OBJ[0].id)
            self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result)
            self.assertTrue(mock_inst_get_all.called)
    def test_statistics(self):
        req = self._get_request(True)
        result = self.controller.statistics(req)
        self.assertEqual(dict(hypervisor_statistics=dict(
                                  count=2,
                                  vcpus=8,
                                  memory_mb=20 * 1024,
                                  local_gb=500,
                                  vcpus_used=4,
                                  memory_mb_used=10 * 1024,
                                  local_gb_used=250,
                                  free_ram_mb=10 * 1024,
                                  free_disk_gb=250,
                                  current_workload=4,
                                  running_vms=4,
                                  disk_available_least=200)), result)
    def test_statistics_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.statistics, req)
# Cell path used to prefix ids/hosts in the cells-v1 proxy test variants.
_CELL_PATH = 'cell1'
class CellHypervisorsTestV21(HypervisorsTestV21):
    """Runs the v2.1 hypervisor tests through cells-v1 proxy objects.

    Fixtures are wrapped in cells proxies and expected ids/hosts are
    prefixed with the cell path.
    """
    TEST_HYPERS_OBJ = [cells_utils.ComputeNodeProxy(obj, _CELL_PATH)
                       for obj in TEST_HYPERS_OBJ]
    TEST_SERVICES = [cells_utils.ServiceProxy(obj, _CELL_PATH)
                     for obj in TEST_SERVICES]
    TEST_SERVERS = [dict(server,
                         host=cells_utils.cell_with_item(_CELL_PATH,
                                                         server['host']))
                    for server in TEST_SERVERS]
    # Expected responses carry cell-prefixed ids and hosts.
    DETAIL_HYPERS_DICTS = copy.deepcopy(HypervisorsTestV21.DETAIL_HYPERS_DICTS)
    DETAIL_HYPERS_DICTS = [dict(hyp, id=cells_utils.cell_with_item(_CELL_PATH,
                                                                   hyp['id']),
                                service=dict(hyp['service'],
                                             id=cells_utils.cell_with_item(
                                                 _CELL_PATH,
                                                 hyp['service']['id']),
                                             host=cells_utils.cell_with_item(
                                                 _CELL_PATH,
                                                 hyp['service']['host'])))
                           for hyp in DETAIL_HYPERS_DICTS]
    INDEX_HYPER_DICTS = copy.deepcopy(HypervisorsTestV21.INDEX_HYPER_DICTS)
    INDEX_HYPER_DICTS = [dict(hyp, id=cells_utils.cell_with_item(_CELL_PATH,
                                                                 hyp['id']))
                         for hyp in INDEX_HYPER_DICTS]
    # __deepcopy__ is added for copying an object locally in
    # _test_view_hypervisor_detail_cpuinfo_null
    cells_utils.ComputeNodeProxy.__deepcopy__ = (lambda self, memo:
        cells_utils.ComputeNodeProxy(copy.deepcopy(self._obj, memo),
                                     self._cell_path))
    @classmethod
    def fake_compute_node_get_all(cls, context, limit=None, marker=None):
        # Cells fakes ignore pagination entirely.
        return cls.TEST_HYPERS_OBJ
    @classmethod
    def fake_compute_node_search_by_hypervisor(cls, context, hypervisor_re):
        return cls.TEST_HYPERS_OBJ
    @classmethod
    def fake_compute_node_get(cls, context, compute_id):
        for hyper in cls.TEST_HYPERS_OBJ:
            if hyper.id == compute_id:
                return hyper
        raise exception.ComputeHostNotFound(host=compute_id)
    @classmethod
    def fake_service_get_by_compute_host(cls, context, host):
        # Implicitly returns None when no service matches *host*.
        for service in cls.TEST_SERVICES:
            if service.host == host:
                return service
    @classmethod
    def fake_instance_get_all_by_host(cls, context, host):
        results = []
        for inst in cls.TEST_SERVERS:
            if inst['host'] == host:
                results.append(inst)
        return results
    def setUp(self):
        self.flags(enable=True, cell_type='api', group='cells')
        super(CellHypervisorsTestV21, self).setUp()
        # Re-stub host_api with the cells-aware classmethod fakes above.
        host_api = self.controller.host_api
        host_api.compute_node_get_all = mock.MagicMock(
            side_effect=self.fake_compute_node_get_all)
        host_api.service_get_by_compute_host = mock.MagicMock(
            side_effect=self.fake_service_get_by_compute_host)
        host_api.compute_node_search_by_hypervisor = mock.MagicMock(
            side_effect=self.fake_compute_node_search_by_hypervisor)
        host_api.compute_node_get = mock.MagicMock(
            side_effect=self.fake_compute_node_get)
        host_api.compute_node_statistics = mock.MagicMock(
            side_effect=fake_compute_node_statistics)
        host_api.instance_get_all_by_host = mock.MagicMock(
            side_effect=self.fake_instance_get_all_by_host)
class HypervisorsTestV228(HypervisorsTestV21):
    """Tests the 2.28 microversion, where cpu_info in detail responses is
    a parsed dict (jsonutils.loads) instead of the raw JSON string.
    """
    api_version = '2.28'
    DETAIL_HYPERS_DICTS = copy.deepcopy(HypervisorsTestV21.DETAIL_HYPERS_DICTS)
    DETAIL_HYPERS_DICTS[0]['cpu_info'] = jsonutils.loads(CPU_INFO)
    DETAIL_HYPERS_DICTS[1]['cpu_info'] = jsonutils.loads(CPU_INFO)
    # Expected cpu_info output when the stored value is '' or None.
    DETAIL_NULL_CPUINFO_DICT = {'': {}, None: {}}
class HypervisorsTestV233(HypervisorsTestV228):
    """Tests the 2.33 microversion, which adds limit/marker paging (with
    integer-id markers) to the index and detail listings.
    """
    api_version = '2.33'
    def test_index_pagination(self):
        """limit=1&marker=1 returns only hyper2 plus a next link."""
        req = self._get_request(True,
                                '/v2/1234/os-hypervisors?limit=1&marker=1')
        result = self.controller.index(req)
        expected = {
            'hypervisors': [
                {'hypervisor_hostname': 'hyper2',
                 'id': 2,
                 'state': 'up',
                 'status': 'enabled'}
            ],
            'hypervisors_links': [
                {'href': 'http://localhost/v2/hypervisors?limit=1&marker=2',
                 'rel': 'next'}
            ]
        }
        self.assertEqual(expected, result)
    def test_index_pagination_with_invalid_marker(self):
        """An unknown marker id yields a 400."""
        req = self._get_request(True,
                                '/v2/1234/os-hypervisors?marker=99999')
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller.index, req)
    def test_detail_pagination(self):
        """Detail paging: one full record for hyper2 plus a next link."""
        req = self._get_request(
            True, '/v2/1234/os-hypervisors/detail?limit=1&marker=1')
        result = self.controller.detail(req)
        link = 'http://localhost/v2/hypervisors/detail?limit=1&marker=2'
        expected = {
            'hypervisors': [
                {'cpu_info': {'arch': 'x86_64',
                              'features': [],
                              'model': '',
                              'topology': {'cores': 1,
                                           'sockets': 1,
                                           'threads': 1},
                              'vendor': 'fake'},
                 'current_workload': 2,
                 'disk_available_least': 100,
                 'free_disk_gb': 125,
                 'free_ram_mb': 5120,
                 'host_ip': netaddr.IPAddress('2.2.2.2'),
                 'hypervisor_hostname': 'hyper2',
                 'hypervisor_type': 'xen',
                 'hypervisor_version': 3,
                 'id': 2,
                 'local_gb': 250,
                 'local_gb_used': 125,
                 'memory_mb': 10240,
                 'memory_mb_used': 5120,
                 'running_vms': 2,
                 'service': {'disabled_reason': None,
                             'host': 'compute2',
                             'id': 2},
                 'state': 'up',
                 'status': 'enabled',
                 'vcpus': 4,
                 'vcpus_used': 2}
            ],
            'hypervisors_links': [{'href': link, 'rel': 'next'}]
        }
        self.assertEqual(expected, result)
    def test_detail_pagination_with_invalid_marker(self):
        """An unknown detail marker id yields a 400."""
        req = self._get_request(True,
                                '/v2/1234/os-hypervisors/detail?marker=99999')
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller.detail, req)
class HypervisorsTestV252(HypervisorsTestV233):
    """This is a boundary test to make sure 2.52 works like 2.33."""
    # No overrides: every inherited 2.33 test must pass unchanged at 2.52.
    api_version = '2.52'
class HypervisorsTestV253(HypervisorsTestV252):
    """Tests the 2.53 microversion: hypervisor and service ids are UUIDs,
    paging markers are UUIDs, and the legacy search/servers routes are
    replaced by hypervisor_hostname_pattern / with_servers query params.
    """
    api_version = hypervisors_v21.UUID_FOR_ID_MIN_VERSION
    expect_uuid_for_id = True
    # This is an expected response for index().
    INDEX_HYPER_DICTS = [
        dict(id=uuids.hyper1, hypervisor_hostname="hyper1",
             state='up', status='enabled'),
        dict(id=uuids.hyper2, hypervisor_hostname="hyper2",
             state='up', status='enabled')]
    def setUp(self):
        super(HypervisorsTestV253, self).setUp()
        # This is an expected response for detail().
        # Rewrite the inherited integer ids to the fixture UUIDs in place.
        for index, detail_hyper_dict in enumerate(self.DETAIL_HYPERS_DICTS):
            detail_hyper_dict['id'] = TEST_HYPERS[index]['uuid']
            detail_hyper_dict['service']['id'] = TEST_SERVICES[index].uuid
    def test_servers(self):
        """Asserts that calling the servers route after 2.48 fails."""
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.servers,
                          self._get_request(True), 'hyper')
    def test_servers_with_no_server(self):
        """Tests GET /os-hypervisors?with_servers=1 when there are no
        instances on the given host.
        """
        with mock.patch.object(self.controller.host_api,
                               'instance_get_all_by_host',
                               return_value=[]) as mock_inst_get_all:
            req = self._get_request(use_admin_context=True,
                                    url='/os-hypervisors?with_servers=1')
            result = self.controller.index(req)
        self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result)
        # instance_get_all_by_host is called for each hypervisor
        self.assertEqual(2, mock_inst_get_all.call_count)
        mock_inst_get_all.assert_has_calls((
            mock.call(req.environ['nova.context'], TEST_HYPERS_OBJ[0].host),
            mock.call(req.environ['nova.context'], TEST_HYPERS_OBJ[1].host)))
    def test_servers_not_mapped(self):
        """Tests that instance_get_all_by_host fails with HostMappingNotFound.
        """
        req = self._get_request(use_admin_context=True,
                                url='/os-hypervisors?with_servers=1')
        with mock.patch.object(
                self.controller.host_api, 'instance_get_all_by_host',
                side_effect=exception.HostMappingNotFound(name='something')):
            result = self.controller.index(req)
        # Unmapped hosts are skipped entirely rather than erroring the call.
        self.assertEqual(dict(hypervisors=[]), result)
    def test_list_with_servers(self):
        """Tests GET /os-hypervisors?with_servers=True"""
        instances = [
            objects.InstanceList(objects=[objects.Instance(
                id=1, uuid=uuids.hyper1_instance1)]),
            objects.InstanceList(objects=[objects.Instance(
                id=2, uuid=uuids.hyper2_instance1)])]
        with mock.patch.object(self.controller.host_api,
                               'instance_get_all_by_host',
                               side_effect=instances) as mock_inst_get_all:
            req = self._get_request(use_admin_context=True,
                                    url='/os-hypervisors?with_servers=True')
            result = self.controller.index(req)
        index_with_servers = copy.deepcopy(self.INDEX_HYPER_DICTS)
        index_with_servers[0]['servers'] = [
            {'name': 'instance-00000001', 'uuid': uuids.hyper1_instance1}]
        index_with_servers[1]['servers'] = [
            {'name': 'instance-00000002', 'uuid': uuids.hyper2_instance1}]
        self.assertEqual(dict(hypervisors=index_with_servers), result)
        # instance_get_all_by_host is called for each hypervisor
        self.assertEqual(2, mock_inst_get_all.call_count)
        mock_inst_get_all.assert_has_calls((
            mock.call(req.environ['nova.context'], TEST_HYPERS_OBJ[0].host),
            mock.call(req.environ['nova.context'], TEST_HYPERS_OBJ[1].host)))
    def test_list_with_servers_invalid_parameter(self):
        """Tests using an invalid with_servers query parameter."""
        req = self._get_request(use_admin_context=True,
                                url='/os-hypervisors?with_servers=invalid')
        self.assertRaises(
            exception.ValidationError, self.controller.index, req)
    def test_list_with_hostname_pattern_and_paging_parameters(self):
        """This is a negative test to validate that trying to list hypervisors
        with a hostname pattern and paging parameters results in a 400 error.
        """
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors?hypervisor_hostname_pattern=foo&'
                'limit=1&marker=%s' % uuids.marker)
        ex = self.assertRaises(exc.HTTPBadRequest, self.controller.index, req)
        self.assertIn('Paging over hypervisors with the '
                      'hypervisor_hostname_pattern query parameter is not '
                      'supported.', six.text_type(ex))
    def test_servers_with_non_integer_hypervisor_id(self):
        """This is a poorly named test, it's really checking the 404 case where
        there is no match for the hostname pattern.
        """
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors?with_servers=yes&'
                'hypervisor_hostname_pattern=shenzhen')
        with mock.patch.object(self.controller.host_api,
                               'compute_node_search_by_hypervisor',
                               return_value=objects.ComputeNodeList()) as s:
            self.assertRaises(exc.HTTPNotFound, self.controller.index, req)
            s.assert_called_once_with(req.environ['nova.context'], 'shenzhen')
    def test_servers_non_admin(self):
        """There is no reason to test this for 2.53 since the
        /os-hypervisors/servers route is deprecated.
        """
        pass
    def test_servers_non_id(self):
        """There is no reason to test this for 2.53 since the
        /os-hypervisors/servers route is deprecated.
        """
        pass
    def test_search_old_route(self):
        """Asserts that calling the search route after 2.48 fails."""
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.search,
                          self._get_request(True), 'hyper')
    def test_search(self):
        """Test listing hypervisors with details and using the
        hypervisor_hostname_pattern query string.
        """
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors?hypervisor_hostname_pattern=shenzhen')
        with mock.patch.object(self.controller.host_api,
                               'compute_node_search_by_hypervisor',
                               return_value=objects.ComputeNodeList(
                                   objects=[TEST_HYPERS_OBJ[0]])) as s:
            result = self.controller.detail(req)
            s.assert_called_once_with(req.environ['nova.context'], 'shenzhen')
        expected = {
            'hypervisors': [
                {'cpu_info': {'arch': 'x86_64',
                              'features': [],
                              'model': '',
                              'topology': {'cores': 1,
                                           'sockets': 1,
                                           'threads': 1},
                              'vendor': 'fake'},
                 'current_workload': 2,
                 'disk_available_least': 100,
                 'free_disk_gb': 125,
                 'free_ram_mb': 5120,
                 'host_ip': netaddr.IPAddress('1.1.1.1'),
                 'hypervisor_hostname': 'hyper1',
                 'hypervisor_type': 'xen',
                 'hypervisor_version': 3,
                 'id': TEST_HYPERS_OBJ[0].uuid,
                 'local_gb': 250,
                 'local_gb_used': 125,
                 'memory_mb': 10240,
                 'memory_mb_used': 5120,
                 'running_vms': 2,
                 'service': {'disabled_reason': None,
                             'host': 'compute1',
                             'id': TEST_SERVICES[0].uuid},
                 'state': 'up',
                 'status': 'enabled',
                 'vcpus': 4,
                 'vcpus_used': 2}
            ]
        }
        # There are no links when using the hypervisor_hostname_pattern
        # query string since we can't page using a pattern matcher.
        self.assertNotIn('hypervisors_links', result)
        self.assertDictEqual(expected, result)
    def test_search_invalid_hostname_pattern_parameter(self):
        """Tests passing an invalid hypervisor_hostname_pattern query
        parameter.
        """
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors?hypervisor_hostname_pattern=invalid~host')
        self.assertRaises(
            exception.ValidationError, self.controller.detail, req)
    def test_search_non_exist(self):
        """This is a duplicate of test_servers_with_non_integer_hypervisor_id.
        """
        pass
    def test_search_non_admin(self):
        """There is no reason to test this for 2.53 since the
        /os-hypervisors/search route is deprecated.
        """
        pass
    def test_search_unmapped(self):
        """This is already tested with test_index_compute_host_not_mapped."""
        pass
    def test_show_non_integer_id(self):
        """There is no reason to test this for 2.53 since 2.53 requires a
        non-integer id (requires a uuid).
        """
        pass
    def test_show_integer_id(self):
        """Tests that we get a 400 if passed a hypervisor integer id to show().
        """
        req = self._get_request(True)
        ex = self.assertRaises(exc.HTTPBadRequest,
                               self.controller.show, req, '1')
        self.assertIn('Invalid uuid 1', six.text_type(ex))
    def test_show_with_servers_invalid_parameter(self):
        """Tests passing an invalid value for the with_servers query parameter
        to the show() method to make sure the query parameter is validated.
        """
        hyper_id = self._get_hyper_id()
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors/%s?with_servers=invalid' % hyper_id)
        ex = self.assertRaises(
            exception.ValidationError, self.controller.show, req, hyper_id)
        self.assertIn('with_servers', six.text_type(ex))
    def test_show_with_servers_host_mapping_not_found(self):
        """Tests that a 404 is returned if instance_get_all_by_host raises
        HostMappingNotFound.
        """
        hyper_id = self._get_hyper_id()
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors/%s?with_servers=true' % hyper_id)
        with mock.patch.object(
                self.controller.host_api, 'instance_get_all_by_host',
                side_effect=exception.HostMappingNotFound(name=hyper_id)):
            self.assertRaises(exc.HTTPNotFound, self.controller.show,
                              req, hyper_id)
    def test_show_with_servers(self):
        """Tests the show() result when servers are included in the output."""
        instances = objects.InstanceList(objects=[objects.Instance(
            id=1, uuid=uuids.hyper1_instance1)])
        hyper_id = self._get_hyper_id()
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors/%s?with_servers=on' % hyper_id)
        with mock.patch.object(self.controller.host_api,
                               'instance_get_all_by_host',
                               return_value=instances) as mock_inst_get_all:
            result = self.controller.show(req, hyper_id)
        show_with_servers = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
        show_with_servers['servers'] = [
            {'name': 'instance-00000001', 'uuid': uuids.hyper1_instance1}]
        self.assertDictEqual(dict(hypervisor=show_with_servers), result)
        # instance_get_all_by_host is called
        mock_inst_get_all.assert_called_once_with(
            req.environ['nova.context'], TEST_HYPERS_OBJ[0].host)
    def test_uptime_non_integer_id(self):
        """There is no reason to test this for 2.53 since 2.53 requires a
        non-integer id (requires a uuid).
        """
        pass
    def test_uptime_integer_id(self):
        """Tests that we get a 400 if passed a hypervisor integer id to
        uptime().
        """
        req = self._get_request(True)
        ex = self.assertRaises(exc.HTTPBadRequest,
                               self.controller.uptime, req, '1')
        self.assertIn('Invalid uuid 1', six.text_type(ex))
    def test_detail_pagination(self):
        """Tests details paging with uuid markers."""
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors/detail?limit=1&marker=%s' %
                TEST_HYPERS_OBJ[0].uuid)
        result = self.controller.detail(req)
        link = ('http://localhost/v2/hypervisors/detail?limit=1&marker=%s' %
                TEST_HYPERS_OBJ[1].uuid)
        expected = {
            'hypervisors': [
                {'cpu_info': {'arch': 'x86_64',
                              'features': [],
                              'model': '',
                              'topology': {'cores': 1,
                                           'sockets': 1,
                                           'threads': 1},
                              'vendor': 'fake'},
                 'current_workload': 2,
                 'disk_available_least': 100,
                 'free_disk_gb': 125,
                 'free_ram_mb': 5120,
                 'host_ip': netaddr.IPAddress('2.2.2.2'),
                 'hypervisor_hostname': 'hyper2',
                 'hypervisor_type': 'xen',
                 'hypervisor_version': 3,
                 'id': TEST_HYPERS_OBJ[1].uuid,
                 'local_gb': 250,
                 'local_gb_used': 125,
                 'memory_mb': 10240,
                 'memory_mb_used': 5120,
                 'running_vms': 2,
                 'service': {'disabled_reason': None,
                             'host': 'compute2',
                             'id': TEST_SERVICES[1].uuid},
                 'state': 'up',
                 'status': 'enabled',
                 'vcpus': 4,
                 'vcpus_used': 2}
            ],
            'hypervisors_links': [{'href': link, 'rel': 'next'}]
        }
        self.assertEqual(expected, result)
    def test_detail_pagination_with_invalid_marker(self):
        """Tests detail paging with an invalid marker (not found)."""
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors/detail?marker=%s' % uuids.invalid_marker)
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller.detail, req)
    def test_index_pagination(self):
        """Tests index paging with uuid markers."""
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors?limit=1&marker=%s' %
                TEST_HYPERS_OBJ[0].uuid)
        result = self.controller.index(req)
        link = ('http://localhost/v2/hypervisors?limit=1&marker=%s' %
                TEST_HYPERS_OBJ[1].uuid)
        expected = {
            'hypervisors': [{
                'hypervisor_hostname': 'hyper2',
                'id': TEST_HYPERS_OBJ[1].uuid,
                'state': 'up',
                'status': 'enabled'
            }],
            'hypervisors_links': [{'href': link, 'rel': 'next'}]
        }
        self.assertEqual(expected, result)
    def test_index_pagination_with_invalid_marker(self):
        """Tests index paging with an invalid marker (not found)."""
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors?marker=%s' % uuids.invalid_marker)
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller.index, req)
    def test_list_duplicate_query_parameters_validation(self):
        """Tests that the list query parameter schema enforces only a single
        entry for any query parameter.
        """
        params = {
            'limit': 1,
            'marker': uuids.marker,
            'hypervisor_hostname_pattern': 'foo',
            'with_servers': 'true'
        }
        for param, value in params.items():
            req = self._get_request(
                use_admin_context=True,
                url='/os-hypervisors?%s=%s&%s=%s' %
                    (param, value, param, value))
            self.assertRaises(exception.ValidationError,
                              self.controller.index, req)
    def test_show_duplicate_query_parameters_validation(self):
        """Tests that the show query parameter schema enforces only a single
        entry for any query parameter.
        """
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors/%s?with_servers=1&with_servers=1' %
                uuids.hyper1)
        self.assertRaises(exception.ValidationError,
                          self.controller.show, req, uuids.hyper1)
|
{
"content_hash": "60737a9f2037bc661ea3d36f799ffbec",
"timestamp": "",
"source": "github",
"line_count": 1274,
"max_line_length": 79,
"avg_line_length": 41.65070643642072,
"alnum_prop": 0.5513634736068447,
"repo_name": "Juniper/nova",
"id": "2932a133972e5c136fe3856f5e10b5aaa89e54fe",
"size": "53703",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/unit/api/openstack/compute/test_hypervisors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "23962"
},
{
"name": "Python",
"bytes": "19816434"
},
{
"name": "Shell",
"bytes": "27717"
},
{
"name": "Smarty",
"bytes": "339635"
}
],
"symlink_target": ""
}
|
from django.views.generic import TemplateView
from horizon import usage
class ProjectOverview(usage.UsageView):
    """Project (tenant) usage overview page built on horizon's usage view."""
    table_class = usage.TenantUsageTable
    usage_class = usage.TenantUsage
    template_name = 'nova/overview/usage.html'
    def get_data(self):
        # Let the base class populate self.usage (its return value is
        # discarded), then expose the per-instance rows for the table.
        super(ProjectOverview, self).get_data()
        return self.usage.get_instances()
class WarningView(TemplateView):
    """Render the static usage warning partial template."""
    template_name = "nova/_warning.html"
|
{
"content_hash": "01e52a0e28a6808d702cf48758418539",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 47,
"avg_line_length": 25.294117647058822,
"alnum_prop": 0.727906976744186,
"repo_name": "savi-dev/horizon",
"id": "7840b75d11a4c57abd1c00f10772aa8af31c13c7",
"size": "1239",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "horizon/dashboards/nova/overview/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "140188"
},
{
"name": "JavaScript",
"bytes": "347845"
},
{
"name": "Python",
"bytes": "944872"
},
{
"name": "Shell",
"bytes": "11194"
}
],
"symlink_target": ""
}
|
from order.utils import create_order_objects, is_orderable, resolve_labels, \
resolve_order_item_related_set_name, sanitize_order
def post_save(sender, instance, created, **kwargs):
    """
    After save create order instance for sending instance for orderable models.

    Only models listed as orderable in settings are acted on; for those, a
    single order item row is created for the saved instance (if one does not
    already exist) and the overall ordering is then re-sanitized.
    """
    # Only create order model instances for
    # those modules specified in settings.
    model_label = '.'.join([sender._meta.app_label, sender._meta.object_name])
    labels = resolve_labels(model_label)
    order_field_names = is_orderable(model_label)
    if not order_field_names:
        # Not an orderable model: nothing to do.
        return
    orderitem_set = getattr(
        instance,
        resolve_order_item_related_set_name(labels)
    )
    # exists() issues a cheap EXISTS query instead of fetching every row
    # just to test for emptiness (which `if not qs.all()` does).
    if not orderitem_set.exists():
        # Seed every order field at position 1 for the new instance.
        fields = {name: 1 for name in order_field_names}
        orderitem_set.model.objects.create(item=instance, **fields)
        sanitize_order(orderitem_set.model)
def post_syncdb(sender, created_models, **kwargs):
    """Create order objects for any freshly created orderable models."""
    for model in created_models:
        model_label = '.'.join(
            [model._meta.app_label, model._meta.object_name])
        order_fields = is_orderable(model_label)
        if not order_fields:
            continue
        create_order_objects(model, order_fields)
|
{
"content_hash": "43fc1a7663df6ac1f6fdda3043306cb5",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 79,
"avg_line_length": 38.06060606060606,
"alnum_prop": 0.64171974522293,
"repo_name": "praekelt/django-order",
"id": "d04c9846b9f3b9f3fe06a327158c948f0a9bd2e2",
"size": "1256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "order/signal_handlers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12250"
}
],
"symlink_target": ""
}
|
import flask
import flask.ext.stacksentinel
# Create the example application; debug stays off, presumably so errors
# reach the error-reporting extension rather than the interactive
# debugger -- confirm before enabling.
app = flask.Flask(__name__)
app.debug = False
#
# Just a couple of example handlers. The second will generate a URL for testing.
#
@app.route('/')
def hello_world():
    """Plain-text landing page confirming which extension module loaded."""
    body = "Extension installed: %s" % flask.ext.stacksentinel
    return flask.Response(body, mimetype='text/plain')
@app.route('/cause-error')
def cause_error():
    """Deliberately raise an unhandled exception to exercise error reporting."""
    raise AttributeError('This is a test!')
#
# Configure StackSentinel client, extension -- replace values with your own
app.config['STACKSENTINEL_ACCOUNT_TOKEN'] = 'YOUR ACCOUNT TOKEN'
app.config['STACKSENTINEL_PROJECT_TOKEN'] = 'YOUR PROJECT TOKEN'
# NOTE(review): the flask.ext.* namespace was removed in Flask 1.0; on a
# modern Flask this likely needs `import flask_stacksentinel` -- confirm.
stacksentinel = flask.ext.stacksentinel.StackSentinelHandler(app)
if __name__ == '__main__':
    # Run the Flask development server.
    app.run()
|
{
"content_hash": "19570b1b7eb9ec3675c824b92eab9a53",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 101,
"avg_line_length": 27.807692307692307,
"alnum_prop": 0.7233748271092669,
"repo_name": "StackSentinel/stacksentinel-flask",
"id": "889b981abdf7ade08c02874117bc68b98208cab5",
"size": "723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example_flask_app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6406"
}
],
"symlink_target": ""
}
|
import xlsxwriter
import math
# Create a function that will behave like a worksheet write() method.
#
# This function takes a float and if it is NaN then it writes a blank cell
# instead. It should take the parameters shown below and return the return
# value from the called worksheet write_*() method.
#
def ignore_nan(worksheet, row, col, number, format=None):
    """write() handler that writes NaN floats as blank cells.

    Returns the write_blank() return value for NaN input, or None to hand
    control back to the normal write() dispatch for any other float.
    """
    if not math.isnan(number):
        # Not NaN: let the standard write() machinery handle it.
        return None
    return worksheet.write_blank(row, col, None, format)
# Set up the workbook as usual.
workbook = xlsxwriter.Workbook('user_types2.xlsx')
worksheet = workbook.add_worksheet()
# Add the write() handler/callback to the worksheet.
# It will be consulted for every float passed to write().
worksheet.add_write_handler(float, ignore_nan)
# Create some data to write.
my_data = [1, 2, float('nan'), 4, 5]
# Write the data. Note that write_row() calls write() so this will work as
# expected. Writing NaN values would raise a TypeError without the handler.
worksheet.write_row('A1', my_data)
# Close to flush the workbook to disk.
workbook.close()
|
{
"content_hash": "553e0d8e0e8af408ea38a89f90c151c3",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 76,
"avg_line_length": 32.96875,
"alnum_prop": 0.7251184834123223,
"repo_name": "jmcnamara/XlsxWriter",
"id": "1f4c3cbe1735ed87d645c9e39e39a99c3b2f8893",
"size": "1327",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/user_types2.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7748"
},
{
"name": "Perl",
"bytes": "3503"
},
{
"name": "Python",
"bytes": "2807230"
},
{
"name": "Shell",
"bytes": "7964"
}
],
"symlink_target": ""
}
|
"""
Expose each GPU devices directly
"""
from __future__ import print_function, absolute_import, division
import functools
from numba import servicelib
from .driver import hsa as driver
class _culist(object):
    """A thread local list of GPU instances
    """
    # NOTE(review): despite the docstring, nothing here is thread-local;
    # the lazily built list below is shared by all threads. Confirm intent.
    def __init__(self):
        # Lazily populated on first access via the _gpus property.
        self._lst = None
    @property
    def _gpus(self):
        if not self._lst:
            self._lst = self._init_gpus()
        return self._lst
    def _init_gpus(self):
        # Wrap every driver component in a CU proxy.
        gpus = []
        for com in driver.components:
            gpus.append(CU(com))
        return gpus
    def __getitem__(self, item):
        return self._gpus[item]
    def append(self, item):
        return self._gpus.append(item)
    def __len__(self):
        return len(self._gpus)
    def __nonzero__(self):
        return bool(self._gpus)
    def __iter__(self):
        return iter(self._gpus)
    # Python 3 truthiness protocol reuses the Python 2 implementation.
    __bool__ = __nonzero__
    def reset(self):
        # Tear down the context of every device.
        for gpu in self:
            gpu.reset()
    @property
    def current(self):
        """Get the current GPU object associated with the thread
        """
        return _custack.top
# Singleton device list; the class itself is deleted so no further
# instances can be created.
cus = _culist()
del _culist
class CU(object):
    """Proxy around a single driver compute unit that owns (and lazily
    creates) its context and supports use as a context manager.
    """
    def __init__(self, cu):
        self._cu = cu
        # Created on first associate_context() call.
        self._context = None
    def __getattr__(self, key):
        """Redirect to self._gpu
        """
        # Private names are never forwarded to the wrapped object.
        if key.startswith('_'):
            raise AttributeError(key)
        return getattr(self._cu, key)
    def __repr__(self):
        return repr(self._cu)
    def associate_context(self):
        """Associate the context of this GPU to the running thread
        """
        # No context was created for this GPU
        if self._context is None:
            self._context = self._cu.create_context()
        return self._context
    def __enter__(self):
        self.associate_context()
        _custack.push(self)
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Must still be the innermost active device when leaving the block.
        assert _get_device() is self
        self._context.pop()
        _custack.pop()
    def reset(self):
        if self._context:
            self._context.reset()
            self._context = None
def get_gpu(i):
    """Return the device wrapper at index *i* from the global device list."""
    device = cus[i]
    return device
# Stack of active devices; presumably thread-local given the TLStack name
# -- confirm against servicelib.
_custack = servicelib.TLStack()
def _get_device(devnum=0):
    """Return the active device, activating device *devnum* if none is."""
    stack = _custack
    if not stack:
        stack.push(get_gpu(devnum))
    return stack.top
def get_context(devnum=0):
    """Return the context of the current device (or of device *devnum*)."""
    device = _get_device(devnum=devnum)
    return device.associate_context()
def require_context(fn):
    """
    A decorator to ensure a context for the CUDA subsystem
    """
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        # Make sure a context exists before running the wrapped callable.
        get_context()
        return fn(*args, **kwargs)
    return wrapper
def reset():
    # Tear down every created device context, then drop the device stack.
    cus.reset()
    _custack.clear()
|
{
"content_hash": "17e4f0b87b41e4241807acb4e6017bb8",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 67,
"avg_line_length": 20.66906474820144,
"alnum_prop": 0.569439610163592,
"repo_name": "pombredanne/numba",
"id": "2daf05691164a47a05d3b132a154f83cd4ae84f8",
"size": "2873",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "numba/hsa/hsadrv/devices.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2212"
},
{
"name": "C",
"bytes": "249112"
},
{
"name": "C++",
"bytes": "17024"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "HTML",
"bytes": "98846"
},
{
"name": "PowerShell",
"bytes": "3153"
},
{
"name": "Python",
"bytes": "3320040"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import bbcode
from pybb import defaults
from pybb.util import load_class
from pybb.engines.base import BaseMarkupEngine, BaseQuoteEngine
class BBCodeMarkupEngine(BaseMarkupEngine):
    """Markup engine rendering BBCode via the `bbcode` parser library."""
    # Tags rendered with a plain format string; the second tuple element is
    # an optional kwargs dict for add_simple_formatter().
    simple_formatters = {
        'left': ('<div style="text-align:left;">%(value)s</div>', None),
        'center': ('<div style="text-align:center;">%(value)s</div>', None),
        'right': ('<div style="text-align:right;">%(value)s</div>', None),
        'ul': ('<ul>%(value)s</ul>', None),
        'ol': ('<ol>%(value)s</ol>', None),
        'li': ('<li>%(value)s</li>', None),
        'youtube': ('<iframe width="560" height="315" frameborder="0" src="http://www.youtube.com/embed/%(value)s?wmode=opaque" data-youtube-id="%(value)s" allowfullscreen=""></iframe>', None),
    }
    # Tags with a callable renderer, given as a dotted path resolved lazily
    # with load_class(); second element is an optional kwargs dict.
    formatters = {
        'url': ('pybb.engines.bbcode.formatters.url', None),
        'img': ('pybb.engines.bbcode.formatters.img', {'render_embedded': False}),
        'spoiler': ('pybb.engines.bbcode.formatters.spoiler', None),
        'size': ('pybb.engines.bbcode.formatters.font_size', None),
        'font': ('pybb.engines.bbcode.formatters.font_family', None),
        'email': ('pybb.engines.bbcode.formatters.email', None),
    }
    # Keyword arguments used to construct the bbcode.Parser.
    defaults_kwargs = {
        'replace_links': False,
        'escape_html': False
    }
    def __init__(self, *args, **kwargs):
        super(BBCodeMarkupEngine, self).__init__(*args, **kwargs)
        self.parser = bbcode.Parser(**self.defaults_kwargs)
        self.init_formatters()
    def init_formatters(self):
        """Register the class-level formatters plus those from settings."""
        simple_formatters = list(self.simple_formatters.items()) + list(defaults.PYBB_BBCODE_MARKUP_SIMPLE_FORMATTERS)
        for tag_name, (format_str, context) in simple_formatters:
            if context:
                self.parser.add_simple_formatter(tag_name, format_str, **context)
            else:
                self.parser.add_simple_formatter(tag_name, format_str)
        formatters = list(self.formatters.items()) + list(defaults.PYBB_BBCODE_MARKUP_FORMATTERS)
        for tag_name, (formatter_name, context) in formatters:
            if context:
                self.parser.add_formatter(tag_name, load_class(formatter_name), **context)
            else:
                self.parser.add_formatter(tag_name, load_class(formatter_name))
    def render(self, context=None):
        """Format self.message to HTML, exposing the engine's object as
        'obj' in the render context.
        """
        if not context:
            context = {}
        context['obj'] = self.obj
        return self.parser.format(self.message, **context)
class BBCodeQuoteEngine(BaseQuoteEngine):
    """Quote engine emitting a BBCode [quote] block for a post."""
    def render(self):
        """Return the quoted post body tagged with author and post pk."""
        template = '[quote="%s;%d"]%s[/quote]\n'
        return template % (self.username, self.post.pk, self.post.body)
|
{
"content_hash": "739c700a9239b9283c0533ee45da6d76",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 193,
"avg_line_length": 38.43055555555556,
"alnum_prop": 0.5919768702565956,
"repo_name": "thoas/pybbm",
"id": "781c1e2b54f57d37a52c8cfa85b8e93459aedaf3",
"size": "2767",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pybb/engines/bbcode/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3644"
},
{
"name": "HTML",
"bytes": "55789"
},
{
"name": "JavaScript",
"bytes": "11346"
},
{
"name": "Makefile",
"bytes": "218"
},
{
"name": "Python",
"bytes": "362855"
}
],
"symlink_target": ""
}
|
from ggrc import db
from ggrc.models.mixins import Base
class Event(Base, db.Model):
    """Audit event row recording an API action against a resource, with
    its associated revisions.
    """
    __tablename__ = 'events'
    # HTTP-style action that produced the event.
    action = db.Column(
        db.Enum(u'POST', u'PUT', u'DELETE', u'BULK', u'GET'),
        nullable=False,
    )
    # Loose reference to the affected resource (no FK constraint).
    resource_id = db.Column(db.Integer)
    resource_type = db.Column(db.String)
    revisions = db.relationship(
        'Revision',
        backref='event',
        cascade='all, delete-orphan',
    )
    # Attributes exposed through the JSON publishing layer.
    _publish_attrs = [
        'action',
        'resource_id',
        'resource_type',
        'revisions',
    ]
    _include_links = [
        'revisions',
    ]
    @staticmethod
    def _extra_table_args(class_):
        # Extra indexes merged into __table_args__ by the mixin machinery.
        return (
            db.Index('events_modified_by', 'modified_by_id'),
            db.Index(
                'ix_{}_updated_at'.format(class_.__tablename__),
                'updated_at',
            ),
        )
    @classmethod
    def eager_query(cls):
        from sqlalchemy import orm
        # Eager-load full revisions to avoid N+1 queries when publishing.
        query = super(Event, cls).eager_query()
        return query.options(
            orm.subqueryload('revisions').undefer_group('Revision_complete'),
        )
|
{
"content_hash": "54535505c1f0b7916549214c01955a7a",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 73,
"avg_line_length": 20.979591836734695,
"alnum_prop": 0.5836575875486382,
"repo_name": "j0gurt/ggrc-core",
"id": "9535f5f89733b2aee97554c14fea42f1cfd3c47e",
"size": "1141",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "src/ggrc/models/event.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "194475"
},
{
"name": "Cucumber",
"bytes": "136322"
},
{
"name": "HTML",
"bytes": "1087442"
},
{
"name": "JavaScript",
"bytes": "1770318"
},
{
"name": "Makefile",
"bytes": "7148"
},
{
"name": "Mako",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "2540086"
},
{
"name": "Shell",
"bytes": "30902"
}
],
"symlink_target": ""
}
|
from test_framework.blocktools import (
COINBASE_MATURITY,
create_coinbase,
create_block,
add_witness_commitment,
MAX_BLOCK_SIGOPS_WEIGHT,
WITNESS_SCALE_FACTOR,
)
from test_framework.messages import (
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
SEQUENCE_FINAL,
)
from test_framework.script import (
ANNEX_TAG,
BIP341_sha_amounts,
BIP341_sha_outputs,
BIP341_sha_prevouts,
BIP341_sha_scriptpubkeys,
BIP341_sha_sequences,
CScript,
CScriptNum,
CScriptOp,
hash256,
LEAF_VERSION_TAPSCRIPT,
LegacySignatureMsg,
LOCKTIME_THRESHOLD,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_2,
OP_3,
OP_4,
OP_5,
OP_6,
OP_7,
OP_8,
OP_9,
OP_10,
OP_11,
OP_12,
OP_16,
OP_2DROP,
OP_2DUP,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_CHECKSIG,
OP_CHECKSIGADD,
OP_CHECKSIGVERIFY,
OP_CODESEPARATOR,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_IF,
OP_NOP,
OP_NOT,
OP_NOTIF,
OP_PUSHDATA1,
OP_RETURN,
OP_SWAP,
OP_VERIFY,
SIGHASH_DEFAULT,
SIGHASH_ALL,
SIGHASH_NONE,
SIGHASH_SINGLE,
SIGHASH_ANYONECANPAY,
SegwitV0SignatureMsg,
TaggedHash,
TaprootSignatureMsg,
is_op_success,
taproot_construct,
)
from test_framework.script_util import (
key_to_p2pk_script,
key_to_p2pkh_script,
key_to_p2wpkh_script,
keyhash_to_p2pkh_script,
script_to_p2sh_script,
script_to_p2wsh_script,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_raises_rpc_error,
assert_equal,
random_bytes,
)
from test_framework.key import (
generate_privkey,
compute_xonly_pubkey,
sign_schnorr,
tweak_add_privkey,
ECKey,
SECP256K1
)
from test_framework.address import (
hash160,
program_to_witness,
)
from collections import OrderedDict, namedtuple
from io import BytesIO
import json
import hashlib
import os
import random
# Whether or not to output generated test vectors, in JSON format.
# (Set to True locally when regenerating the JSON vectors.)
GEN_TEST_VECTORS = False
# === Framework for building spending transactions. ===
#
# The computation is represented as a "context" dict, whose entries store potentially-unevaluated expressions that
# refer to lower-level ones. By overwriting these expression, many aspects - both high and low level - of the signing
# process can be overridden.
#
# Specifically, a context object is a dict that maps names to compositions of:
# - values
# - lists of values
# - callables which, when fed the context object as argument, produce any of these
#
# The DEFAULT_CONTEXT object specifies a standard signing process, with many overridable knobs.
#
# The get(ctx, name) function can evaluate a name, and cache its result in the context.
# getter(name) can be used to construct a callable that evaluates name. For example:
#
# ctx1 = {**DEFAULT_CONTEXT, "inputs": [getter("sign"), b'\x01']}
#
# creates a context where the script inputs are a signature plus the bytes 0x01.
#
# override(expr, name1=expr1, name2=expr2, ...) can be used to cause an expression to be evaluated in a selectively
# modified context. For example:
#
# ctx2 = {**DEFAULT_CONTEXT, "sighash": override(default_sighash, hashtype=SIGHASH_DEFAULT)}
#
# creates a context ctx2 where the sighash is modified to use hashtype=SIGHASH_DEFAULT. This differs from
#
# ctx3 = {**DEFAULT_CONTEXT, "hashtype": SIGHASH_DEFAULT}
#
# in that ctx3 will globally use hashtype=SIGHASH_DEFAULT (including in the hashtype byte appended to the signature)
# while ctx2 only uses the modified hashtype inside the sighash calculation.
def deep_eval(ctx, expr):
    """Recursively replace any callables c in expr (including inside lists) with c(ctx)."""
    result = expr
    # Callables may themselves return callables; keep applying until settled.
    while callable(result):
        result = result(ctx)
    if isinstance(result, list):
        return [deep_eval(ctx, item) for item in result]
    return result
# Data type to represent fully-evaluated expressions in a context dict (so we can avoid reevaluating them).
# get() wraps each computed value in Final; entries already wrapped are returned as-is.
Final = namedtuple("Final", "value")
def get(ctx, name):
    """Evaluate name in context ctx."""
    assert name in ctx, "Missing '%s' in context" % name
    expr = ctx[name]
    if isinstance(expr, Final):
        # Already evaluated earlier; reuse the cached value.
        return expr.value
    # Evaluate now, and store the result so future lookups are free.
    cached = Final(deep_eval(ctx, expr))
    ctx[name] = cached
    return cached.value
def getter(name):
    """Return a callable that evaluates name in its passed context."""
    def evaluate(ctx):
        return get(ctx, name)
    return evaluate
def override(expr, **kwargs):
    """Return a callable that evaluates expr in a modified context."""
    def evaluate(ctx):
        # Shallow-merge the overrides on top of the caller's context.
        modified = {**ctx, **kwargs}
        return deep_eval(modified, expr)
    return evaluate
# === Implementations for the various default expressions in DEFAULT_CONTEXT ===
def default_hashtype(ctx):
    """Default expression for "hashtype": SIGHASH_DEFAULT for taproot, SIGHASH_ALL otherwise."""
    if get(ctx, "mode") == "taproot":
        return SIGHASH_DEFAULT
    return SIGHASH_ALL
def default_tapleaf(ctx):
    """Default expression for "tapleaf": looking up leaf in tap[2]."""
    tap = get(ctx, "tap")
    leaf_name = get(ctx, "leaf")
    return tap.leaves[leaf_name]
def default_script_taproot(ctx):
    """Default expression for "script_taproot": tapleaf.script."""
    tapleaf = get(ctx, "tapleaf")
    return tapleaf.script
def default_leafversion(ctx):
    """Default expression for "leafversion": tapleaf.version"""
    tapleaf = get(ctx, "tapleaf")
    return tapleaf.version
def default_negflag(ctx):
    """Default expression for "negflag": tap.negflag."""
    tap = get(ctx, "tap")
    return tap.negflag
def default_pubkey_internal(ctx):
    """Default expression for "pubkey_internal": tap.internal_pubkey."""
    tap = get(ctx, "tap")
    return tap.internal_pubkey
def default_merklebranch(ctx):
    """Default expression for "merklebranch": tapleaf.merklebranch."""
    tapleaf = get(ctx, "tapleaf")
    return tapleaf.merklebranch
def default_controlblock(ctx):
    """Default expression for "controlblock": combine leafversion, negflag, pubkey_internal, merklebranch."""
    # First byte packs the leaf version together with the parity/negation bit.
    first_byte = get(ctx, "leafversion") + get(ctx, "negflag")
    return bytes([first_byte]) + get(ctx, "pubkey_internal") + get(ctx, "merklebranch")
def default_sigmsg(ctx):
    """Default expression for "sigmsg": depending on mode, compute BIP341, BIP143, or legacy sigmsg.

    Note: in the legacy branch the result may be None (default_sighash handles
    that case by substituting the uint256(1) "error" sighash).
    """
    tx = get(ctx, "tx")
    idx = get(ctx, "idx")
    hashtype = get(ctx, "hashtype_actual")
    mode = get(ctx, "mode")
    if mode == "taproot":
        # BIP341 signature hash
        utxos = get(ctx, "utxos")
        annex = get(ctx, "annex")
        if get(ctx, "leaf") is not None:
            # Script path spend: also commit to the executed script, its leaf
            # version, and the last executed OP_CODESEPARATOR position.
            codeseppos = get(ctx, "codeseppos")
            leaf_ver = get(ctx, "leafversion")
            script = get(ctx, "script_taproot")
            return TaprootSignatureMsg(tx, utxos, hashtype, idx, scriptpath=True, script=script, leaf_ver=leaf_ver, codeseparator_pos=codeseppos, annex=annex)
        else:
            # Key path spend.
            return TaprootSignatureMsg(tx, utxos, hashtype, idx, scriptpath=False, annex=annex)
    elif mode == "witv0":
        # BIP143 signature hash (commits to the spent amount)
        scriptcode = get(ctx, "scriptcode")
        utxos = get(ctx, "utxos")
        return SegwitV0SignatureMsg(scriptcode, tx, idx, hashtype, utxos[idx].nValue)
    else:
        # Pre-segwit signature hash
        scriptcode = get(ctx, "scriptcode")
        return LegacySignatureMsg(scriptcode, tx, idx, hashtype)[0]
def default_sighash(ctx):
    """Default expression for "sighash": depending on mode, compute tagged hash or dsha256 of sigmsg."""
    msg = get(ctx, "sigmsg")
    if get(ctx, "mode") == "taproot":
        return TaggedHash("TapSighash", msg)
    # Pre-taproot: a sigmsg of None signals the legacy "error" sighash of uint256(1).
    if msg is None:
        return (1).to_bytes(32, 'little')
    return hash256(msg)
def default_tweak(ctx):
    """Default expression for "tweak": None if a leaf is specified, tap[0] otherwise."""
    # Script path spends sign with the untweaked key; key path spends use the tweak.
    leaf = get(ctx, "leaf")
    return get(ctx, "tap").tweak if leaf is None else None
def default_key_tweaked(ctx):
    """Default expression for "key_tweaked": key if tweak is None, tweaked with it otherwise."""
    key = get(ctx, "key")
    tweak = get(ctx, "tweak")
    return key if tweak is None else tweak_add_privkey(key, tweak)
def default_signature(ctx):
    """Default expression for "signature": BIP340 signature or ECDSA signature depending on mode."""
    sighash = get(ctx, "sighash")
    deterministic = get(ctx, "deterministic")
    if get(ctx, "mode") == "taproot":
        # BIP340 (Schnorr) signature with the tweaked key. flip_r/flip_p let
        # tests deliberately produce invalid signatures.
        key = get(ctx, "key_tweaked")
        flip_r = get(ctx, "flag_flip_r")
        flip_p = get(ctx, "flag_flip_p")
        aux = bytes([0] * 32)
        if not deterministic:
            # Random auxiliary data randomizes the BIP340 nonce.
            aux = random.getrandbits(256).to_bytes(32, 'big')
        return sign_schnorr(key, sighash, flip_r=flip_r, flip_p=flip_p, aux=aux)
    else:
        # ECDSA signature with the untweaked key; RFC6979 nonces when deterministic.
        key = get(ctx, "key")
        return key.sign_ecdsa(sighash, rfc6979=deterministic)
def default_hashtype_actual(ctx):
    """Default expression for "hashtype_actual": hashtype, unless mismatching SIGHASH_SINGLE in taproot."""
    hashtype = get(ctx, "hashtype")
    if get(ctx, "mode") != "taproot":
        return hashtype
    idx = get(ctx, "idx")
    tx = get(ctx, "tx")
    # SIGHASH_SINGLE with no corresponding output is signed as SIGHASH_NONE instead.
    if hashtype & 3 == SIGHASH_SINGLE and idx >= len(tx.vout):
        return (hashtype & ~3) | SIGHASH_NONE
    return hashtype
def default_bytes_hashtype(ctx):
    """Default expression for "bytes_hashtype": bytes([hashtype_actual]) if not 0, b"" otherwise."""
    hashtype = get(ctx, "hashtype_actual")
    # SIGHASH_DEFAULT (0) is encoded as the absence of a hashtype byte.
    return bytes([hashtype]) if hashtype != 0 else b""
def default_sign(ctx):
    """Default expression for "sign": concatenation of signature and bytes_hashtype."""
    signature = get(ctx, "signature")
    hashtype_suffix = get(ctx, "bytes_hashtype")
    return signature + hashtype_suffix
def default_inputs_keypath(ctx):
    """Default expression for "inputs_keypath": a signature."""
    sig = get(ctx, "sign")
    return [sig]
def default_witness_taproot(ctx):
    """Default expression for "witness_taproot", consisting of inputs, script, control block, and annex as needed."""
    annex = get(ctx, "annex")
    suffix_annex = [annex] if annex is not None else []
    if get(ctx, "leaf") is None:
        # Key path spend: signature only (plus annex, if any).
        return get(ctx, "inputs_keypath") + suffix_annex
    # Script path spend: inputs, then serialized script, control block, and annex.
    inputs = get(ctx, "inputs")
    script = bytes(get(ctx, "script_taproot"))
    controlblock = get(ctx, "controlblock")
    return inputs + [script, controlblock] + suffix_annex
def default_witness_witv0(ctx):
    """Default expression for "witness_witv0", consisting of inputs and witness script, as needed."""
    script = get(ctx, "script_witv0")
    inputs = get(ctx, "inputs")
    # P2WPKH has no witness script; P2WSH appends it as the final stack element.
    return inputs if script is None else inputs + [script]
def default_witness(ctx):
    """Default expression for "witness", delegating to "witness_taproot" or "witness_witv0" as needed."""
    mode = get(ctx, "mode")
    if mode == "taproot":
        return get(ctx, "witness_taproot")
    if mode == "witv0":
        return get(ctx, "witness_witv0")
    # Legacy spends have no witness.
    return []
def default_scriptsig(ctx):
    """Default expression for "scriptsig", consisting of inputs and redeemscript, as needed.

    Returns a list of script elements: the script inputs (legacy spends only),
    followed by the serialized redeemscript for P2SH spends.
    """
    scriptsig = []
    mode = get(ctx, "mode")
    if mode == "legacy":
        # Only pre-segwit spends put the script inputs in the scriptSig;
        # segwit/taproot spends carry them in the witness instead.
        scriptsig = get(ctx, "inputs")
    redeemscript = get(ctx, "script_p2sh")
    if redeemscript is not None:
        # Build a new list instead of using +=: get() returns the very list
        # object cached for "inputs", and appending to it in place would
        # corrupt the cached context value for later evaluations.
        scriptsig = scriptsig + [bytes(redeemscript)]
    return scriptsig
# The default context object, mapping expression names to their default
# (potentially lazy) definitions. See get()/getter()/override() above.
DEFAULT_CONTEXT = {
    # == The main expressions to evaluate. Only override these for unusual or invalid spends. ==
    # The overall witness stack, as a list of bytes objects.
    "witness": default_witness,
    # The overall scriptsig, as a list of CScript objects (to be concatenated) and bytes objects (to be pushed)
    "scriptsig": default_scriptsig,
    # == Expressions you'll generally only override for intentionally invalid spends. ==
    # The witness stack for spending a taproot output.
    "witness_taproot": default_witness_taproot,
    # The witness stack for spending a P2WPKH/P2WSH output.
    "witness_witv0": default_witness_witv0,
    # The script inputs for a taproot key path spend.
    "inputs_keypath": default_inputs_keypath,
    # The actual hashtype to use (usually equal to hashtype, but in taproot SIGHASH_SINGLE is not always allowed).
    "hashtype_actual": default_hashtype_actual,
    # The bytes object for a full signature (including hashtype byte, if needed).
    "bytes_hashtype": default_bytes_hashtype,
    # A full script signature (bytes including hashtype, if needed).
    "sign": default_sign,
    # An ECDSA or Schnorr signature (excluding hashtype byte).
    "signature": default_signature,
    # The 32-byte tweaked key (equal to key for script path spends, or key+tweak for key path spends).
    "key_tweaked": default_key_tweaked,
    # The tweak to use (None for script path spends, the actual tweak for key path spends).
    "tweak": default_tweak,
    # The sigmsg value (preimage of sighash)
    "sigmsg": default_sigmsg,
    # The sighash value (32 bytes)
    "sighash": default_sighash,
    # The information about the chosen script path spend (TaprootLeafInfo object).
    "tapleaf": default_tapleaf,
    # The script to push, and include in the sighash, for a taproot script path spend.
    "script_taproot": default_script_taproot,
    # The internal pubkey for a taproot script path spend (32 bytes).
    "pubkey_internal": default_pubkey_internal,
    # The negation flag of the internal pubkey for a taproot script path spend.
    "negflag": default_negflag,
    # The leaf version to include in the sighash (this does not affect the one in the control block).
    "leafversion": default_leafversion,
    # The Merkle path to include in the control block for a script path spend.
    "merklebranch": default_merklebranch,
    # The control block to push for a taproot script path spend.
    "controlblock": default_controlblock,
    # Whether to produce signatures with invalid P sign (Schnorr signatures only).
    "flag_flip_p": False,
    # Whether to produce signatures with invalid R sign (Schnorr signatures only).
    "flag_flip_r": False,
    # == Parameters that can be changed without invalidating, but do have a default: ==
    # The hashtype (as an integer).
    "hashtype": default_hashtype,
    # The annex (only when mode=="taproot").
    "annex": None,
    # The codeseparator position (only when mode=="taproot").
    "codeseppos": -1,
    # The redeemscript to add to the scriptSig (if P2SH; None implies not P2SH).
    "script_p2sh": None,
    # The script to add to the witness (if P2WSH; None implies P2WPKH)
    "script_witv0": None,
    # The leaf to use in taproot spends (if script path spend; None implies key path spend).
    "leaf": None,
    # The input arguments to provide to the executed script
    "inputs": [],
    # Use deterministic signing nonces
    "deterministic": False,
    # == Parameters to be set before evaluation: ==
    # - mode: what spending style to use ("taproot", "witv0", or "legacy").
    # - key: the (untweaked) private key to sign with (ECKey object for ECDSA, 32 bytes for Schnorr).
    # - tap: the TaprootInfo object (see taproot_construct; needed in mode=="taproot").
    # - tx: the transaction to sign.
    # - utxos: the UTXOs being spent (needed in mode=="witv0" and mode=="taproot").
    # - idx: the input position being signed.
    # - scriptcode: the scriptcode to include in legacy and witv0 sighashes.
}
def flatten(lst):
    """Recursively flatten nested lists into a single flat list of elements."""
    flat = []
    for item in lst:
        if isinstance(item, list):
            flat.extend(flatten(item))
        else:
            flat.append(item)
    return flat
def spend(tx, idx, utxos, **kwargs):
    """Sign transaction input idx of tx, provided utxos is the list of outputs being spent.

    Additional arguments may be provided that override any aspect of the signing process.
    See DEFAULT_CONTEXT above for what can be overridden, and what must be provided.
    """
    ctx = {**DEFAULT_CONTEXT, "tx":tx, "idx":idx, "utxos":utxos, **kwargs}

    def to_script(elem):
        """If fed a CScript, return it; if fed bytes, return a CScript that pushes it."""
        return elem if isinstance(elem, CScript) else CScript([elem])

    # Serialize all scriptsig pieces and concatenate them into one CScript.
    pieces = flatten(get(ctx, "scriptsig"))
    scriptsig = CScript(b"".join(bytes(to_script(piece)) for piece in pieces))
    witness_stack = flatten(get(ctx, "witness"))
    return (scriptsig, witness_stack)
# === Spender objects ===
#
# Each spender is a tuple of:
# - A scriptPubKey which is to be spent from (CScript)
# - A comment describing the test (string)
# - Whether the spending (on itself) is expected to be standard (bool)
# - A tx-signing lambda returning (scriptsig, witness_stack), taking as inputs:
#   - A transaction to sign (CTransaction)
#   - An input position (int)
#   - The spent UTXOs by this transaction (list of CTxOut)
#   - Whether to produce a valid spend (bool)
# - A string with an expected error message for failure case if known
# - The (pre-taproot) sigops weight consumed by a successful spend
# - Whether this spend cannot fail
# - Whether this test demands being placed in a txin with no corresponding txout (for testing SIGHASH_SINGLE behavior)
#
# Spender instances are normally constructed via make_spender() below.
Spender = namedtuple("Spender", "script,comment,is_standard,sat_function,err_msg,sigops_weight,no_fail,need_vin_vout_mismatch")
def make_spender(comment, *, tap=None, witv0=False, script=None, pkh=None, p2sh=False, spk_mutate_pre_p2sh=None, failure=None, standard=True, err_msg=None, sigops_weight=0, need_vin_vout_mismatch=False, **kwargs):
    """Helper for constructing Spender objects using the context signing framework.

    * tap: a TaprootInfo object (see taproot_construct), for Taproot spends (cannot be combined with pkh, witv0, or script)
    * witv0: boolean indicating the use of witness v0 spending (needs one of script or pkh)
    * script: the actual script executed (for bare/P2WSH/P2SH spending)
    * pkh: the public key for P2PKH or P2WPKH spending
    * p2sh: whether the output is P2SH wrapper (this is supported even for Taproot, where it makes the output unencumbered)
    * spk_mutate_pre_p2sh: a callable to be applied to the script (before potentially P2SH-wrapping it)
    * failure: a dict of entries to override in the context when intentionally failing to spend (if None, no_fail will be set)
    * standard: whether the (valid version of) spending is expected to be standard
    * err_msg: a string with an expected error message for failure (or None, if not cared about)
    * sigops_weight: the pre-taproot sigops weight consumed by a successful spend
    * need_vin_vout_mismatch: whether this test requires being tested in a transaction input that has no corresponding
      transaction output.

    Any remaining keyword arguments are merged into the signing context,
    overriding the defaults computed below (see DEFAULT_CONTEXT).
    """
    conf = dict()
    # Compute scriptPubKey and set useful defaults based on the inputs.
    if witv0:
        assert tap is None
        conf["mode"] = "witv0"
        if pkh is not None:
            # P2WPKH
            assert script is None
            pubkeyhash = hash160(pkh)
            spk = key_to_p2wpkh_script(pkh)
            conf["scriptcode"] = keyhash_to_p2pkh_script(pubkeyhash)
            conf["script_witv0"] = None
            conf["inputs"] = [getter("sign"), pkh]
        elif script is not None:
            # P2WSH
            spk = script_to_p2wsh_script(script)
            conf["scriptcode"] = script
            conf["script_witv0"] = script
        else:
            assert False
    elif tap is None:
        conf["mode"] = "legacy"
        if pkh is not None:
            # P2PKH
            assert script is None
            pubkeyhash = hash160(pkh)
            spk = keyhash_to_p2pkh_script(pubkeyhash)
            conf["scriptcode"] = spk
            conf["inputs"] = [getter("sign"), pkh]
        elif script is not None:
            # bare
            spk = script
            conf["scriptcode"] = script
        else:
            assert False
    else:
        # Taproot (key path or script path)
        assert script is None
        conf["mode"] = "taproot"
        conf["tap"] = tap
        spk = tap.scriptPubKey
    if spk_mutate_pre_p2sh is not None:
        spk = spk_mutate_pre_p2sh(spk)
    if p2sh:
        # P2SH wrapper can be combined with anything else
        conf["script_p2sh"] = spk
        spk = script_to_p2sh_script(spk)
    # Explicit keyword arguments take precedence over the computed defaults.
    conf = {**conf, **kwargs}
    def sat_fn(tx, idx, utxos, valid):
        # For the failure case, apply the "failure" overrides on top of conf.
        if valid:
            return spend(tx, idx, utxos, **conf)
        else:
            assert failure is not None
            return spend(tx, idx, utxos, **{**conf, **failure})
    return Spender(script=spk, comment=comment, is_standard=standard, sat_function=sat_fn, err_msg=err_msg, sigops_weight=sigops_weight, no_fail=failure is None, need_vin_vout_mismatch=need_vin_vout_mismatch)
def add_spender(spenders, *args, **kwargs):
    """Make a spender using make_spender, and add it to spenders."""
    spender = make_spender(*args, **kwargs)
    spenders.append(spender)
# === Helpers for the test ===
def random_checksig_style(pubkey):
    """Creates a random CHECKSIG* tapscript that would succeed with only the valid signature on witness stack."""
    opcode = random.choice([OP_CHECKSIG, OP_CHECKSIGVERIFY, OP_CHECKSIGADD])
    if opcode == OP_CHECKSIGADD:
        # Use a counter value near the int32 extremes to exercise edge cases.
        num = random.choice([0, 0x7fffffff, -0x7fffffff])
        elems = [num, pubkey, opcode, num + 1, OP_EQUAL]
    elif opcode == OP_CHECKSIGVERIFY:
        elems = [pubkey, opcode, OP_1]
    else:
        elems = [pubkey, opcode]
    return bytes(CScript(elems))
def bitflipper(expr):
    """Return a callable that evaluates expr and returns it with a random bitflip."""
    def fn(ctx):
        data = deep_eval(ctx, expr)
        assert isinstance(data, bytes)
        # Flip one uniformly-chosen bit of the evaluated byte string.
        flipped = int.from_bytes(data, 'little') ^ (1 << random.randrange(len(data) * 8))
        return flipped.to_bytes(len(data), 'little')
    return fn
def zero_appender(expr):
    """Return a callable that evaluates expr and returns it with a zero added."""
    def fn(ctx):
        return deep_eval(ctx, expr) + b"\x00"
    return fn
def byte_popper(expr):
    """Return a callable that evaluates expr and returns it with its last byte removed."""
    def fn(ctx):
        return deep_eval(ctx, expr)[:-1]
    return fn
# Expected error strings
# Each dict is splatted (**ERR_*) into add_spender()/make_spender() calls to
# supply the err_msg expected from the node when the failing spend is rejected.
ERR_SIG_SIZE = {"err_msg": "Invalid Schnorr signature size"}
ERR_SIG_HASHTYPE = {"err_msg": "Invalid Schnorr signature hash type"}
ERR_SIG_SCHNORR = {"err_msg": "Invalid Schnorr signature"}
ERR_OP_RETURN = {"err_msg": "OP_RETURN was encountered"}
ERR_CONTROLBLOCK_SIZE = {"err_msg": "Invalid Taproot control block size"}
ERR_WITNESS_PROGRAM_MISMATCH = {"err_msg": "Witness program hash mismatch"}
ERR_PUSH_LIMIT = {"err_msg": "Push value size limit exceeded"}
ERR_DISABLED_OPCODE = {"err_msg": "Attempted to use a disabled opcode"}
ERR_TAPSCRIPT_CHECKMULTISIG = {"err_msg": "OP_CHECKMULTISIG(VERIFY) is not available in tapscript"}
ERR_MINIMALIF = {"err_msg": "OP_IF/NOTIF argument must be minimal in tapscript"}
ERR_UNKNOWN_PUBKEY = {"err_msg": "Public key is neither compressed or uncompressed"}
ERR_STACK_SIZE = {"err_msg": "Stack size limit exceeded"}
ERR_CLEANSTACK = {"err_msg": "Stack size must be exactly one after execution"}
ERR_STACK_EMPTY = {"err_msg": "Operation not valid with the current stack size"}
ERR_SIGOPS_RATIO = {"err_msg": "Too much signature validation relative to witness weight"}
ERR_UNDECODABLE = {"err_msg": "Opcode missing or not understood"}
ERR_NO_SUCCESS = {"err_msg": "Script evaluated without error but finished with a false/empty top stack element"}
ERR_EMPTY_WITNESS = {"err_msg": "Witness program was passed an empty witness"}
ERR_CHECKSIGVERIFY = {"err_msg": "Script failed an OP_CHECKSIGVERIFY operation"}
# All valid hashtype byte values for ECDSA (pre-taproot) signatures.
VALID_SIGHASHES_ECDSA = [
    SIGHASH_ALL,
    SIGHASH_NONE,
    SIGHASH_SINGLE,
    SIGHASH_ANYONECANPAY + SIGHASH_ALL,
    SIGHASH_ANYONECANPAY + SIGHASH_NONE,
    SIGHASH_ANYONECANPAY + SIGHASH_SINGLE
]
# Taproot additionally permits SIGHASH_DEFAULT.
VALID_SIGHASHES_TAPROOT = [SIGHASH_DEFAULT] + VALID_SIGHASHES_ECDSA
# The taproot hashtypes that commit to a single corresponding output.
VALID_SIGHASHES_TAPROOT_SINGLE = [
    SIGHASH_SINGLE,
    SIGHASH_ANYONECANPAY + SIGHASH_SINGLE
]
VALID_SIGHASHES_TAPROOT_NO_SINGLE = [h for h in VALID_SIGHASHES_TAPROOT if h not in VALID_SIGHASHES_TAPROOT_SINGLE]
# Common context-override dicts, splatted (**NAME) into add_spender() calls.
SIGHASH_BITFLIP = {"failure": {"sighash": bitflipper(default_sighash)}}
SIG_POP_BYTE = {"failure": {"sign": byte_popper(default_sign)}}
SINGLE_SIG = {"inputs": [getter("sign")]}
SIG_ADD_ZERO = {"failure": {"sign": zero_appender(default_sign)}}
# Output amount bounds (in satoshi) used when constructing test transactions.
DUST_LIMIT = 600
MIN_FEE = 50000
# === Actual test cases ===
def spenders_taproot_active():
"""Return a list of Spenders for testing post-Taproot activation behavior."""
secs = [generate_privkey() for _ in range(8)]
pubs = [compute_xonly_pubkey(sec)[0] for sec in secs]
spenders = []
# == Tests for BIP340 signature validation. ==
# These are primarily tested through the test vectors implemented in libsecp256k1, and in src/tests/key_tests.cpp.
# Some things are tested programmatically as well here.
tap = taproot_construct(pubs[0])
# Test with key with bit flipped.
add_spender(spenders, "sig/key", tap=tap, key=secs[0], failure={"key_tweaked": bitflipper(default_key_tweaked)}, **ERR_SIG_SCHNORR)
# Test with sighash with bit flipped.
add_spender(spenders, "sig/sighash", tap=tap, key=secs[0], failure={"sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
# Test with invalid R sign.
add_spender(spenders, "sig/flip_r", tap=tap, key=secs[0], failure={"flag_flip_r": True}, **ERR_SIG_SCHNORR)
# Test with invalid P sign.
add_spender(spenders, "sig/flip_p", tap=tap, key=secs[0], failure={"flag_flip_p": True}, **ERR_SIG_SCHNORR)
# Test with signature with bit flipped.
add_spender(spenders, "sig/bitflip", tap=tap, key=secs[0], failure={"signature": bitflipper(default_signature)}, **ERR_SIG_SCHNORR)
# == Test involving an internal public key not on the curve ==
# X-only public keys are 32 bytes, but not every 32-byte array is a valid public key; only
# around 50% of them are. This does not affect users using correct software; these "keys" have
# no corresponding private key, and thus will never appear as output of key
# generation/derivation/tweaking.
#
# Using an invalid public key as P2TR output key makes the UTXO unspendable. Revealing an
# invalid public key as internal key in a P2TR script path spend also makes the spend invalid.
# These conditions are explicitly spelled out in BIP341.
#
# It is however hard to create test vectors for this, because it involves "guessing" how a
# hypothetical incorrect implementation deals with an obviously-invalid condition, and making
# sure that guessed behavior (accepting it in certain condition) doesn't occur.
#
# The test case added here tries to detect a very specific bug a verifier could have: if they
# don't verify whether or not a revealed internal public key in a script path spend is valid,
# and (correctly) implement output_key == tweak(internal_key, tweakval) but (incorrectly) treat
# tweak(invalid_key, tweakval) as equal the public key corresponding to private key tweakval.
# This may seem like a far-fetched edge condition to test for, but in fact, the BIP341 wallet
# pseudocode did exactly that (but obviously only triggerable by someone invoking the tweaking
# function with an invalid public key, which shouldn't happen).
# Generate an invalid public key
while True:
invalid_pub = random_bytes(32)
if not SECP256K1.is_x_coord(int.from_bytes(invalid_pub, 'big')):
break
# Implement a test case that detects validation logic which maps invalid public keys to the
# point at infinity in the tweaking logic.
tap = taproot_construct(invalid_pub, [("true", CScript([OP_1]))], treat_internal_as_infinity=True)
add_spender(spenders, "output/invalid_x", tap=tap, key_tweaked=tap.tweak, failure={"leaf": "true", "inputs": []}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Do the same thing without invalid point, to make sure there is no mistake in the test logic.
tap = taproot_construct(pubs[0], [("true", CScript([OP_1]))])
add_spender(spenders, "output/invalid_x_mock", tap=tap, key=secs[0], leaf="true", inputs=[])
# == Tests for signature hashing ==
# Run all tests once with no annex, and once with a valid random annex.
for annex in [None, lambda _: bytes([ANNEX_TAG]) + random_bytes(random.randrange(0, 250))]:
# Non-empty annex is non-standard
no_annex = annex is None
# Sighash mutation tests (test all sighash combinations)
for hashtype in VALID_SIGHASHES_TAPROOT:
common = {"annex": annex, "hashtype": hashtype, "standard": no_annex}
# Pure pubkey
tap = taproot_construct(pubs[0])
add_spender(spenders, "sighash/purepk", tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Pubkey/P2PK script combination
scripts = [("s0", CScript(random_checksig_style(pubs[1])))]
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "sighash/keypath_hashtype_%x" % hashtype, tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/scriptpath_hashtype_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Test SIGHASH_SINGLE behavior in combination with mismatching outputs
if hashtype in VALID_SIGHASHES_TAPROOT_SINGLE:
add_spender(spenders, "sighash/keypath_hashtype_mis_%x" % hashtype, tap=tap, key=secs[0], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True)
add_spender(spenders, "sighash/scriptpath_hashtype_mis_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), **SINGLE_SIG, failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True)
# Test OP_CODESEPARATOR impact on sighashing.
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
common = {"annex": annex, "hashtype": hashtype, "standard": no_annex}
scripts = [
("pk_codesep", CScript(random_checksig_style(pubs[1]) + bytes([OP_CODESEPARATOR]))), # codesep after checksig
("codesep_pk", CScript(bytes([OP_CODESEPARATOR]) + random_checksig_style(pubs[1]))), # codesep before checksig
("branched_codesep", CScript([random_bytes(random.randrange(511)), OP_DROP, OP_IF, OP_CODESEPARATOR, pubs[0], OP_ELSE, OP_CODESEPARATOR, pubs[1], OP_ENDIF, OP_CHECKSIG])), # branch dependent codesep
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "sighash/pk_codesep", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/codesep_pk", tap=tap, leaf="codesep_pk", key=secs[1], codeseppos=0, **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/branched_codesep/left", tap=tap, leaf="branched_codesep", key=secs[0], codeseppos=3, **common, inputs=[getter("sign"), b'\x01'], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/branched_codesep/right", tap=tap, leaf="branched_codesep", key=secs[1], codeseppos=6, **common, inputs=[getter("sign"), b''], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Reusing the scripts above, test that various features affect the sighash.
add_spender(spenders, "sighash/annex", tap=tap, leaf="pk_codesep", key=secs[1], hashtype=hashtype, standard=False, **SINGLE_SIG, annex=bytes([ANNEX_TAG]), failure={"sighash": override(default_sighash, annex=None)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/script", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, script_taproot=tap.leaves["codesep_pk"].script)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/leafver", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leafversion=random.choice([x & 0xFE for x in range(0x100) if x & 0xFE != 0xC0]))}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leaf=None)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/keypath", tap=tap, key=secs[0], **common, failure={"sighash": override(default_sighash, leaf="pk_codesep")}, **ERR_SIG_SCHNORR)
# Test that invalid hashtypes don't work, both in key path and script path spends
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
for invalid_hashtype in [x for x in range(0x100) if x not in VALID_SIGHASHES_TAPROOT]:
add_spender(spenders, "sighash/keypath_unk_hashtype_%x" % invalid_hashtype, tap=tap, key=secs[0], hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/scriptpath_unk_hashtype_%x" % invalid_hashtype, tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE)
# Test that hashtype 0 cannot have a hashtype byte, and 1 must have one.
add_spender(spenders, "sighash/hashtype0_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/hashtype0_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/hashtype1_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
# Test that hashtype 0 and hashtype 1 cannot be transmuted into each other.
add_spender(spenders, "sighash/hashtype0to1_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype0to1_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1to0_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1to0_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
# Test aspects of signatures with unusual lengths
for hashtype in [SIGHASH_DEFAULT, random.choice(VALID_SIGHASHES_TAPROOT)]:
scripts = [
("csv", CScript([pubs[2], OP_CHECKSIGVERIFY, OP_1])),
("cs_pos", CScript([pubs[2], OP_CHECKSIG])),
("csa_pos", CScript([OP_0, pubs[2], OP_CHECKSIGADD, OP_1, OP_EQUAL])),
("cs_neg", CScript([pubs[2], OP_CHECKSIG, OP_NOT])),
("csa_neg", CScript([OP_2, pubs[2], OP_CHECKSIGADD, OP_2, OP_EQUAL]))
]
random.shuffle(scripts)
tap = taproot_construct(pubs[3], scripts)
# Empty signatures
add_spender(spenders, "siglen/empty_keypath", tap=tap, key=secs[3], hashtype=hashtype, failure={"sign": b""}, **ERR_SIG_SIZE)
add_spender(spenders, "siglen/empty_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_CHECKSIGVERIFY)
add_spender(spenders, "siglen/empty_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS)
add_spender(spenders, "siglen/empty_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS)
add_spender(spenders, "siglen/empty_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random_bytes(random.randrange(1, 63))}, **ERR_SIG_SIZE)
add_spender(spenders, "siglen/empty_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random_bytes(random.randrange(66, 100))}, **ERR_SIG_SIZE)
# Appending a zero byte to signatures invalidates them
add_spender(spenders, "siglen/padzero_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
# Removing the last byte from signatures invalidates them
add_spender(spenders, "siglen/popbyte_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
# Verify that an invalid signature is not allowed, not even when the CHECKSIG* is expected to fail.
add_spender(spenders, "siglen/invalid_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "siglen/invalid_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
# == Test that BIP341 spending only applies to witness version 1, program length 32, no P2SH ==
for p2sh in [False, True]:
for witver in range(1, 17):
for witlen in [20, 31, 32, 33]:
def mutate(spk):
    """Map a P2TR scriptPubKey to witness version witver with a witlen-byte program."""
    program = spk[2:]
    assert len(program) == 32
    if witlen < 32:
        # Shorten the program by truncating it.
        program = program[:witlen]
    elif witlen > 32:
        # Lengthen the program by zero-padding it.
        program = program + bytes(witlen - 32)
    return CScript([CScriptOp.encode_op_n(witver), program])
scripts = [("s0", CScript([pubs[0], OP_CHECKSIG])), ("dummy", CScript([OP_RETURN]))]
tap = taproot_construct(pubs[1], scripts)
if not p2sh and witver == 1 and witlen == 32:
add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, failure={"leaf": "dummy"}, **ERR_OP_RETURN)
else:
add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], standard=False)
add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, standard=False)
# == Test various aspects of BIP341 spending paths ==
# A set of functions that compute the hashing partner in a Merkle tree, designed to exercise
# edge cases. This relies on the taproot_construct feature that a lambda can be passed in
# instead of a subtree, to compute the partner to be hashed with.
PARTNER_MERKLE_FN = [
# Combine with itself
lambda h: h,
# Combine with hash 0
lambda h: bytes([0 for _ in range(32)]),
# Combine with hash 2^256-1
lambda h: bytes([0xff for _ in range(32)]),
# Combine with itself-1 (BE)
lambda h: (int.from_bytes(h, 'big') - 1).to_bytes(32, 'big'),
# Combine with itself+1 (BE)
lambda h: (int.from_bytes(h, 'big') + 1).to_bytes(32, 'big'),
# Combine with itself-1 (LE)
lambda h: (int.from_bytes(h, 'little') - 1).to_bytes(32, 'big'),
# Combine with itself+1 (LE)
lambda h: (int.from_bytes(h, 'little') + 1).to_bytes(32, 'little'),
# Combine with random bitflipped version of self.
lambda h: (int.from_bytes(h, 'little') ^ (1 << random.randrange(256))).to_bytes(32, 'little')
]
# Start with a tree of that has depth 1 for "128deep" and depth 2 for "129deep".
scripts = [("128deep", CScript([pubs[0], OP_CHECKSIG])), [("129deep", CScript([pubs[0], OP_CHECKSIG])), random.choice(PARTNER_MERKLE_FN)]]
# Add 127 nodes on top of that tree, so that "128deep" and "129deep" end up at their designated depths.
for _ in range(127):
scripts = [scripts, random.choice(PARTNER_MERKLE_FN)]
tap = taproot_construct(pubs[0], scripts)
# Test that spends with a depth of 128 work, but 129 doesn't (even with a tree with weird Merkle branches in it).
add_spender(spenders, "spendpath/merklelimit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"leaf": "129deep"}, **ERR_CONTROLBLOCK_SIZE)
# Test that flipping the negation bit invalidates spends.
add_spender(spenders, "spendpath/negflag", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"negflag": lambda ctx: 1 - default_negflag(ctx)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that bitflips in the Merkle branch invalidate it.
add_spender(spenders, "spendpath/bitflipmerkle", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"merklebranch": bitflipper(default_merklebranch)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that bitflips in the internal pubkey invalidate it.
add_spender(spenders, "spendpath/bitflippubkey", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"pubkey_internal": bitflipper(default_pubkey_internal)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that empty witnesses are invalid.
add_spender(spenders, "spendpath/emptywit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"witness": []}, **ERR_EMPTY_WITNESS)
# Test that adding garbage to the control block invalidates it.
add_spender(spenders, "spendpath/padlongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random_bytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block invalidates it.
add_spender(spenders, "spendpath/trunclongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE)
scripts = [("s", CScript([pubs[0], OP_CHECKSIG]))]
tap = taproot_construct(pubs[1], scripts)
# Test that adding garbage to the control block invalidates it.
add_spender(spenders, "spendpath/padshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random_bytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block invalidates it.
add_spender(spenders, "spendpath/truncshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block to 1 byte ("-1 Merkle length") invalidates it
add_spender(spenders, "spendpath/trunc1shortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:1]}, **ERR_CONTROLBLOCK_SIZE)
# == Test BIP342 edge cases ==
csa_low_val = random.randrange(0, 17) # Within range for OP_n
csa_low_result = csa_low_val + 1
csa_high_val = random.randrange(17, 100) if random.getrandbits(1) else random.randrange(-100, -1) # Outside OP_n range
csa_high_result = csa_high_val + 1
OVERSIZE_NUMBER = 2**31
assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER))), 6)
assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER-1))), 5)
big_choices = []
big_scriptops = []
for i in range(1000):
r = random.randrange(len(pubs))
big_choices.append(r)
big_scriptops += [pubs[r], OP_CHECKSIGVERIFY]
def big_spend_inputs(ctx):
    """Helper function to construct the script input for t33/t34 below."""
    # Instead of signing 999 times, precompute a signature for every (key, hashtype) combination.
    sigs = {
        (k, ht): override(default_sign, hashtype=ht, key=secs[k])(ctx)
        for ht in VALID_SIGHASHES_TAPROOT
        for k in range(len(pubs))
    }
    num = get(ctx, "num")
    # The witness stack is consumed last-push-first, so emit the signatures in reverse order.
    return [sigs[(big_choices[i], random.choice(VALID_SIGHASHES_TAPROOT))] for i in reversed(range(num))]
# Various BIP342 features
scripts = [
# 0) drop stack element and OP_CHECKSIG
("t0", CScript([OP_DROP, pubs[1], OP_CHECKSIG])),
# 1) normal OP_CHECKSIG
("t1", CScript([pubs[1], OP_CHECKSIG])),
# 2) normal OP_CHECKSIGVERIFY
("t2", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1])),
# 3) Hypothetical OP_CHECKMULTISIG script that takes a single sig as input
("t3", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIG])),
# 4) Hypothetical OP_CHECKMULTISIGVERIFY script that takes a single sig as input
("t4", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIGVERIFY, OP_1])),
# 5) OP_IF script that needs a true input
("t5", CScript([OP_IF, pubs[1], OP_CHECKSIG, OP_ELSE, OP_RETURN, OP_ENDIF])),
# 6) OP_NOTIF script that needs a true input
("t6", CScript([OP_NOTIF, OP_RETURN, OP_ELSE, pubs[1], OP_CHECKSIG, OP_ENDIF])),
# 7) OP_CHECKSIG with an empty key
("t7", CScript([OP_0, OP_CHECKSIG])),
# 8) OP_CHECKSIGVERIFY with an empty key
("t8", CScript([OP_0, OP_CHECKSIGVERIFY, OP_1])),
# 9) normal OP_CHECKSIGADD that also ensures return value is correct
("t9", CScript([csa_low_val, pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 10) OP_CHECKSIGADD with empty key
("t10", CScript([csa_low_val, OP_0, OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 11) OP_CHECKSIGADD with missing counter stack element
("t11", CScript([pubs[1], OP_CHECKSIGADD, OP_1, OP_EQUAL])),
# 12) OP_CHECKSIG that needs invalid signature
("t12", CScript([pubs[1], OP_CHECKSIGVERIFY, pubs[0], OP_CHECKSIG, OP_NOT])),
# 13) OP_CHECKSIG with empty key that needs invalid signature
("t13", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_CHECKSIG, OP_NOT])),
# 14) OP_CHECKSIGADD that needs invalid signature
("t14", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, pubs[0], OP_CHECKSIGADD, OP_NOT])),
# 15) OP_CHECKSIGADD with empty key that needs invalid signature
("t15", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGADD, OP_NOT])),
# 16) OP_CHECKSIG with unknown pubkey type
("t16", CScript([OP_1, OP_CHECKSIG])),
# 17) OP_CHECKSIGADD with unknown pubkey type
("t17", CScript([OP_0, OP_1, OP_CHECKSIGADD])),
# 18) OP_CHECKSIGVERIFY with unknown pubkey type
("t18", CScript([OP_1, OP_CHECKSIGVERIFY, OP_1])),
# 19) script longer than 10000 bytes and over 201 non-push opcodes
("t19", CScript([OP_0, OP_0, OP_2DROP] * 10001 + [pubs[1], OP_CHECKSIG])),
# 20) OP_CHECKSIGVERIFY with empty key
("t20", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGVERIFY, OP_1])),
# 21) Script that grows the stack to 1000 elements
("t21", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 999 + [OP_DROP] * 999)),
# 22) Script that grows the stack to 1001 elements
("t22", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 1000 + [OP_DROP] * 1000)),
# 23) Script that expects an input stack of 1000 elements
("t23", CScript([OP_DROP] * 999 + [pubs[1], OP_CHECKSIG])),
# 24) Script that expects an input stack of 1001 elements
("t24", CScript([OP_DROP] * 1000 + [pubs[1], OP_CHECKSIG])),
# 25) Script that pushes a MAX_SCRIPT_ELEMENT_SIZE-bytes element
("t25", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE), OP_DROP, pubs[1], OP_CHECKSIG])),
# 26) Script that pushes a (MAX_SCRIPT_ELEMENT_SIZE+1)-bytes element
("t26", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, pubs[1], OP_CHECKSIG])),
# 27) CHECKSIGADD that must fail because numeric argument number is >4 bytes
("t27", CScript([CScriptNum(OVERSIZE_NUMBER), pubs[1], OP_CHECKSIGADD])),
# 28) Pushes random CScriptNum value, checks OP_CHECKSIGADD result
("t28", CScript([csa_high_val, pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])),
# 29) CHECKSIGADD that succeeds with proper sig because numeric argument number is <=4 bytes
("t29", CScript([CScriptNum(OVERSIZE_NUMBER-1), pubs[1], OP_CHECKSIGADD])),
# 30) Variant of t1 with "normal" 33-byte pubkey
("t30", CScript([b'\x03' + pubs[1], OP_CHECKSIG])),
# 31) Variant of t2 with "normal" 33-byte pubkey
("t31", CScript([b'\x02' + pubs[1], OP_CHECKSIGVERIFY, OP_1])),
# 32) Variant of t28 with "normal" 33-byte pubkey
("t32", CScript([csa_high_val, b'\x03' + pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])),
# 33) 999-of-999 multisig
("t33", CScript(big_scriptops[:1998] + [OP_1])),
# 34) 1000-of-1000 multisig
("t34", CScript(big_scriptops[:2000] + [OP_1])),
# 35) Variant of t9 that uses a non-minimally encoded input arg
("t35", CScript([bytes([csa_low_val]), pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 36) Empty script
("t36", CScript([])),
]
# Add many dummies to test huge trees
for j in range(100000):
scripts.append((None, CScript([OP_RETURN, random.randrange(100000)])))
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
common = {
"hashtype": hashtype,
"key": secs[1],
"tap": tap,
}
# Test that MAX_SCRIPT_ELEMENT_SIZE byte stack element inputs are valid, but not one more (and 80 bytes is standard but 81 is not).
add_spender(spenders, "tapscript/inputmaxlimit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random_bytes(MAX_SCRIPT_ELEMENT_SIZE)], failure={"inputs": [getter("sign"), random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1)]}, **ERR_PUSH_LIMIT)
add_spender(spenders, "tapscript/input80limit", leaf="t0", **common, inputs=[getter("sign"), random_bytes(80)])
add_spender(spenders, "tapscript/input81limit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random_bytes(81)])
# Test that OP_CHECKMULTISIG and OP_CHECKMULTISIGVERIFY cause failure, but OP_CHECKSIG and OP_CHECKSIGVERIFY work.
add_spender(spenders, "tapscript/disabled_checkmultisig", leaf="t1", **common, **SINGLE_SIG, failure={"leaf": "t3"}, **ERR_TAPSCRIPT_CHECKMULTISIG)
add_spender(spenders, "tapscript/disabled_checkmultisigverify", leaf="t2", **common, **SINGLE_SIG, failure={"leaf": "t4"}, **ERR_TAPSCRIPT_CHECKMULTISIG)
# Test that OP_IF and OP_NOTIF do not accept non-0x01 as truth value (the MINIMALIF rule is consensus in Tapscript)
add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x02']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x03']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0001']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0100']}, **ERR_MINIMALIF)
# Test that 1-byte public keys (which are unknown) are acceptable but nonstandard with unrelated signatures, but 0-byte public keys are not valid.
add_spender(spenders, "tapscript/unkpk/checksig", leaf="t16", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/unkpk/checksigadd", leaf="t17", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/unkpk/checksigverify", leaf="t18", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY)
# Test that 33-byte public keys (which are unknown) are acceptable but nonstandard with valid signatures, but normal pubkeys are not valid in that case.
add_spender(spenders, "tapscript/oldpk/checksig", leaf="t30", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t1"}, **ERR_SIG_SCHNORR)
add_spender(spenders, "tapscript/oldpk/checksigadd", leaf="t31", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t2"}, **ERR_SIG_SCHNORR)
add_spender(spenders, "tapscript/oldpk/checksigverify", leaf="t32", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t28"}, **ERR_SIG_SCHNORR)
# Test that 0-byte public keys are not acceptable.
add_spender(spenders, "tapscript/emptypk/checksig", leaf="t1", **SINGLE_SIG, **common, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigverify", leaf="t2", **SINGLE_SIG, **common, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t35", standard=False, **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
# Test that OP_CHECKSIGADD results are as expected
add_spender(spenders, "tapscript/checksigaddresults", leaf="t28", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error")
add_spender(spenders, "tapscript/checksigaddoversize", leaf="t29", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error")
# Test that OP_CHECKSIGADD requires 3 stack elements.
add_spender(spenders, "tapscript/checksigadd3args", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t11"}, **ERR_STACK_EMPTY)
# Test that empty signatures do not cause script failure in OP_CHECKSIG and OP_CHECKSIGADD (but do fail with empty pubkey, and do fail OP_CHECKSIGVERIFY)
add_spender(spenders, "tapscript/emptysigs/checksig", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t13"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptysigs/nochecksigverify", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t20"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptysigs/checksigadd", leaf="t14", **common, inputs=[b'', getter("sign")], failure={"leaf": "t15"}, **ERR_UNKNOWN_PUBKEY)
# Test that scripts over 10000 bytes (and over 201 non-push ops) are acceptable.
add_spender(spenders, "tapscript/no10000limit", leaf="t19", **SINGLE_SIG, **common)
# Test that a stack size of 1000 elements is permitted, but 1001 isn't.
add_spender(spenders, "tapscript/1000stack", leaf="t21", **SINGLE_SIG, **common, failure={"leaf": "t22"}, **ERR_STACK_SIZE)
# Test that an input stack size of 1000 elements is permitted, but 1001 isn't.
add_spender(spenders, "tapscript/1000inputs", leaf="t23", **common, inputs=[getter("sign")] + [b'' for _ in range(999)], failure={"leaf": "t24", "inputs": [getter("sign")] + [b'' for _ in range(1000)]}, **ERR_STACK_SIZE)
# Test that pushing a MAX_SCRIPT_ELEMENT_SIZE byte stack element is valid, but one longer is not.
add_spender(spenders, "tapscript/pushmaxlimit", leaf="t25", **common, **SINGLE_SIG, failure={"leaf": "t26"}, **ERR_PUSH_LIMIT)
# Test that 999-of-999 multisig works (but 1000-of-1000 triggers stack size limits)
add_spender(spenders, "tapscript/bigmulti", leaf="t33", **common, inputs=big_spend_inputs, num=999, failure={"leaf": "t34", "num": 1000}, **ERR_STACK_SIZE)
# Test that the CLEANSTACK rule is consensus critical in tapscript
add_spender(spenders, "tapscript/cleanstack", leaf="t36", tap=tap, inputs=[b'\x01'], failure={"inputs": [b'\x01', b'\x01']}, **ERR_CLEANSTACK)
# == Test for sigops ratio limit ==
# Given a number n, and a public key pk, functions that produce a (CScript, sigops). Each script takes as
# input a valid signature with the passed pk followed by a dummy push of bytes that are to be dropped, and
# will execute sigops signature checks.
SIGOPS_RATIO_SCRIPTS = [
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIG.
lambda n, pk: (CScript([OP_DROP, pk] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_CHECKSIG]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGVERIFY.
lambda n, pk: (CScript([OP_DROP, pk, OP_0, OP_IF, OP_2DUP, OP_CHECKSIGVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_2, OP_SWAP, OP_CHECKSIGADD, OP_3, OP_EQUAL]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIG.
lambda n, pk: (CScript([random_bytes(220), OP_2DROP, pk, OP_1, OP_NOTIF, OP_2DUP, OP_CHECKSIG, OP_VERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_4, OP_SWAP, OP_CHECKSIGADD, OP_5, OP_EQUAL]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGADD.
lambda n, pk: (CScript([OP_DROP, pk, OP_1, OP_IF, OP_ELSE, OP_2DUP, OP_6, OP_SWAP, OP_CHECKSIGADD, OP_7, OP_EQUALVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_8, OP_SWAP, OP_CHECKSIGADD, OP_9, OP_EQUAL]), n + 1),
# n+1 OP_CHECKSIGs, but also one OP_CHECKSIG with an empty signature.
lambda n, pk: (CScript([OP_DROP, OP_0, pk, OP_CHECKSIG, OP_NOT, OP_VERIFY, pk] + [OP_2DUP, OP_CHECKSIG, OP_VERIFY] * n + [OP_CHECKSIG]), n + 1),
# n OP_CHECKSIGADDs and 1 OP_CHECKSIG, but also an OP_CHECKSIGADD with an empty signature.
lambda n, pk: (CScript([OP_DROP, OP_0, OP_10, pk, OP_CHECKSIGADD, OP_10, OP_EQUALVERIFY, pk] + [OP_2DUP, OP_16, OP_SWAP, OP_CHECKSIGADD, b'\x11', OP_EQUALVERIFY] * n + [OP_CHECKSIG]), n + 1),
]
for annex in [None, bytes([ANNEX_TAG]) + random_bytes(random.randrange(1000))]:
for hashtype in [SIGHASH_DEFAULT, SIGHASH_ALL]:
for pubkey in [pubs[1], random_bytes(random.choice([x for x in range(2, 81) if x != 32]))]:
for fn_num, fn in enumerate(SIGOPS_RATIO_SCRIPTS):
merkledepth = random.randrange(129)
def predict_sigops_ratio(n, dummy_size):
    """Predict whether spending fn(n, pubkey) with dummy_size will pass the ratio test."""
    script, sigops = fn(n, pubkey)
    # Predict the size of the witness for a given choice of n
    stacklen_size = 1
    # Schnorr sig is 64 bytes, plus 1 sighash byte when the hashtype is not SIGHASH_DEFAULT.
    sig_size = 64 + (hashtype != SIGHASH_DEFAULT)
    siglen_size = 1
    # Compact-size length prefixes take 1 byte below 253, 3 bytes from 253 up
    # (the sizes involved here never reach the 5/9-byte encodings).
    dummylen_size = 1 + 2 * (dummy_size >= 253)
    script_size = len(script)
    scriptlen_size = 1 + 2 * (script_size >= 253)
    # Control block: 1 leaf-version/parity byte + 32-byte internal key + 32 bytes per Merkle level.
    control_size = 33 + 32 * merkledepth
    controllen_size = 1 + 2 * (control_size >= 253)
    annex_size = 0 if annex is None else len(annex)
    annexlen_size = 0 if annex is None else 1 + 2 * (annex_size >= 253)
    witsize = stacklen_size + sig_size + siglen_size + dummy_size + dummylen_size + script_size + scriptlen_size + control_size + controllen_size + annex_size + annexlen_size
    # sigops ratio test: each executed sigop costs 50 units against a budget of witness size + 50.
    return witsize + 50 >= 50 * sigops
# Make sure n is high enough that with empty dummy, the script is not valid
n = 0
while predict_sigops_ratio(n, 0):
n += 1
# But allow picking a bit higher still
n += random.randrange(5)
# Now pick dummy size *just* large enough that the overall construction passes
dummylen = 0
while not predict_sigops_ratio(n, dummylen):
dummylen += 1
scripts = [("s", fn(n, pubkey)[0])]
for _ in range(merkledepth):
scripts = [scripts, random.choice(PARTNER_MERKLE_FN)]
tap = taproot_construct(pubs[0], scripts)
standard = annex is None and dummylen <= 80 and len(pubkey) == 32
add_spender(spenders, "tapscript/sigopsratio_%i" % fn_num, tap=tap, leaf="s", annex=annex, hashtype=hashtype, key=secs[1], inputs=[getter("sign"), random_bytes(dummylen)], standard=standard, failure={"inputs": [getter("sign"), random_bytes(dummylen - 1)]}, **ERR_SIGOPS_RATIO)
# Future leaf versions
for leafver in range(0, 0x100, 2):
if leafver == LEAF_VERSION_TAPSCRIPT or leafver == ANNEX_TAG:
# Skip the defined LEAF_VERSION_TAPSCRIPT, and the ANNEX_TAG which is not usable as leaf version
continue
scripts = [
("bare_c0", CScript([OP_NOP])),
("bare_unkver", CScript([OP_NOP]), leafver),
("return_c0", CScript([OP_RETURN])),
("return_unkver", CScript([OP_RETURN]), leafver),
("undecodable_c0", CScript([OP_PUSHDATA1])),
("undecodable_unkver", CScript([OP_PUSHDATA1]), leafver),
("bigpush_c0", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP])),
("bigpush_unkver", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP]), leafver),
("1001push_c0", CScript([OP_0] * 1001)),
("1001push_unkver", CScript([OP_0] * 1001), leafver),
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "unkver/bare", standard=False, tap=tap, leaf="bare_unkver", failure={"leaf": "bare_c0"}, **ERR_CLEANSTACK)
add_spender(spenders, "unkver/return", standard=False, tap=tap, leaf="return_unkver", failure={"leaf": "return_c0"}, **ERR_OP_RETURN)
add_spender(spenders, "unkver/undecodable", standard=False, tap=tap, leaf="undecodable_unkver", failure={"leaf": "undecodable_c0"}, **ERR_UNDECODABLE)
add_spender(spenders, "unkver/bigpush", standard=False, tap=tap, leaf="bigpush_unkver", failure={"leaf": "bigpush_c0"}, **ERR_PUSH_LIMIT)
add_spender(spenders, "unkver/1001push", standard=False, tap=tap, leaf="1001push_unkver", failure={"leaf": "1001push_c0"}, **ERR_STACK_SIZE)
add_spender(spenders, "unkver/1001inputs", standard=False, tap=tap, leaf="bare_unkver", inputs=[b'']*1001, failure={"leaf": "bare_c0"}, **ERR_STACK_SIZE)
# OP_SUCCESSx tests.
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
for opval in range(76, 0x100):
opcode = CScriptOp(opval)
if not is_op_success(opcode):
continue
scripts = [
("bare_success", CScript([opcode])),
("bare_nop", CScript([OP_NOP])),
("unexecif_success", CScript([OP_0, OP_IF, opcode, OP_ENDIF])),
("unexecif_nop", CScript([OP_0, OP_IF, OP_NOP, OP_ENDIF])),
("return_success", CScript([OP_RETURN, opcode])),
("return_nop", CScript([OP_RETURN, OP_NOP])),
("undecodable_success", CScript([opcode, OP_PUSHDATA1])),
("undecodable_nop", CScript([OP_NOP, OP_PUSHDATA1])),
("undecodable_bypassed_success", CScript([OP_PUSHDATA1, OP_2, opcode])),
("bigpush_success", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, opcode])),
("bigpush_nop", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, OP_NOP])),
("1001push_success", CScript([OP_0] * 1001 + [opcode])),
("1001push_nop", CScript([OP_0] * 1001 + [OP_NOP])),
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "opsuccess/bare", standard=False, tap=tap, leaf="bare_success", failure={"leaf": "bare_nop"}, **ERR_CLEANSTACK)
add_spender(spenders, "opsuccess/unexecif", standard=False, tap=tap, leaf="unexecif_success", failure={"leaf": "unexecif_nop"}, **ERR_CLEANSTACK)
add_spender(spenders, "opsuccess/return", standard=False, tap=tap, leaf="return_success", failure={"leaf": "return_nop"}, **ERR_OP_RETURN)
add_spender(spenders, "opsuccess/undecodable", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_nop"}, **ERR_UNDECODABLE)
add_spender(spenders, "opsuccess/undecodable_bypass", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_bypassed_success"}, **ERR_UNDECODABLE)
add_spender(spenders, "opsuccess/bigpush", standard=False, tap=tap, leaf="bigpush_success", failure={"leaf": "bigpush_nop"}, **ERR_PUSH_LIMIT)
add_spender(spenders, "opsuccess/1001push", standard=False, tap=tap, leaf="1001push_success", failure={"leaf": "1001push_nop"}, **ERR_STACK_SIZE)
add_spender(spenders, "opsuccess/1001inputs", standard=False, tap=tap, leaf="bare_success", inputs=[b'']*1001, failure={"leaf": "bare_nop"}, **ERR_STACK_SIZE)
# Non-OP_SUCCESSx (verify that those aren't accidentally treated as OP_SUCCESSx)
for opval in range(0, 0x100):
opcode = CScriptOp(opval)
if is_op_success(opcode):
continue
scripts = [
("normal", CScript([OP_RETURN, opcode] + [OP_NOP] * 75)),
("op_success", CScript([OP_RETURN, CScriptOp(0x50)]))
]
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "alwaysvalid/notsuccessx", tap=tap, leaf="op_success", inputs=[], standard=False, failure={"leaf": "normal"}) # err_msg differs based on opcode
# == Test case for https://github.com/bitcoin/bitcoin/issues/24765 ==
zero_fn = lambda h: bytes([0 for _ in range(32)])
tap = taproot_construct(pubs[0], [("leaf", CScript([pubs[1], OP_CHECKSIG, pubs[1], OP_CHECKSIGADD, OP_2, OP_EQUAL])), zero_fn])
add_spender(spenders, "case24765", tap=tap, leaf="leaf", inputs=[getter("sign"), getter("sign")], key=secs[1], no_fail=True)
# == Legacy tests ==
# Also add a few legacy spends into the mix, so that transactions which combine taproot and pre-taproot spends get tested too.
for compressed in [False, True]:
eckey1 = ECKey()
eckey1.set(generate_privkey(), compressed)
pubkey1 = eckey1.get_pubkey().get_bytes()
eckey2 = ECKey()
eckey2.set(generate_privkey(), compressed)
for p2sh in [False, True]:
for witv0 in [False, True]:
for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
standard = (hashtype in VALID_SIGHASHES_ECDSA) and (compressed or not witv0)
add_spender(spenders, "legacy/pk-wrongkey", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=key_to_p2pk_script(pubkey1), **SINGLE_SIG, key=eckey1, failure={"key": eckey2}, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
add_spender(spenders, "legacy/pkh-sighashflip", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, pkh=pubkey1, key=eckey1, **SIGHASH_BITFLIP, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
# Verify that OP_CHECKSIGADD wasn't accidentally added to pre-taproot validation logic.
for p2sh in [False, True]:
for witv0 in [False, True]:
for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
standard = hashtype in VALID_SIGHASHES_ECDSA and (p2sh or witv0)
add_spender(spenders, "compat/nocsa", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([OP_IF, OP_11, pubkey1, OP_CHECKSIGADD, OP_12, OP_EQUAL, OP_ELSE, pubkey1, OP_CHECKSIG, OP_ENDIF]), key=eckey1, sigops_weight=4-3*witv0, inputs=[getter("sign"), b''], failure={"inputs": [getter("sign"), b'\x01']}, **ERR_UNDECODABLE)
return spenders
def spenders_taproot_nonstandard():
    """Spenders for testing that post-activation Taproot rules may be nonstandard."""
    spenders = []
    seckey = generate_privkey()
    pubkey, _ = compute_xonly_pubkey(seckey)
    # One leaf with an unknown (future) leaf version, one containing an unexecuted OP_SUCCESS opcode.
    leaves = [
        ("future_leaf", CScript([pubkey, OP_CHECKSIG]), 0xc2),
        ("op_success", CScript([pubkey, OP_CHECKSIG, OP_0, OP_IF, CScriptOp(0x50), OP_ENDIF])),
    ]
    tap = taproot_construct(pubkey, leaves)
    # Test that features like annex, leaf versions, or OP_SUCCESS are valid but non-standard
    add_spender(spenders, "inactive/scriptpath_valid_unkleaf", key=seckey, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")])
    add_spender(spenders, "inactive/scriptpath_invalid_unkleaf", key=seckey, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
    add_spender(spenders, "inactive/scriptpath_valid_opsuccess", key=seckey, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")])
    add_spender(spenders, "inactive/scriptpath_valid_opsuccess", key=seckey, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
    return spenders
# Consensus validation flags to use in dumps for tests with "legacy/" or "inactive/" prefix.
# These differ from TAPROOT_FLAGS only by omitting TAPROOT, so such dumps exercise
# pre-activation validation.
LEGACY_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY"
# Consensus validation flags to use in dumps for all other tests.
TAPROOT_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY,TAPROOT"
def dump_json_test(tx, input_utxos, idx, success, failure):
    """Dump one test case as a JSON fragment for external consumption.

    tx: the transaction being tested; input_utxos: UTXOData for all its inputs;
    idx: which input is under test; success/failure: (scriptSig, witness stack)
    pairs to record, either of which may be None.

    The dump is written to $TEST_DUMP_DIR/x/xyz... where x,y,z,... are the SHA1
    sum of the dump (which makes the file naming scheme compatible with fuzzing
    infrastructure).
    """
    spender = input_utxos[idx].spender
    # Determine flags to dump: tests with "legacy/" or "inactive/" prefix use pre-taproot flags.
    flags = LEGACY_FLAGS if spender.comment.startswith(("legacy/", "inactive/")) else TAPROOT_FLAGS
    fields = [
        ("tx", tx.serialize().hex()),
        ("prevouts", [x.output.serialize().hex() for x in input_utxos]),
        ("index", idx),
        ("flags", flags),
        ("comment", spender.comment)
    ]
    # The "final" field indicates that a spend should be always valid, even with more validation flags enabled
    # than the listed ones. Use standardness as a proxy for this (which gives a conservative underestimate).
    if spender.is_standard:
        fields.append(("final", True))
    def dump_witness(wit):
        # wit is a (scriptSig, witness stack) pair.
        return OrderedDict([("scriptSig", wit[0].hex()), ("witness", [x.hex() for x in wit[1]])])
    if success is not None:
        fields.append(("success", dump_witness(success)))
    if failure is not None:
        fields.append(("failure", dump_witness(failure)))
    dump = json.dumps(OrderedDict(fields)) + ",\n"
    sha1 = hashlib.sha1(dump.encode("utf-8")).hexdigest()
    dirname = os.path.join(os.environ.get("TEST_DUMP_DIR", "."), sha1[0])
    os.makedirs(dirname, exist_ok=True)
    with open(os.path.join(dirname, sha1), 'w', encoding="utf8") as f:
        f.write(dump)
# Data type to keep track of UTXOs, where they were created, and how to spend them:
# outpoint (where it was created), output (the UTXO itself), spender (how to spend it).
UTXOData = namedtuple('UTXOData', 'outpoint,output,spender')
class TaprootTest(BitcoinTestFramework):
def add_options(self, parser):
    """Register the --dumptests command-line option on the framework's parser."""
    parser.add_argument(
        "--dumptests",
        dest="dump_tests",
        default=False,
        action="store_true",
        help="Dump generated test cases to directory set by TEST_DUMP_DIR environment variable",
    )
def skip_test_if_missing_module(self):
    # The test funds its spenders from the node's wallet, so a wallet build is required.
    self.skip_if_no_wallet()
def set_test_params(self):
    self.num_nodes = 1
    self.setup_clean_chain = True
    # NOTE(review): -par=1 presumably restricts script validation to a single
    # thread so validation error reporting is deterministic — confirm.
    self.extra_args = [["-par=1"]]
def block_submit(self, node, txs, msg, err_msg, cb_pubkey=None, fees=0, sigops_weight=0, witness=False, accept=False):
    """Mine a block containing txs on top of the current tip and submit it to node.

    msg: description used in assertion messages.
    err_msg: substring expected in the submitblock response (None if no specific error is checked).
    cb_pubkey/fees: forwarded to create_coinbase.
    sigops_weight: tapscript sigops consumed by txs, used to size the sigop-padding output below.
    witness: whether to add a witness commitment to the block.
    accept: whether the block must become the new tip (True) or be rejected (False).
    """
    # Deplete block of any non-tapscript sigops using a single additional 0-value coinbase output.
    # It is not impossible to fit enough tapscript sigops to hit the old 80k limit without
    # busting txin-level limits. We simply have to account for the p2pk outputs in all
    # transactions.
    extra_output_script = CScript([OP_CHECKSIG] * ((MAX_BLOCK_SIGOPS_WEIGHT - sigops_weight) // WITNESS_SCALE_FACTOR))
    coinbase_tx = create_coinbase(self.lastblockheight + 1, pubkey=cb_pubkey, extra_output_script=extra_output_script, fees=fees)
    block = create_block(self.tip, coinbase_tx, self.lastblocktime + 1, txlist=txs)
    # Use an explicit conditional instead of the former "witness and add_witness_commitment(block)"
    # idiom, which abused boolean short-circuiting for control flow.
    if witness:
        add_witness_commitment(block)
    block.solve()
    block_response = node.submitblock(block.serialize().hex())
    if err_msg is not None:
        assert block_response is not None and err_msg in block_response, "Missing error message '%s' from block response '%s': %s" % (err_msg, "(None)" if block_response is None else block_response, msg)
    if accept:
        assert node.getbestblockhash() == block.hash, "Failed to accept: %s (response: %s)" % (msg, block_response)
        # Advance the locally-tracked chain state to the new tip.
        self.tip = block.sha256
        self.lastblockhash = block.hash
        self.lastblocktime += 1
        self.lastblockheight += 1
    else:
        assert node.getbestblockhash() == self.lastblockhash, "Failed to reject: " + msg
def init_blockinfo(self, node):
# Initialize variables used by block_submit().
self.lastblockhash = node.getbestblockhash()
self.tip = int(self.lastblockhash, 16)
block = node.getblock(self.lastblockhash)
self.lastblockheight = block['height']
self.lastblocktime = block['time']
def test_spenders(self, node, spenders, input_counts):
"""Run randomized tests with a number of "spenders".
Steps:
1) Generate an appropriate UTXO for each spender to test spend conditions
2) Generate 100 random addresses of all wallet types: pkh/sh_wpkh/wpkh
3) Select random number of inputs from (1)
4) Select random number of addresses from (2) as outputs
Each spender embodies a test; in a large randomized test, it is verified
that toggling the valid argument to each lambda toggles the validity of
the transaction. This is accomplished by constructing transactions consisting
of all valid inputs, except one invalid one.
"""
# Construct a bunch of sPKs that send coins back to the host wallet
self.log.info("- Constructing addresses for returning coins")
host_spks = []
host_pubkeys = []
for i in range(16):
addr = node.getnewaddress(address_type=random.choice(["legacy", "p2sh-segwit", "bech32"]))
info = node.getaddressinfo(addr)
spk = bytes.fromhex(info['scriptPubKey'])
host_spks.append(spk)
host_pubkeys.append(bytes.fromhex(info['pubkey']))
self.init_blockinfo(node)
# Create transactions spending up to 50 of the wallet's inputs, with one output for each spender, and
# one change output at the end. The transaction is constructed on the Python side to enable
# having multiple outputs to the same address and outputs with no assigned address. The wallet
# is then asked to sign it through signrawtransactionwithwallet, and then added to a block on the
# Python side (to bypass standardness rules).
self.log.info("- Creating test UTXOs...")
random.shuffle(spenders)
normal_utxos = []
mismatching_utxos = [] # UTXOs with input that requires mismatching output position
done = 0
while done < len(spenders):
# Compute how many UTXOs to create with this transaction
count_this_tx = min(len(spenders) - done, (len(spenders) + 4) // 5, 10000)
fund_tx = CTransaction()
# Add the 50 highest-value inputs
unspents = node.listunspent()
random.shuffle(unspents)
unspents.sort(key=lambda x: int(x["amount"] * 100000000), reverse=True)
if len(unspents) > 50:
unspents = unspents[:50]
random.shuffle(unspents)
balance = 0
for unspent in unspents:
balance += int(unspent["amount"] * 100000000)
txid = int(unspent["txid"], 16)
fund_tx.vin.append(CTxIn(COutPoint(txid, int(unspent["vout"])), CScript()))
# Add outputs
cur_progress = done / len(spenders)
next_progress = (done + count_this_tx) / len(spenders)
change_goal = (1.0 - 0.6 * next_progress) / (1.0 - 0.6 * cur_progress) * balance
self.log.debug("Create %i UTXOs in a transaction spending %i inputs worth %.8f (sending ~%.8f to change)" % (count_this_tx, len(unspents), balance * 0.00000001, change_goal * 0.00000001))
for i in range(count_this_tx):
avg = (balance - change_goal) / (count_this_tx - i)
amount = int(random.randrange(int(avg*0.85 + 0.5), int(avg*1.15 + 0.5)) + 0.5)
balance -= amount
fund_tx.vout.append(CTxOut(amount, spenders[done + i].script))
# Add change
fund_tx.vout.append(CTxOut(balance - 10000, random.choice(host_spks)))
# Ask the wallet to sign
ss = BytesIO(bytes.fromhex(node.signrawtransactionwithwallet(fund_tx.serialize().hex())["hex"]))
fund_tx.deserialize(ss)
# Construct UTXOData entries
fund_tx.rehash()
for i in range(count_this_tx):
utxodata = UTXOData(outpoint=COutPoint(fund_tx.sha256, i), output=fund_tx.vout[i], spender=spenders[done])
if utxodata.spender.need_vin_vout_mismatch:
mismatching_utxos.append(utxodata)
else:
normal_utxos.append(utxodata)
done += 1
# Mine into a block
self.block_submit(node, [fund_tx], "Funding tx", None, random.choice(host_pubkeys), 10000, MAX_BLOCK_SIGOPS_WEIGHT, True, True)
# Consume groups of choice(input_coins) from utxos in a tx, testing the spenders.
self.log.info("- Running %i spending tests" % done)
random.shuffle(normal_utxos)
random.shuffle(mismatching_utxos)
assert done == len(normal_utxos) + len(mismatching_utxos)
left = done
while left:
# Construct CTransaction with random nVersion, nLocktime
tx = CTransaction()
tx.nVersion = random.choice([1, 2, random.randint(-0x80000000, 0x7fffffff)])
min_sequence = (tx.nVersion != 1 and tx.nVersion != 0) * 0x80000000 # The minimum sequence number to disable relative locktime
if random.choice([True, False]):
tx.nLockTime = random.randrange(LOCKTIME_THRESHOLD, self.lastblocktime - 7200) # all absolute locktimes in the past
else:
tx.nLockTime = random.randrange(self.lastblockheight + 1) # all block heights in the past
# Decide how many UTXOs to test with.
acceptable = [n for n in input_counts if n <= left and (left - n > max(input_counts) or (left - n) in [0] + input_counts)]
num_inputs = random.choice(acceptable)
# If we have UTXOs that require mismatching inputs/outputs left, include exactly one of those
# unless there is only one normal UTXO left (as tests with mismatching UTXOs require at least one
# normal UTXO to go in the first position), and we don't want to run out of normal UTXOs.
input_utxos = []
while len(mismatching_utxos) and (len(input_utxos) == 0 or len(normal_utxos) == 1):
input_utxos.append(mismatching_utxos.pop())
left -= 1
# Top up until we hit num_inputs (but include at least one normal UTXO always).
for _ in range(max(1, num_inputs - len(input_utxos))):
input_utxos.append(normal_utxos.pop())
left -= 1
# The first input cannot require a mismatching output (as there is at least one output).
while True:
random.shuffle(input_utxos)
if not input_utxos[0].spender.need_vin_vout_mismatch:
break
first_mismatch_input = None
for i in range(len(input_utxos)):
if input_utxos[i].spender.need_vin_vout_mismatch:
first_mismatch_input = i
assert first_mismatch_input is None or first_mismatch_input > 0
# Decide fee, and add CTxIns to tx.
amount = sum(utxo.output.nValue for utxo in input_utxos)
fee = min(random.randrange(MIN_FEE * 2, MIN_FEE * 4), amount - DUST_LIMIT) # 10000-20000 sat fee
in_value = amount - fee
tx.vin = [CTxIn(outpoint=utxo.outpoint, nSequence=random.randint(min_sequence, 0xffffffff)) for utxo in input_utxos]
tx.wit.vtxinwit = [CTxInWitness() for _ in range(len(input_utxos))]
sigops_weight = sum(utxo.spender.sigops_weight for utxo in input_utxos)
self.log.debug("Test: %s" % (", ".join(utxo.spender.comment for utxo in input_utxos)))
# Add 1 to 4 random outputs (but constrained by inputs that require mismatching outputs)
num_outputs = random.choice(range(1, 1 + min(4, 4 if first_mismatch_input is None else first_mismatch_input)))
assert in_value >= 0 and fee - num_outputs * DUST_LIMIT >= MIN_FEE
for i in range(num_outputs):
tx.vout.append(CTxOut())
if in_value <= DUST_LIMIT:
tx.vout[-1].nValue = DUST_LIMIT
elif i < num_outputs - 1:
tx.vout[-1].nValue = in_value
else:
tx.vout[-1].nValue = random.randint(DUST_LIMIT, in_value)
in_value -= tx.vout[-1].nValue
tx.vout[-1].scriptPubKey = random.choice(host_spks)
sigops_weight += CScript(tx.vout[-1].scriptPubKey).GetSigOpCount(False) * WITNESS_SCALE_FACTOR
fee += in_value
assert fee >= 0
# Select coinbase pubkey
cb_pubkey = random.choice(host_pubkeys)
sigops_weight += 1 * WITNESS_SCALE_FACTOR
# Precompute one satisfying and one failing scriptSig/witness for each input.
input_data = []
for i in range(len(input_utxos)):
fn = input_utxos[i].spender.sat_function
fail = None
success = fn(tx, i, [utxo.output for utxo in input_utxos], True)
if not input_utxos[i].spender.no_fail:
fail = fn(tx, i, [utxo.output for utxo in input_utxos], False)
input_data.append((fail, success))
if self.options.dump_tests:
dump_json_test(tx, input_utxos, i, success, fail)
# Sign each input incorrectly once on each complete signing pass, except the very last.
for fail_input in list(range(len(input_utxos))) + [None]:
# Skip trying to fail at spending something that can't be made to fail.
if fail_input is not None and input_utxos[fail_input].spender.no_fail:
continue
# Expected message with each input failure, may be None(which is ignored)
expected_fail_msg = None if fail_input is None else input_utxos[fail_input].spender.err_msg
# Fill inputs/witnesses
for i in range(len(input_utxos)):
tx.vin[i].scriptSig = input_data[i][i != fail_input][0]
tx.wit.vtxinwit[i].scriptWitness.stack = input_data[i][i != fail_input][1]
# Submit to mempool to check standardness
is_standard_tx = (
fail_input is None # Must be valid to be standard
and (all(utxo.spender.is_standard for utxo in input_utxos)) # All inputs must be standard
and tx.nVersion >= 1 # The tx version must be standard
and tx.nVersion <= 2)
tx.rehash()
msg = ','.join(utxo.spender.comment + ("*" if n == fail_input else "") for n, utxo in enumerate(input_utxos))
if is_standard_tx:
node.sendrawtransaction(tx.serialize().hex(), 0)
assert node.getmempoolentry(tx.hash) is not None, "Failed to accept into mempool: " + msg
else:
assert_raises_rpc_error(-26, None, node.sendrawtransaction, tx.serialize().hex(), 0)
# Submit in a block
self.block_submit(node, [tx], msg, witness=True, accept=fail_input is None, cb_pubkey=cb_pubkey, fees=fee, sigops_weight=sigops_weight, err_msg=expected_fail_msg)
if (len(spenders) - left) // 200 > (len(spenders) - left - len(input_utxos)) // 200:
self.log.info(" - %i tests done" % (len(spenders) - left))
assert left == 0
assert len(normal_utxos) == 0
assert len(mismatching_utxos) == 0
self.log.info(" - Done")
def gen_test_vectors(self):
"""Run a scenario that corresponds (and optionally produces) to BIP341 test vectors."""
self.log.info("Unit test scenario...")
# Deterministically mine coins to OP_TRUE in block 1
assert_equal(self.nodes[0].getblockcount(), 0)
coinbase = CTransaction()
coinbase.nVersion = 1
coinbase.vin = [CTxIn(COutPoint(0, 0xffffffff), CScript([OP_1, OP_1]), SEQUENCE_FINAL)]
coinbase.vout = [CTxOut(5000000000, CScript([OP_1]))]
coinbase.nLockTime = 0
coinbase.rehash()
assert coinbase.hash == "f60c73405d499a956d3162e3483c395526ef78286458a4cb17b125aa92e49b20"
# Mine it
block = create_block(hashprev=int(self.nodes[0].getbestblockhash(), 16), coinbase=coinbase)
block.rehash()
block.solve()
self.nodes[0].submitblock(block.serialize().hex())
assert_equal(self.nodes[0].getblockcount(), 1)
self.generate(self.nodes[0], COINBASE_MATURITY)
SEED = 317
VALID_LEAF_VERS = list(range(0xc0, 0x100, 2)) + [0x66, 0x7e, 0x80, 0x84, 0x96, 0x98, 0xba, 0xbc, 0xbe]
# Generate private keys
prvs = [hashlib.sha256(SEED.to_bytes(2, 'big') + bytes([i])).digest() for i in range(100)]
# Generate corresponding public x-only pubkeys
pubs = [compute_xonly_pubkey(prv)[0] for prv in prvs]
# Generate taproot objects
inner_keys = [pubs[i] for i in range(7)]
script_lists = [
None,
[("0", CScript([pubs[50], OP_CHECKSIG]), 0xc0)],
[("0", CScript([pubs[51], OP_CHECKSIG]), 0xc0)],
[("0", CScript([pubs[52], OP_CHECKSIG]), 0xc0), ("1", CScript([b"BIP341"]), VALID_LEAF_VERS[pubs[99][0] % 41])],
[("0", CScript([pubs[53], OP_CHECKSIG]), 0xc0), ("1", CScript([b"Taproot"]), VALID_LEAF_VERS[pubs[99][1] % 41])],
[("0", CScript([pubs[54], OP_CHECKSIG]), 0xc0), [("1", CScript([pubs[55], OP_CHECKSIG]), 0xc0), ("2", CScript([pubs[56], OP_CHECKSIG]), 0xc0)]],
[("0", CScript([pubs[57], OP_CHECKSIG]), 0xc0), [("1", CScript([pubs[58], OP_CHECKSIG]), 0xc0), ("2", CScript([pubs[59], OP_CHECKSIG]), 0xc0)]],
]
taps = [taproot_construct(inner_keys[i], script_lists[i]) for i in range(len(inner_keys))]
# Require negated taps[0]
assert taps[0].negflag
# Require one negated and one non-negated in taps 1 and 2.
assert taps[1].negflag != taps[2].negflag
# Require one negated and one non-negated in taps 3 and 4.
assert taps[3].negflag != taps[4].negflag
# Require one negated and one non-negated in taps 5 and 6.
assert taps[5].negflag != taps[6].negflag
cblks = [{leaf: get({**DEFAULT_CONTEXT, 'tap': taps[i], 'leaf': leaf}, 'controlblock') for leaf in taps[i].leaves} for i in range(7)]
# Require one swapped and one unswapped in taps 3 and 4.
assert (cblks[3]['0'][33:65] < cblks[3]['1'][33:65]) != (cblks[4]['0'][33:65] < cblks[4]['1'][33:65])
# Require one swapped and one unswapped in taps 5 and 6, both at the top and child level.
assert (cblks[5]['0'][33:65] < cblks[5]['1'][65:]) != (cblks[6]['0'][33:65] < cblks[6]['1'][65:])
assert (cblks[5]['1'][33:65] < cblks[5]['2'][33:65]) != (cblks[6]['1'][33:65] < cblks[6]['2'][33:65])
# Require within taps 5 (and thus also 6) that one level is swapped and the other is not.
assert (cblks[5]['0'][33:65] < cblks[5]['1'][65:]) != (cblks[5]['1'][33:65] < cblks[5]['2'][33:65])
# Compute a deterministic set of scriptPubKeys
tap_spks = []
old_spks = []
spend_info = {}
# First, taproot scriptPubKeys, for the tap objects constructed above
for i, tap in enumerate(taps):
tap_spks.append(tap.scriptPubKey)
d = {'key': prvs[i], 'tap': tap, 'mode': 'taproot'}
spend_info[tap.scriptPubKey] = d
# Then, a number of deterministically generated (keys 0x1,0x2,0x3) with 2x P2PKH, 1x P2WPKH spks.
for i in range(1, 4):
prv = ECKey()
prv.set(i.to_bytes(32, 'big'), True)
pub = prv.get_pubkey().get_bytes()
d = {"key": prv}
d["scriptcode"] = key_to_p2pkh_script(pub)
d["inputs"] = [getter("sign"), pub]
if i < 3:
# P2PKH
d['spk'] = key_to_p2pkh_script(pub)
d['mode'] = 'legacy'
else:
# P2WPKH
d['spk'] = key_to_p2wpkh_script(pub)
d['mode'] = 'witv0'
old_spks.append(d['spk'])
spend_info[d['spk']] = d
# Construct a deterministic chain of transactions creating UTXOs to the test's spk's (so that they
# come from distinct txids).
txn = []
lasttxid = coinbase.sha256
amount = 5000000000
for i, spk in enumerate(old_spks + tap_spks):
val = 42000000 * (i + 7)
tx = CTransaction()
tx.nVersion = 1
tx.vin = [CTxIn(COutPoint(lasttxid, i & 1), CScript([]), SEQUENCE_FINAL)]
tx.vout = [CTxOut(val, spk), CTxOut(amount - val, CScript([OP_1]))]
if i & 1:
tx.vout = list(reversed(tx.vout))
tx.nLockTime = 0
tx.rehash()
amount -= val
lasttxid = tx.sha256
txn.append(tx)
spend_info[spk]['prevout'] = COutPoint(tx.sha256, i & 1)
spend_info[spk]['utxo'] = CTxOut(val, spk)
# Mine those transactions
self.init_blockinfo(self.nodes[0])
self.block_submit(self.nodes[0], txn, "Crediting txn", None, sigops_weight=10, accept=True)
# scriptPubKey computation
tests = {"version": 1}
spk_tests = tests.setdefault("scriptPubKey", [])
for i, tap in enumerate(taps):
test_case = {}
given = test_case.setdefault("given", {})
given['internalPubkey'] = tap.internal_pubkey.hex()
def pr(node):
if node is None:
return None
elif isinstance(node, tuple):
return {"id": int(node[0]), "script": node[1].hex(), "leafVersion": node[2]}
elif len(node) == 1:
return pr(node[0])
elif len(node) == 2:
return [pr(node[0]), pr(node[1])]
else:
assert False
given['scriptTree'] = pr(script_lists[i])
intermediary = test_case.setdefault("intermediary", {})
if len(tap.leaves):
leafhashes = intermediary.setdefault('leafHashes', [None] * len(tap.leaves))
for leaf in tap.leaves:
leafhashes[int(leaf)] = tap.leaves[leaf].leaf_hash.hex()
intermediary['merkleRoot'] = tap.merkle_root.hex() if tap.merkle_root else None
intermediary['tweak'] = tap.tweak.hex()
intermediary['tweakedPubkey'] = tap.output_pubkey.hex()
expected = test_case.setdefault("expected", {})
expected['scriptPubKey'] = tap.scriptPubKey.hex()
expected['bip350Address'] = program_to_witness(1, bytes(tap.output_pubkey), True)
if len(tap.leaves):
control_blocks = expected.setdefault("scriptPathControlBlocks", [None] * len(tap.leaves))
for leaf in tap.leaves:
ctx = {**DEFAULT_CONTEXT, 'tap': tap, 'leaf': leaf}
control_blocks[int(leaf)] = get(ctx, "controlblock").hex()
spk_tests.append(test_case)
# Construct a deterministic transaction spending all outputs created above.
tx = CTransaction()
tx.nVersion = 2
tx.vin = []
inputs = []
input_spks = [tap_spks[0], tap_spks[1], old_spks[0], tap_spks[2], tap_spks[5], old_spks[2], tap_spks[6], tap_spks[3], tap_spks[4]]
sequences = [0, SEQUENCE_FINAL, SEQUENCE_FINAL, 0xfffffffe, 0xfffffffe, 0, 0, SEQUENCE_FINAL, SEQUENCE_FINAL]
hashtypes = [SIGHASH_SINGLE, SIGHASH_SINGLE|SIGHASH_ANYONECANPAY, SIGHASH_ALL, SIGHASH_ALL, SIGHASH_DEFAULT, SIGHASH_ALL, SIGHASH_NONE, SIGHASH_NONE|SIGHASH_ANYONECANPAY, SIGHASH_ALL|SIGHASH_ANYONECANPAY]
for i, spk in enumerate(input_spks):
tx.vin.append(CTxIn(spend_info[spk]['prevout'], CScript(), sequences[i]))
inputs.append(spend_info[spk]['utxo'])
tx.vout.append(CTxOut(1000000000, old_spks[1]))
tx.vout.append(CTxOut(3410000000, pubs[98]))
tx.nLockTime = 500000000
precomputed = {
"hashAmounts": BIP341_sha_amounts(inputs),
"hashPrevouts": BIP341_sha_prevouts(tx),
"hashScriptPubkeys": BIP341_sha_scriptpubkeys(inputs),
"hashSequences": BIP341_sha_sequences(tx),
"hashOutputs": BIP341_sha_outputs(tx)
}
keypath_tests = tests.setdefault("keyPathSpending", [])
tx_test = {}
global_given = tx_test.setdefault("given", {})
global_given['rawUnsignedTx'] = tx.serialize().hex()
utxos_spent = global_given.setdefault("utxosSpent", [])
for i in range(len(input_spks)):
utxos_spent.append({"scriptPubKey": inputs[i].scriptPubKey.hex(), "amountSats": inputs[i].nValue})
global_intermediary = tx_test.setdefault("intermediary", {})
for key in sorted(precomputed.keys()):
global_intermediary[key] = precomputed[key].hex()
test_list = tx_test.setdefault('inputSpending', [])
for i in range(len(input_spks)):
ctx = {
**DEFAULT_CONTEXT,
**spend_info[input_spks[i]],
'tx': tx,
'utxos': inputs,
'idx': i,
'hashtype': hashtypes[i],
'deterministic': True
}
if ctx['mode'] == 'taproot':
test_case = {}
given = test_case.setdefault("given", {})
given['txinIndex'] = i
given['internalPrivkey'] = get(ctx, 'key').hex()
if get(ctx, "tap").merkle_root != bytes():
given['merkleRoot'] = get(ctx, "tap").merkle_root.hex()
else:
given['merkleRoot'] = None
given['hashType'] = get(ctx, "hashtype")
intermediary = test_case.setdefault("intermediary", {})
intermediary['internalPubkey'] = get(ctx, "tap").internal_pubkey.hex()
intermediary['tweak'] = get(ctx, "tap").tweak.hex()
intermediary['tweakedPrivkey'] = get(ctx, "key_tweaked").hex()
sigmsg = get(ctx, "sigmsg")
intermediary['sigMsg'] = sigmsg.hex()
intermediary['precomputedUsed'] = [key for key in sorted(precomputed.keys()) if sigmsg.count(precomputed[key])]
intermediary['sigHash'] = get(ctx, "sighash").hex()
expected = test_case.setdefault("expected", {})
expected['witness'] = [get(ctx, "sign").hex()]
test_list.append(test_case)
tx.wit.vtxinwit.append(CTxInWitness())
tx.vin[i].scriptSig = CScript(flatten(get(ctx, "scriptsig")))
tx.wit.vtxinwit[i].scriptWitness.stack = flatten(get(ctx, "witness"))
aux = tx_test.setdefault("auxiliary", {})
aux['fullySignedTx'] = tx.serialize().hex()
keypath_tests.append(tx_test)
assert_equal(hashlib.sha256(tx.serialize()).hexdigest(), "24bab662cb55a7f3bae29b559f651674c62bcc1cd442d44715c0133939107b38")
# Mine the spending transaction
self.block_submit(self.nodes[0], [tx], "Spending txn", None, sigops_weight=10000, accept=True, witness=True)
if GEN_TEST_VECTORS:
print(json.dumps(tests, indent=4, sort_keys=False))
def run_test(self):
self.gen_test_vectors()
self.log.info("Post-activation tests...")
self.test_spenders(self.nodes[0], spenders_taproot_active(), input_counts=[1, 2, 2, 2, 2, 3])
# Run each test twice; once in isolation, and once combined with others. Testing in isolation
# means that the standardness is verified in every test (as combined transactions are only standard
# when all their inputs are standard).
self.test_spenders(self.nodes[0], spenders_taproot_nonstandard(), input_counts=[1])
self.test_spenders(self.nodes[0], spenders_taproot_nonstandard(), input_counts=[2, 3])
if __name__ == '__main__':
    # Allow running this functional test directly as a script.
    TaprootTest().main()
|
{
"content_hash": "a826c275d7ad0d9e46f0c8bb24e0f48a",
"timestamp": "",
"source": "github",
"line_count": 1758,
"max_line_length": 363,
"avg_line_length": 57.377133105802045,
"alnum_prop": 0.6373712438905907,
"repo_name": "fanquake/bitcoin",
"id": "31a6b31225a082c4a61562f014b7799de35ff543",
"size": "101123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/feature_taproot.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28178"
},
{
"name": "Batchfile",
"bytes": "13"
},
{
"name": "C",
"bytes": "1305140"
},
{
"name": "C++",
"bytes": "10324006"
},
{
"name": "CMake",
"bytes": "29182"
},
{
"name": "Cap'n Proto",
"bytes": "1256"
},
{
"name": "Dockerfile",
"bytes": "1740"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "Java",
"bytes": "541"
},
{
"name": "M4",
"bytes": "220899"
},
{
"name": "Makefile",
"bytes": "147451"
},
{
"name": "Objective-C++",
"bytes": "5497"
},
{
"name": "Python",
"bytes": "3018212"
},
{
"name": "QMake",
"bytes": "438"
},
{
"name": "Sage",
"bytes": "58534"
},
{
"name": "Scheme",
"bytes": "26038"
},
{
"name": "Shell",
"bytes": "169198"
}
],
"symlink_target": ""
}
|
"""Tests for the helper for building projects from source."""
import os
import unittest
from l2tdevtools import build_helper
from l2tdevtools import projects
from tests import test_lib
class BuildHelperFactoryTest(test_lib.BaseTestCase):
    """Tests the factory class for build helpers."""

    def testNewBuildHelper(self):
        """Tests the NewBuildHelper function."""
        definition = projects.ProjectDefinition('test')
        tools_path = os.path.dirname(os.path.dirname(os.path.dirname(
            os.path.abspath(__file__))))

        def _create_helper(build_target):
            # Factory call shared by all three assertions below.
            return build_helper.BuildHelperFactory.NewBuildHelper(
                definition, build_target, tools_path, {})

        # Without a build system defined, no helper can be created.
        self.assertIsNone(_create_helper('source'))

        # With a supported build system, a helper is created for 'source'.
        definition.build_system = 'setup_py'
        self.assertIsNotNone(_create_helper('source'))

        # An unsupported build target yields no helper.
        self.assertIsNone(_create_helper('bogus'))
if __name__ == '__main__':
    # Run the test cases with the standard unittest runner.
    unittest.main()
|
{
"content_hash": "59b49724a9b012e8339b153f52a2af61",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 73,
"avg_line_length": 31.72972972972973,
"alnum_prop": 0.7291311754684838,
"repo_name": "log2timeline/l2tdevtools",
"id": "8d39a99d4c799edcd1d4cce6f7153025b1e8f81a",
"size": "1220",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/build_helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "415"
},
{
"name": "Dockerfile",
"bytes": "1685"
},
{
"name": "Makefile",
"bytes": "5053"
},
{
"name": "PowerShell",
"bytes": "11811"
},
{
"name": "Python",
"bytes": "601304"
},
{
"name": "Shell",
"bytes": "15227"
}
],
"symlink_target": ""
}
|
import multiprocessing
import random
import threading
import time
from concurrent import futures
import grpc
from src.proto.grpc.testing import control_pb2
from src.proto.grpc.testing import services_pb2_grpc
from src.proto.grpc.testing import stats_pb2
from tests.qps import benchmark_client
from tests.qps import benchmark_server
from tests.qps import client_runner
from tests.qps import histogram
from tests.unit import resources
from tests.unit import test_common
class WorkerServer(services_pb2_grpc.WorkerServiceServicer):
    """Python Worker Server implementation.

    Implements the gRPC benchmarking WorkerService: hosts a benchmark server
    (RunServer), drives benchmark clients (RunClient), reports the number of
    cores (CoreCount), and shuts itself down on request (QuitWorker).
    """

    def __init__(self):
        # Set by QuitWorker; wait_for_quit() blocks on it.
        self._quit_event = threading.Event()

    def RunServer(self, request_iterator, context):
        """Start a benchmark server and stream status responses.

        The first request carries the server setup; each subsequent request is
        a mark. A mark with reset=True restarts the measurement window.
        """
        config = next(request_iterator).setup
        server, port = self._create_server(config)
        cores = multiprocessing.cpu_count()
        server.start()
        start_time = time.time()
        yield self._get_server_status(start_time, start_time, port, cores)
        for request in request_iterator:
            end_time = time.time()
            status = self._get_server_status(start_time, end_time, port, cores)
            if request.mark.reset:
                start_time = end_time
            yield status
        server.stop(None)

    def _get_server_status(self, start_time, end_time, port, cores):
        """Build a ServerStatus covering the elapsed measurement window.

        NOTE(review): the end_time parameter is immediately overwritten with
        time.time(); kept as-is to preserve existing reporting behavior.
        """
        end_time = time.time()
        elapsed_time = end_time - start_time
        stats = stats_pb2.ServerStats(
            time_elapsed=elapsed_time,
            time_user=elapsed_time,
            time_system=elapsed_time)
        return control_pb2.ServerStatus(stats=stats, port=port, cores=cores)

    def _create_server(self, config):
        """Create a benchmark gRPC server per config; return (server, bound port)."""
        if config.async_server_threads == 0:
            # This is the default concurrent.futures thread pool size, but
            # None doesn't seem to work
            server_threads = multiprocessing.cpu_count() * 5
        else:
            server_threads = config.async_server_threads
        server = test_common.test_server(max_workers=server_threads)
        if config.server_type == control_pb2.ASYNC_SERVER:
            servicer = benchmark_server.BenchmarkServer()
            services_pb2_grpc.add_BenchmarkServiceServicer_to_server(
                servicer, server)
        elif config.server_type == control_pb2.ASYNC_GENERIC_SERVER:
            resp_size = config.payload_config.bytebuf_params.resp_size
            servicer = benchmark_server.GenericBenchmarkServer(resp_size)
            method_implementations = {
                'StreamingCall':
                    grpc.stream_stream_rpc_method_handler(servicer.StreamingCall),
                'UnaryCall':
                    grpc.unary_unary_rpc_method_handler(servicer.UnaryCall),
            }
            handler = grpc.method_handlers_generic_handler(
                'grpc.testing.BenchmarkService', method_implementations)
            server.add_generic_rpc_handlers((handler,))
        else:
            raise Exception('Unsupported server type {}'.format(
                config.server_type))
        if config.HasField('security_params'):  # Use SSL
            server_creds = grpc.ssl_server_credentials(
                ((resources.private_key(), resources.certificate_chain()),))
            port = server.add_secure_port('[::]:{}'.format(config.port),
                                          server_creds)
        else:
            port = server.add_insecure_port('[::]:{}'.format(config.port))
        return (server, port)

    def RunClient(self, request_iterator, context):
        """Run benchmark clients and stream ClientStatus responses.

        The first request carries the client setup; each subsequent request is
        a mark. A mark with reset=True clears the histogram and restarts the
        measurement window.
        """
        config = next(request_iterator).setup
        client_runners = []
        qps_data = histogram.Histogram(config.histogram_params.resolution,
                                       config.histogram_params.max_possible)
        start_time = time.time()
        # Create a client for each channel.
        # Bug fix: use range() — xrange is Python 2 only and raises NameError
        # under Python 3.
        for i in range(config.client_channels):
            server = config.server_targets[i % len(config.server_targets)]
            runner = self._create_client_runner(server, config, qps_data)
            client_runners.append(runner)
            runner.start()
        end_time = time.time()
        yield self._get_client_status(start_time, end_time, qps_data)
        # Respond to stat requests
        for request in request_iterator:
            end_time = time.time()
            status = self._get_client_status(start_time, end_time, qps_data)
            if request.mark.reset:
                qps_data.reset()
                start_time = time.time()
            yield status
        # Cleanup the clients
        for runner in client_runners:
            runner.stop()

    def _get_client_status(self, start_time, end_time, qps_data):
        """Build a ClientStatus with latency histogram data for the window.

        NOTE(review): overwrites the passed end_time with time.time(), like
        _get_server_status; preserved for behavioral compatibility.
        """
        latencies = qps_data.get_data()
        end_time = time.time()
        elapsed_time = end_time - start_time
        stats = stats_pb2.ClientStats(
            latencies=latencies,
            time_elapsed=elapsed_time,
            time_user=elapsed_time,
            time_system=elapsed_time)
        return control_pb2.ClientStatus(stats=stats)

    def _create_client_runner(self, server, config, qps_data):
        """Construct the appropriate benchmark client runner for config."""
        if config.client_type == control_pb2.SYNC_CLIENT:
            if config.rpc_type == control_pb2.UNARY:
                client = benchmark_client.UnarySyncBenchmarkClient(
                    server, config, qps_data)
            elif config.rpc_type == control_pb2.STREAMING:
                client = benchmark_client.StreamingSyncBenchmarkClient(
                    server, config, qps_data)
        elif config.client_type == control_pb2.ASYNC_CLIENT:
            if config.rpc_type == control_pb2.UNARY:
                client = benchmark_client.UnaryAsyncBenchmarkClient(
                    server, config, qps_data)
            else:
                raise Exception('Async streaming client not supported')
        else:
            raise Exception('Unsupported client type {}'.format(
                config.client_type))
        # In multi-channel tests, we split the load across all channels
        load_factor = float(config.client_channels)
        if config.load_params.WhichOneof('load') == 'closed_loop':
            runner = client_runner.ClosedLoopClientRunner(
                client, config.outstanding_rpcs_per_channel)
        else:  # Open loop Poisson
            alpha = config.load_params.poisson.offered_load / load_factor

            def poisson():
                # Inter-arrival times for a Poisson process at rate alpha.
                while True:
                    yield random.expovariate(alpha)

            runner = client_runner.OpenLoopClientRunner(client, poisson())
        return runner

    def CoreCount(self, request, context):
        """Return the number of CPU cores available on this worker."""
        return control_pb2.CoreResponse(cores=multiprocessing.cpu_count())

    def QuitWorker(self, request, context):
        """Signal the worker to shut down."""
        self._quit_event.set()
        return control_pb2.Void()

    def wait_for_quit(self):
        """Block until QuitWorker has been called."""
        self._quit_event.wait()
|
{
"content_hash": "1009a710dbed5c6ff1fdb9455a859032",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 79,
"avg_line_length": 39.901162790697676,
"alnum_prop": 0.6124143960367187,
"repo_name": "murgatroid99/grpc",
"id": "41e2403c8fc9ad2a1caba88c3b3487fab299ae4f",
"size": "7441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/grpcio_tests/tests/qps/worker_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "24926"
},
{
"name": "C",
"bytes": "1453210"
},
{
"name": "C#",
"bytes": "1646979"
},
{
"name": "C++",
"bytes": "28778506"
},
{
"name": "CMake",
"bytes": "481419"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "Go",
"bytes": "27069"
},
{
"name": "JavaScript",
"bytes": "48756"
},
{
"name": "M4",
"bytes": "43234"
},
{
"name": "Makefile",
"bytes": "1064586"
},
{
"name": "Objective-C",
"bytes": "266834"
},
{
"name": "Objective-C++",
"bytes": "21939"
},
{
"name": "PHP",
"bytes": "340922"
},
{
"name": "Python",
"bytes": "2227399"
},
{
"name": "Ruby",
"bytes": "799613"
},
{
"name": "Shell",
"bytes": "412607"
},
{
"name": "Swift",
"bytes": "3486"
}
],
"symlink_target": ""
}
|
import logging
from pandas import Series, DataFrame, concat
from ramp.builders import build_target_safe
from ramp.features.base import to_feature, ComboFeature, Feature, AllDataFeature
from ramp.modeling import fit_model, generate_test
from ramp.utils import get_single_column, reindex_safe
class TrainedFeature(Feature):
    """Base class for features that must be fitted on training data.

    Trained features need visibility into the entire dataset rather than a
    single upstream feature, so they wrap an AllDataFeature.
    """

    def __init__(self):
        all_data = AllDataFeature()
        self.feature = all_data
        super(TrainedFeature, self).__init__(all_data)
class Predictions(TrainedFeature):
    """Feature whose values are the predictions of a fitted model."""
    # TODO: update for new context
    def __init__(self, model_def, name=None, external_data=None,
                 cv_folds=None):
        """
        If cv-folds is specified, will use k-fold cross-validation to
        provide robust predictions.
        (The predictions returned are those predicted on hold-out sets only.)
        Will not provide overly-optimistic fit like Predictions will, but can
        increase runtime significantly (nested cross-validation).
        Can be int or iteratable of (train, test) indices
        """
        self.cv_folds = cv_folds
        self.model_def = model_def
        self.external_data = external_data
        super(Predictions, self).__init__()
        #TODO
        # if self.external_context is not none:
        #     # dont need to retrain if using external dataset to train
        #     self.trained = false
        if not name:
            name = 'predictions'
        # Name embeds the estimator class and feature count for readability.
        self._name = '%s[%s,%d features]'%(name,
                model_def.estimator.__class__.__name__, len(model_def.features))

    def _train(self, train_data):
        # Fit the wrapped model on the training data; the fitted model becomes
        # this feature's trained state.
        x, y, fitted_model = fit_model(self.model_def, train_data)
        return fitted_model

    def _apply(self, data, fitted_feature):
        # Predict on `data` using the model fitted in _train.
        fitted_model = fitted_feature.trained_data
        #TODO
        # if self.cv_folds:
        #     if isinstance(self.cv_folds, int):
        #         folds = make_folds(context.train_index, self.cv_folds)
        #     else:
        #         folds = self.cv_folds
        #     preds = []
        #     for train, test in folds:
        #         ctx = context.copy()
        #         ctx.train_index = train
        #         preds.append(self._predict(ctx, test, fit_model=True))
        #     # if there is held-out data, use all of train to predict
        #     # (these predictions use more data, so will be "better",
        #     # not sure if that is problematic...)
        #     remaining = context.data.index - context.train_index
        #     if len(remaining):
        #         preds.append(self._predict(context, remaining))
        #     preds = concat(preds, axis=0)
        # else:
        preds = self._predict(fitted_model, data)
        preds = DataFrame(preds, index=data.index)
        return preds

    def _predict(self, fitted_model, predict_data):
        # Build the test matrix for predict_data and run the estimator.
        x_test, y_true = generate_test(self.model_def, predict_data, fitted_model)
        y_preds = self.model_def.estimator.predict(x_test)
        return y_preds

    def make_cross_validated_models(self, data, fitted_feature):
        # Placeholder for the nested cross-validation path (see TODO above).
        pass
class Residuals(Predictions):
    """Predictions variant that emits residuals (prediction minus actual)."""

    def _predict(self, fitted_model, predict_data):
        x_test, y_actual = generate_test(self.model_def, predict_data, fitted_model)
        predicted = self.model_def.estimator.predict(x_test)
        return predicted - y_actual
class FeatureSelector(ComboFeature):
    """Keeps only the columns chosen by a selector object fitted on training data."""

    def __init__(self, features, selector, target, data, n_keep=50,
                 threshold_arg=None):
        """Set up the selector, the target used for scoring, and the keep count."""
        super(FeatureSelector, self).__init__(features)
        self.selector = selector
        self.n_keep = n_keep
        self.threshold_arg = threshold_arg
        self.target = target
        self.data = data
        self._name = self._name + '_%d_%s'%(threshold_arg or n_keep, selector.__class__.__name__)

    def _train(self, train_datas):
        # Score the combined training columns against the target and remember
        # which columns the selector keeps.
        combined = concat(train_datas, axis=1)
        y, ff = build_target_safe(self.target, self.data)
        y = reindex_safe(y, combined.index)
        threshold = self.threshold_arg if self.threshold_arg is not None else self.n_keep
        return self.selector.select(combined, y, threshold)

    def _combine_apply(self, datas, fitted_feature):
        # Project the combined data down to the columns chosen during training.
        combined = concat(datas, axis=1)
        kept_columns = fitted_feature.trained_data
        return combined[kept_columns]
class TargetAggregationByFactor(TrainedFeature):
    """Maps each value of a categorical column to an aggregate of the target
    computed over training rows sharing that value, with optional
    regularization toward the global aggregate.
    """
    def __init__(self, group_by, func=None, target=None,
            min_sample=10, regularize=True):
        """
        group_by: column whose values define the groups.
        func: aggregation function applied to target values (e.g. a mean).
        target: target feature/column to aggregate.
        min_sample: minimum group size (non-regularized mode) or pseudo-count
            weight given to the global aggregate (regularized mode).
        regularize: blend each group's aggregate toward the global one.
        """
        super(TargetAggregationByFactor, self).__init__()
        self.group_by = group_by
        self.func = func
        self.target = to_feature(target)
        self.min_sample = min_sample
        self.regularize = regularize
    def __str__(self):
        return '%s(%s, %s)' % (self.__class__.__name__, self.group_by, self.target)
    def _train(self, train_data):
        """Compute the per-group aggregate mapping (dict: group value -> aggregate)."""
        y, ff = build_target_safe(self.target, train_data)
        train_data['__target'] = y
        global_value = self.func(y)
        if self.regularize:
            # Shrink each group's aggregate toward the global value, giving
            # the global value a pseudo-count of `min_sample` observations.
            f = lambda x: (self.func(x) * x.size + global_value * self.min_sample) / (x.size + self.min_sample)
            vals = train_data.groupby(self.group_by).agg({'__target': f})['__target'].to_dict()
        else:
            # Only keep groups with at least `min_sample` rows; the rest are
            # collapsed into a catch-all '__other' bucket.
            vc = train_data[self.group_by].value_counts()
            # Series.items() replaces the long-removed pandas iterkv().
            keys = [k for k, v in vc.items() if v >= self.min_sample]
            train_data['__grouping'] = train_data[self.group_by].map(lambda x: x if x in keys else '__other')
            vals = train_data.groupby('__grouping').agg({'__target': self.func})['__target'].to_dict()
            del train_data['__grouping']
        if '__other' not in vals:
            vals['__other'] = global_value
        logging.debug("Preparing Target Aggregations:")
        # list(...) required on Python 3, where dict views are not sliceable.
        logging.debug(str(list(vals.items())[:10]))
        del train_data['__target']
        return vals
    def _apply(self, data, fitted_feature):
        """Replace each group value with its trained aggregate; unknown values fall back to '__other'."""
        vals = fitted_feature.trained_data
        logging.debug("Loading Target aggs")
        logging.debug(str(list(vals.items())[:10]))
        logging.debug(str(data.columns))
        data = data[[self.group_by]]
        data = data.applymap(lambda x: vals.get(x, vals['__other']))
        return data
|
{
"content_hash": "b526822ddcd17544685c69798f030197",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 111,
"avg_line_length": 38.593939393939394,
"alnum_prop": 0.5942211055276382,
"repo_name": "kvh/ramp",
"id": "51dfb026d4e5a419bbdc81193635afe2a58455b3",
"size": "6368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ramp/features/trained.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "182962"
}
],
"symlink_target": ""
}
|
import math
def classify_input(network, input_data, positive_classification_predicate):
    """Reshape `input_data` to the network's input column shape, feed it
    through the network, and apply the predicate to the final layer output."""
    input_size = network.layers[0].weights.shape[1]
    final_output = network.compute_outputs(input_data.reshape((input_size, 1)))[-1]
    return positive_classification_predicate(final_output)
def measure_error(network, test_data_set):
    """Mean squared error of the network over `test_data_set`,
    a sequence of (input, expectation) pairs."""
    squared_errors = [network.compute_error(sample, expected) ** 2
                      for sample, expected in test_data_set]
    return math.fsum(squared_errors) / len(squared_errors)
def measure_precision(network, test_data_set, positive_classification_predicate):
    """Precision of the network over `test_data_set`: of the samples the
    network classifies positive, the fraction whose expectation is also
    positive.

    Returns 0.0 when no sample is classified positive, instead of raising
    ZeroDivisionError.
    """
    test_outputs = [(network.compute_outputs(test_input)[-1], test_expectation)
                    for test_input, test_expectation
                    in test_data_set]
    # For every sample classified positive, record whether its expectation
    # is also positive.
    results = [positive_classification_predicate(test_expectation)
               for test_output, test_expectation
               in test_outputs
               if positive_classification_predicate(test_output)]
    if not results:
        return 0.0
    return len([x for x in results if x]) / len(results)
def measure_recall(network, test_data_set, positive_classification_predicate):
    """Recall of the network over `test_data_set`: of the samples whose
    expectation is positive, the fraction the network classifies positive.

    Returns 0.0 when the set contains no positive expectations, instead of
    raising ZeroDivisionError.
    """
    results = [classify_input(network, test_input, positive_classification_predicate)
               for test_input, test_expectation
               in test_data_set
               if positive_classification_predicate(test_expectation)]
    if not results:
        return 0.0
    return len([x for x in results if x]) / len(results)
|
{
"content_hash": "a0eecd255bcfedaf89cab7f256dd4848",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 116,
"avg_line_length": 45.93333333333333,
"alnum_prop": 0.6915820029027576,
"repo_name": "frugs/PyTextDocumentClassifier",
"id": "60d63a1b1e97b197eb77e19bae0a308e60e44274",
"size": "1378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytdc/classification.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5666"
}
],
"symlink_target": ""
}
|
"""Provides device triggers for switches."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.device_automation import toggle_entity
from homeassistant.const import CONF_DOMAIN
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers.trigger import TriggerActionType, TriggerInfo
from homeassistant.helpers.typing import ConfigType
from . import DOMAIN
# Triggers reuse the generic toggle-entity schema, pinned to this
# integration's domain; unknown extra keys are allowed through.
TRIGGER_SCHEMA = vol.All(
    toggle_entity.TRIGGER_SCHEMA,
    vol.Schema({vol.Required(CONF_DOMAIN): DOMAIN}, extra=vol.ALLOW_EXTRA),
)
async def async_attach_trigger(
    hass: HomeAssistant,
    config: ConfigType,
    action: TriggerActionType,
    trigger_info: TriggerInfo,
) -> CALLBACK_TYPE:
    """Attach a state-change trigger; delegates to the toggle-entity helper."""
    detach = await toggle_entity.async_attach_trigger(hass, config, action, trigger_info)
    return detach
async def async_get_triggers(
    hass: HomeAssistant, device_id: str
) -> list[dict[str, str]]:
    """List the device triggers the toggle-entity helper provides for this domain."""
    triggers = await toggle_entity.async_get_triggers(hass, device_id, DOMAIN)
    return triggers
async def async_get_trigger_capabilities(
    hass: HomeAssistant, config: ConfigType
) -> dict[str, vol.Schema]:
    """List the capabilities of a trigger via the toggle-entity helper."""
    capabilities = await toggle_entity.async_get_trigger_capabilities(hass, config)
    return capabilities
|
{
"content_hash": "fe38aeb4b74e93d8a1b7a0ff6746329e",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 87,
"avg_line_length": 32.02439024390244,
"alnum_prop": 0.753998476770754,
"repo_name": "mezz64/home-assistant",
"id": "499b04bbaf3ab729565d74a89a61aaa6cc8d0aa4",
"size": "1313",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/switch/device_trigger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Helper for managing networks."""
import types
import inspect
import re
import uuid
import sys
import numpy as np
import tensorflow as tf
from collections import OrderedDict
from typing import Any, List, Tuple, Union
from . import tfutil
from .. import util
from .tfutil import TfExpression, TfExpressionEx
_import_handlers = [] # Custom import handlers for dealing with legacy data in pickle import.
_import_module_src = dict() # Source code for temporary modules created during pickle import.
def import_handler(handler_func):
"""Function decorator for declaring custom import handlers."""
_import_handlers.append(handler_func)
return handler_func
class Network:
"""Generic network abstraction.
Acts as a convenience wrapper for a parameterized network construction
function, providing several utility methods and convenient access to
the inputs/outputs/weights.
Network objects can be safely pickled and unpickled for long-term
archival purposes. The pickling works reliably as long as the underlying
network construction function is defined in a standalone Python module
that has no side effects or application-specific imports.
Args:
name: Network name. Used to select TensorFlow name and variable scopes.
func_name: Fully qualified name of the underlying network construction function, or a top-level function object.
static_kwargs: Keyword arguments to be passed in to the network construction function.
Attributes:
name: User-specified name, defaults to build func name if None.
scope: Unique TensorFlow scope containing template graph and variables, derived from the user-specified name.
static_kwargs: Arguments passed to the user-supplied build func.
components: Container for sub-networks. Passed to the build func, and retained between calls.
num_inputs: Number of input tensors.
num_outputs: Number of output tensors.
input_shapes: Input tensor shapes (NC or NCHW), including minibatch dimension.
output_shapes: Output tensor shapes (NC or NCHW), including minibatch dimension.
input_shape: Short-hand for input_shapes[0].
output_shape: Short-hand for output_shapes[0].
input_templates: Input placeholders in the template graph.
output_templates: Output tensors in the template graph.
input_names: Name string for each input.
output_names: Name string for each output.
own_vars: Variables defined by this network (local_name => var), excluding sub-networks.
vars: All variables (local_name => var).
trainables: All trainable variables (local_name => var).
var_global_to_local: Mapping from variable global names to local names.
"""
    def __init__(self, name: str = None, func_name: Any = None, **static_kwargs):
        """Resolve the build function, capture its module source, and build the template graph.

        Args:
            name: Optional network name; defaults to the build function's name.
            func_name: Fully qualified name of the build function, or a top-level function object.
            static_kwargs: Keyword arguments forwarded to the build function.
        """
        tfutil.assert_tf_initialized()
        assert isinstance(name, str) or name is None
        assert func_name is not None
        assert isinstance(func_name, str) or util.is_top_level_function(func_name)
        # static_kwargs are stored by __getstate__, so they must pickle cleanly.
        assert util.is_pickleable(static_kwargs)
        self._init_fields()
        self.name = name
        self.static_kwargs = util.EasyDict(static_kwargs)
        # Locate the user-specified network build function.
        if util.is_top_level_function(func_name):
            func_name = util.get_top_level_function_name(func_name)
        module, self._build_func_name = util.get_module_from_obj_name(func_name)
        self._build_func = util.get_obj_from_module(module, self._build_func_name)
        assert callable(self._build_func)
        # Dig up source code for the module containing the build function.
        # Source captured during a previous pickle import takes priority.
        self._build_module_src = _import_module_src.get(module, None)
        if self._build_module_src is None:
            self._build_module_src = inspect.getsource(module)
        # Init TensorFlow graph.
        self._init_graph()
        self.reset_own_vars()
    def _init_fields(self) -> None:
        """Reset all attributes (public metadata, graph templates, variable
        dicts, and internal build-func state) to empty defaults."""
        self.name = None
        self.scope = None
        self.static_kwargs = util.EasyDict()
        self.components = util.EasyDict()
        self.num_inputs = 0
        self.num_outputs = 0
        self.input_shapes = [[]]
        self.output_shapes = [[]]
        self.input_shape = []
        self.output_shape = []
        self.input_templates = []
        self.output_templates = []
        self.input_names = []
        self.output_names = []
        self.own_vars = OrderedDict()
        self.vars = OrderedDict()
        self.trainables = OrderedDict()
        self.var_global_to_local = OrderedDict()
        self._build_func = None # User-supplied build function that constructs the network.
        self._build_func_name = None # Name of the build function.
        self._build_module_src = None # Full source code of the module containing the build function.
        self._run_cache = dict() # Cached graph data for Network.run().
    def _init_graph(self) -> None:
        """Build the template graph: derive input names from the build func's
        signature, call the build func once under a unique scope, and collect
        the resulting outputs and variables."""
        # Collect inputs.
        # Every positional-or-keyword parameter without a default is an input tensor.
        self.input_names = []
        for param in inspect.signature(self._build_func).parameters.values():
            if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
                self.input_names.append(param.name)
        self.num_inputs = len(self.input_names)
        assert self.num_inputs >= 1
        # Choose name and scope.
        if self.name is None:
            self.name = self._build_func_name
        assert re.match("^[A-Za-z0-9_.\\-]*$", self.name)
        with tf.name_scope(None):
            self.scope = tf.get_default_graph().unique_name(self.name, mark_as_used=True)
        # Finalize build func kwargs.
        build_kwargs = dict(self.static_kwargs)
        build_kwargs["is_template_graph"] = True
        build_kwargs["components"] = self.components
        # Build template graph.
        with tfutil.absolute_variable_scope(self.scope, reuse=tf.AUTO_REUSE), tfutil.absolute_name_scope(self.scope): # ignore surrounding scopes
            assert tf.get_variable_scope().name == self.scope
            assert tf.get_default_graph().get_name_scope() == self.scope
            with tf.control_dependencies(None): # ignore surrounding control dependencies
                self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
                out_expr = self._build_func(*self.input_templates, **build_kwargs)
        # Collect outputs.
        assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple)
        self.output_templates = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr)
        self.num_outputs = len(self.output_templates)
        assert self.num_outputs >= 1
        assert all(tfutil.is_tf_expression(t) for t in self.output_templates)
        # Perform sanity checks.
        if any(t.shape.ndims is None for t in self.input_templates):
            raise ValueError("Network input shapes not defined. Please call x.set_shape() for each input.")
        if any(t.shape.ndims is None for t in self.output_templates):
            raise ValueError("Network output shapes not defined. Please call x.set_shape() where applicable.")
        if any(not isinstance(comp, Network) for comp in self.components.values()):
            raise ValueError("Components of a Network must be Networks themselves.")
        if len(self.components) != len(set(comp.name for comp in self.components.values())):
            raise ValueError("Components of a Network must have unique names.")
        # List inputs and outputs.
        self.input_shapes = [tfutil.shape_to_list(t.shape) for t in self.input_templates]
        self.output_shapes = [tfutil.shape_to_list(t.shape) for t in self.output_templates]
        self.input_shape = self.input_shapes[0]
        self.output_shape = self.output_shapes[0]
        self.output_names = [t.name.split("/")[-1].split(":")[0] for t in self.output_templates]
        # List variables.
        # Local names strip the scope prefix and the ":0" tensor suffix.
        self.own_vars = OrderedDict((var.name[len(self.scope) + 1:].split(":")[0], var) for var in tf.global_variables(self.scope + "/"))
        self.vars = OrderedDict(self.own_vars)
        self.vars.update((comp.name + "/" + name, var) for comp in self.components.values() for name, var in comp.vars.items())
        self.trainables = OrderedDict((name, var) for name, var in self.vars.items() if var.trainable)
        self.var_global_to_local = OrderedDict((var.name.split(":")[0], name) for name, var in self.vars.items())
def reset_own_vars(self) -> None:
"""Re-initialize all variables of this network, excluding sub-networks."""
tfutil.run([var.initializer for var in self.own_vars.values()])
def reset_vars(self) -> None:
"""Re-initialize all variables of this network, including sub-networks."""
tfutil.run([var.initializer for var in self.vars.values()])
def reset_trainables(self) -> None:
"""Re-initialize all trainable variables of this network, including sub-networks."""
tfutil.run([var.initializer for var in self.trainables.values()])
    def get_output_for(self, *in_expr: TfExpression, return_as_list: bool = False, **dynamic_kwargs) -> Union[TfExpression, List[TfExpression]]:
        """Construct TensorFlow expression(s) for the output(s) of this network, given the input expression(s)."""
        assert len(in_expr) == self.num_inputs
        assert not all(expr is None for expr in in_expr)
        # Finalize build func kwargs.
        build_kwargs = dict(self.static_kwargs)
        build_kwargs.update(dynamic_kwargs)
        build_kwargs["is_template_graph"] = False
        build_kwargs["components"] = self.components
        # Build TensorFlow graph to evaluate the network.
        with tfutil.absolute_variable_scope(self.scope, reuse=True), tf.name_scope(self.name):
            assert tf.get_variable_scope().name == self.scope
            valid_inputs = [expr for expr in in_expr if expr is not None]
            final_inputs = []
            for expr, name, shape in zip(in_expr, self.input_names, self.input_shapes):
                if expr is not None:
                    expr = tf.identity(expr, name=name)
                else:
                    # Missing inputs become zero tensors, batch-sized from the
                    # first input the caller actually provided.
                    expr = tf.zeros([tf.shape(valid_inputs[0])[0]] + shape[1:], name=name)
                final_inputs.append(expr)
            out_expr = self._build_func(*final_inputs, **build_kwargs)
        # Propagate input shapes back to the user-specified expressions.
        for expr, final in zip(in_expr, final_inputs):
            if isinstance(expr, tf.Tensor):
                expr.set_shape(final.shape)
        # Express outputs in the desired format.
        assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple)
        if return_as_list:
            out_expr = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr)
        return out_expr
def get_var_local_name(self, var_or_global_name: Union[TfExpression, str]) -> str:
"""Get the local name of a given variable, without any surrounding name scopes."""
assert tfutil.is_tf_expression(var_or_global_name) or isinstance(var_or_global_name, str)
global_name = var_or_global_name if isinstance(var_or_global_name, str) else var_or_global_name.name
return self.var_global_to_local[global_name]
def find_var(self, var_or_local_name: Union[TfExpression, str]) -> TfExpression:
"""Find variable by local or global name."""
assert tfutil.is_tf_expression(var_or_local_name) or isinstance(var_or_local_name, str)
return self.vars[var_or_local_name] if isinstance(var_or_local_name, str) else var_or_local_name
def get_var(self, var_or_local_name: Union[TfExpression, str]) -> np.ndarray:
"""Get the value of a given variable as NumPy array.
Note: This method is very inefficient -- prefer to use tflib.run(list_of_vars) whenever possible."""
return self.find_var(var_or_local_name).eval()
def set_var(self, var_or_local_name: Union[TfExpression, str], new_value: Union[int, float, np.ndarray]) -> None:
"""Set the value of a given variable based on the given NumPy array.
Note: This method is very inefficient -- prefer to use tflib.set_vars() whenever possible."""
tfutil.set_vars({self.find_var(var_or_local_name): new_value})
def __getstate__(self) -> dict:
"""Pickle export."""
state = dict()
state["version"] = 3
state["name"] = self.name
state["static_kwargs"] = dict(self.static_kwargs)
state["components"] = dict(self.components)
state["build_module_src"] = self._build_module_src
state["build_func_name"] = self._build_func_name
state["variables"] = list(zip(self.own_vars.keys(), tfutil.run(list(self.own_vars.values()))))
return state
    def __setstate__(self, state: dict) -> None:
        """Pickle import."""
        # pylint: disable=attribute-defined-outside-init
        tfutil.assert_tf_initialized()
        self._init_fields()
        # Execute custom import handlers.
        for handler in _import_handlers:
            state = handler(state)
        # Set basic fields.
        assert state["version"] in [2, 3]
        self.name = state["name"]
        self.static_kwargs = util.EasyDict(state["static_kwargs"])
        self.components = util.EasyDict(state.get("components", {}))
        self._build_module_src = state["build_module_src"]
        self._build_func_name = state["build_func_name"]
        # Create temporary module from the imported source code.
        module_name = "_tflib_network_import_" + uuid.uuid4().hex
        module = types.ModuleType(module_name)
        sys.modules[module_name] = module
        _import_module_src[module] = self._build_module_src
        # NOTE(review): executing pickled source is inherently trusting --
        # only unpickle networks from sources you trust.
        exec(self._build_module_src, module.__dict__) # pylint: disable=exec-used
        # Locate network build function in the temporary module.
        self._build_func = util.get_obj_from_module(module, self._build_func_name)
        assert callable(self._build_func)
        # Init TensorFlow graph.
        self._init_graph()
        self.reset_own_vars()
        # Restore saved variable values into the freshly built graph.
        tfutil.set_vars({self.find_var(name): value for name, value in state["variables"]})
def clone(self, name: str = None, **new_static_kwargs) -> "Network":
"""Create a clone of this network with its own copy of the variables."""
# pylint: disable=protected-access
net = object.__new__(Network)
net._init_fields()
net.name = name if name is not None else self.name
net.static_kwargs = util.EasyDict(self.static_kwargs)
net.static_kwargs.update(new_static_kwargs)
net._build_module_src = self._build_module_src
net._build_func_name = self._build_func_name
net._build_func = self._build_func
net._init_graph()
net.copy_vars_from(self)
return net
def copy_own_vars_from(self, src_net: "Network") -> None:
"""Copy the values of all variables from the given network, excluding sub-networks."""
names = [name for name in self.own_vars.keys() if name in src_net.own_vars]
tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names}))
def copy_vars_from(self, src_net: "Network") -> None:
"""Copy the values of all variables from the given network, including sub-networks."""
names = [name for name in self.vars.keys() if name in src_net.vars]
tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names}))
def copy_trainables_from(self, src_net: "Network") -> None:
"""Copy the values of all trainable variables from the given network, including sub-networks."""
names = [name for name in self.trainables.keys() if name in src_net.trainables]
tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names}))
def convert(self, new_func_name: str, new_name: str = None, **new_static_kwargs) -> "Network":
"""Create new network with the given parameters, and copy all variables from this network."""
if new_name is None:
new_name = self.name
static_kwargs = dict(self.static_kwargs)
static_kwargs.update(new_static_kwargs)
net = Network(name=new_name, func_name=new_func_name, **static_kwargs)
net.copy_vars_from(self)
return net
def setup_as_moving_average_of(self, src_net: "Network", beta: TfExpressionEx = 0.99, beta_nontrainable: TfExpressionEx = 0.0) -> tf.Operation:
"""Construct a TensorFlow op that updates the variables of this network
to be slightly closer to those of the given network."""
with tfutil.absolute_name_scope(self.scope + "/_MovingAvg"):
ops = []
for name, var in self.vars.items():
if name in src_net.vars:
cur_beta = beta if name in self.trainables else beta_nontrainable
new_value = tfutil.lerp(src_net.vars[name], var, cur_beta)
ops.append(var.assign(new_value))
return tf.group(*ops)
    def run(self,
            *in_arrays: Tuple[Union[np.ndarray, None], ...],
            input_transform: dict = None,
            output_transform: dict = None,
            return_as_list: bool = False,
            print_progress: bool = False,
            minibatch_size: int = None,
            num_gpus: int = 1,
            assume_frozen: bool = False,
            **dynamic_kwargs) -> Union[np.ndarray, Tuple[np.ndarray, ...], List[np.ndarray]]:
        """Run this network for the given NumPy array(s), and return the output(s) as NumPy array(s).
        Args:
            input_transform: A dict specifying a custom transformation to be applied to the input tensor(s) before evaluating the network.
                The dict must contain a 'func' field that points to a top-level function. The function is called with the input
                TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs.
            output_transform: A dict specifying a custom transformation to be applied to the output tensor(s) after evaluating the network.
                The dict must contain a 'func' field that points to a top-level function. The function is called with the output
                TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs.
            return_as_list: True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs.
            print_progress: Print progress to the console? Useful for very large input arrays.
            minibatch_size: Maximum minibatch size to use, None = disable batching.
            num_gpus: Number of GPUs to use.
            assume_frozen: Improve multi-GPU performance by assuming that the trainable parameters will remain changed between calls.
            dynamic_kwargs: Additional keyword arguments to be passed into the network build function.
        """
        assert len(in_arrays) == self.num_inputs
        assert not all(arr is None for arr in in_arrays)
        assert input_transform is None or util.is_top_level_function(input_transform["func"])
        assert output_transform is None or util.is_top_level_function(output_transform["func"])
        output_transform, dynamic_kwargs = _handle_legacy_output_transforms(output_transform, dynamic_kwargs)
        num_items = in_arrays[0].shape[0]
        if minibatch_size is None:
            minibatch_size = num_items
        # Construct unique hash key from all arguments that affect the TensorFlow graph.
        key = dict(input_transform=input_transform, output_transform=output_transform, num_gpus=num_gpus, assume_frozen=assume_frozen, dynamic_kwargs=dynamic_kwargs)
        def unwind_key(obj):
            # Recursively flatten dicts/callables into a deterministic, repr-able structure.
            if isinstance(obj, dict):
                return [(key, unwind_key(value)) for key, value in sorted(obj.items())]
            if callable(obj):
                return util.get_top_level_function_name(obj)
            return obj
        key = repr(unwind_key(key))
        # Build graph.
        # The evaluation graph is built once per unique key and cached in _run_cache.
        if key not in self._run_cache:
            with tfutil.absolute_name_scope(self.scope + "/_Run"), tf.control_dependencies(None):
                with tf.device("/cpu:0"):
                    in_expr = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
                    in_split = list(zip(*[tf.split(x, num_gpus) for x in in_expr]))
                out_split = []
                for gpu in range(num_gpus):
                    with tf.device("/gpu:%d" % gpu):
                        net_gpu = self.clone() if assume_frozen else self
                        in_gpu = in_split[gpu]
                        if input_transform is not None:
                            in_kwargs = dict(input_transform)
                            in_gpu = in_kwargs.pop("func")(*in_gpu, **in_kwargs)
                        in_gpu = [in_gpu] if tfutil.is_tf_expression(in_gpu) else list(in_gpu)
                        assert len(in_gpu) == self.num_inputs
                        out_gpu = net_gpu.get_output_for(*in_gpu, return_as_list=True, **dynamic_kwargs)
                        if output_transform is not None:
                            out_kwargs = dict(output_transform)
                            out_gpu = out_kwargs.pop("func")(*out_gpu, **out_kwargs)
                        out_gpu = [out_gpu] if tfutil.is_tf_expression(out_gpu) else list(out_gpu)
                        assert len(out_gpu) == self.num_outputs
                        out_split.append(out_gpu)
                with tf.device("/cpu:0"):
                    out_expr = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)]
                    self._run_cache[key] = in_expr, out_expr
        # Run minibatches.
        in_expr, out_expr = self._run_cache[key]
        out_arrays = [np.empty([num_items] + tfutil.shape_to_list(expr.shape)[1:], expr.dtype.name) for expr in out_expr]
        for mb_begin in range(0, num_items, minibatch_size):
            if print_progress:
                print("\r%d / %d" % (mb_begin, num_items), end="")
            mb_end = min(mb_begin + minibatch_size, num_items)
            mb_num = mb_end - mb_begin
            # None inputs are fed as zeros of the template's trailing shape.
            mb_in = [src[mb_begin : mb_end] if src is not None else np.zeros([mb_num] + shape[1:]) for src, shape in zip(in_arrays, self.input_shapes)]
            mb_out = tf.get_default_session().run(out_expr, dict(zip(in_expr, mb_in)))
            for dst, src in zip(out_arrays, mb_out):
                dst[mb_begin: mb_end] = src
        # Done.
        if print_progress:
            print("\r%d / %d" % (num_items, num_items))
        if not return_as_list:
            out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(out_arrays)
        return out_arrays
def list_ops(self) -> List[TfExpression]:
include_prefix = self.scope + "/"
exclude_prefix = include_prefix + "_"
ops = tf.get_default_graph().get_operations()
ops = [op for op in ops if op.name.startswith(include_prefix)]
ops = [op for op in ops if not op.name.startswith(exclude_prefix)]
return ops
    def list_layers(self) -> List[Tuple[str, TfExpression, List[TfExpression]]]:
        """Returns a list of (layer_name, output_expr, trainable_vars) tuples corresponding to
        individual layers of the network. Mainly intended to be used for reporting."""
        layers = []
        def recurse(scope, parent_ops, parent_vars, level):
            # Depth-first walk of the name-scope tree: a scope that directly
            # contains ops is reported as one layer; otherwise descend into
            # its child scopes.
            # Ignore specific patterns.
            if any(p in scope for p in ["/Shape", "/strided_slice", "/Cast", "/concat", "/Assign"]):
                return
            # Filter ops and vars by scope.
            global_prefix = scope + "/"
            local_prefix = global_prefix[len(self.scope) + 1:]
            cur_ops = [op for op in parent_ops if op.name.startswith(global_prefix) or op.name == global_prefix[:-1]]
            cur_vars = [(name, var) for name, var in parent_vars if name.startswith(local_prefix) or name == local_prefix[:-1]]
            if not cur_ops and not cur_vars:
                return
            # Filter out all ops related to variables.
            for var in [op for op in cur_ops if op.type.startswith("Variable")]:
                var_prefix = var.name + "/"
                cur_ops = [op for op in cur_ops if not op.name.startswith(var_prefix)]
            # Scope does not contain ops as immediate children => recurse deeper.
            contains_direct_ops = any("/" not in op.name[len(global_prefix):] and op.type != "Identity" for op in cur_ops)
            if (level == 0 or not contains_direct_ops) and (len(cur_ops) + len(cur_vars)) > 1:
                visited = set()
                for rel_name in [op.name[len(global_prefix):] for op in cur_ops] + [name[len(local_prefix):] for name, _var in cur_vars]:
                    token = rel_name.split("/")[0]
                    if token not in visited:
                        recurse(global_prefix + token, cur_ops, cur_vars, level + 1)
                        visited.add(token)
                return
            # Report layer.
            layer_name = scope[len(self.scope) + 1:]
            layer_output = cur_ops[-1].outputs[0] if cur_ops else cur_vars[-1][1]
            layer_trainables = [var for _name, var in cur_vars if var.trainable]
            layers.append((layer_name, layer_output, layer_trainables))
        recurse(self.scope, self.list_ops(), list(self.vars.items()), 0)
        return layers
    def print_layers(self, title: str = None, hide_layers_with_no_params: bool = False) -> None:
        """Print a summary table of the network structure."""
        rows = [[title if title is not None else self.name, "Params", "OutputShape", "WeightShape"]]
        rows += [["---"] * 4]
        total_params = 0
        for layer_name, layer_output, layer_trainables in self.list_layers():
            num_params = sum(np.prod(tfutil.shape_to_list(var.shape)) for var in layer_trainables)
            # Prefer the ".../weight" variable for the weight-shape column;
            # fall back to the sole trainable if there is exactly one.
            weights = [var for var in layer_trainables if var.name.endswith("/weight:0")]
            weights.sort(key=lambda x: len(x.name))
            if len(weights) == 0 and len(layer_trainables) == 1:
                weights = layer_trainables
            total_params += num_params
            if not hide_layers_with_no_params or num_params != 0:
                num_params_str = str(num_params) if num_params > 0 else "-"
                output_shape_str = str(layer_output.shape)
                weight_shape_str = str(weights[0].shape) if len(weights) >= 1 else "-"
                rows += [[layer_name, num_params_str, output_shape_str, weight_shape_str]]
        rows += [["---"] * 4]
        rows += [["Total", str(total_params), "", ""]]
        # Pad every cell to the width of the widest cell in its column.
        widths = [max(len(cell) for cell in column) for column in zip(*rows)]
        print()
        for row in rows:
            print("  ".join(cell + " " * (width - len(cell)) for cell, width in zip(row, widths)))
        print()
def setup_weight_histograms(self, title: str = None) -> None:
"""Construct summary ops to include histograms of all trainable parameters in TensorBoard."""
if title is None:
title = self.name
with tf.name_scope(None), tf.device(None), tf.control_dependencies(None):
for local_name, var in self.trainables.items():
if "/" in local_name:
p = local_name.split("/")
name = title + "_" + p[-1] + "/" + "_".join(p[:-1])
else:
name = title + "_toplevel/" + local_name
tf.summary.histogram(name, var)
#----------------------------------------------------------------------------
# Backwards-compatible emulation of legacy output transformation in Network.run().
_print_legacy_warning = True
def _handle_legacy_output_transforms(output_transform, dynamic_kwargs):
global _print_legacy_warning
legacy_kwargs = ["out_mul", "out_add", "out_shrink", "out_dtype"]
if not any(kwarg in dynamic_kwargs for kwarg in legacy_kwargs):
return output_transform, dynamic_kwargs
if _print_legacy_warning:
_print_legacy_warning = False
print()
print("WARNING: Old-style output transformations in Network.run() are deprecated.")
print("Consider using 'output_transform=dict(func=tflib.convert_images_to_uint8)'")
print("instead of 'out_mul=127.5, out_add=127.5, out_dtype=np.uint8'.")
print()
assert output_transform is None
new_kwargs = dict(dynamic_kwargs)
new_transform = {kwarg: new_kwargs.pop(kwarg) for kwarg in legacy_kwargs if kwarg in dynamic_kwargs}
new_transform["func"] = _legacy_output_transform_func
return new_transform, new_kwargs
def _legacy_output_transform_func(*expr, out_mul=1.0, out_add=0.0, out_shrink=1, out_dtype=None):
if out_mul != 1.0:
expr = [x * out_mul for x in expr]
if out_add != 0.0:
expr = [x + out_add for x in expr]
if out_shrink > 1:
ksize = [1, 1, out_shrink, out_shrink]
expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW") for x in expr]
if out_dtype is not None:
if tf.as_dtype(out_dtype).is_integer:
expr = [tf.round(x) for x in expr]
expr = [tf.saturate_cast(x, out_dtype) for x in expr]
return expr
|
{
"content_hash": "e482d8c2f09571308246d7a7b730957d",
"timestamp": "",
"source": "github",
"line_count": 591,
"max_line_length": 165,
"avg_line_length": 50.96277495769881,
"alnum_prop": 0.6190776586208041,
"repo_name": "microsoft/DiscoFaceGAN",
"id": "d888a90dd23c1a941b5fb501afec1efcb763b5ea",
"size": "30121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dnnlib/tflib/network.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "380445"
}
],
"symlink_target": ""
}
|
# Status constants; LogException.status (defined below) presumably carries
# one of these values -- confirm against callers.
STAT_SUCCESS = 'success'
STAT_FAILURE = 'failure'
STAT_INVALID_PARAMS = 'invalid_parameters'
class LogException(Exception):
    """Exception used for logging.

    Carries exactly one piece of state: the status to be logged
    (the STAT_* constants above are likely values).
    """

    def __init__(self, status):
        self.__status = status

    @property
    def status(self):
        """Read-only status carried by this exception."""
        return self.__status
class Dict(dict):
    """Dictionary that transparently encodes unicode keys/values to UTF-8.

    NOTE: relies on the Python 2 `unicode` builtin; it does not run on Python 3.
    """

    def __setitem__(self, key, value):
        encoded_key = key.encode("utf-8") if type(key) is unicode else key
        encoded_value = value.encode("utf-8") if type(value) is unicode else value
        super(Dict, self).__setitem__(encoded_key, encoded_value)
|
{
"content_hash": "7e3822a805199d9ecc6d5d9f765f062b",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 49,
"avg_line_length": 19.5,
"alnum_prop": 0.5675213675213675,
"repo_name": "jelly-ape/dts_server",
"id": "8a28a9c366ba4c3e0136a84a4ff9b1ce332174f2",
"size": "679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/api/libs/define.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "869"
},
{
"name": "HTML",
"bytes": "599"
},
{
"name": "JavaScript",
"bytes": "5019"
},
{
"name": "Python",
"bytes": "41941"
},
{
"name": "Shell",
"bytes": "952"
}
],
"symlink_target": ""
}
|
class EmptyTraceImporter(object):
    """Importer that accepts only empty trace data and performs no work."""

    def __init__(self, model, event_data, import_priority=0):
        # Empty traces carry no state, so there is nothing to initialize.
        pass

    @staticmethod
    def CanImport(event_data):
        # Only an empty list or an empty string counts as an empty trace.
        # NOTE: `basestring` is the Python 2 string base class.
        if isinstance(event_data, list):
            return not event_data
        if isinstance(event_data, basestring):
            return not event_data
        return False

    def ImportEvents(self):
        pass

    def FinalizeImport(self):
        pass
|
{
"content_hash": "f1244b5ade50158f1a556db600fcc112",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 59,
"avg_line_length": 23.333333333333332,
"alnum_prop": 0.6642857142857143,
"repo_name": "ChromiumWebApps/chromium",
"id": "df664c020891b45305c13314b9f112b8fd82404e",
"size": "583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/core/timeline/empty_trace_importer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "52960"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "42286199"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "198616766"
},
{
"name": "CSS",
"bytes": "937333"
},
{
"name": "DOT",
"bytes": "2984"
},
{
"name": "Java",
"bytes": "5695686"
},
{
"name": "JavaScript",
"bytes": "21967126"
},
{
"name": "M",
"bytes": "2190"
},
{
"name": "Matlab",
"bytes": "2262"
},
{
"name": "Objective-C",
"bytes": "7602057"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "1210885"
},
{
"name": "Python",
"bytes": "10774996"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1316721"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "TypeScript",
"bytes": "1560024"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "15243"
}
],
"symlink_target": ""
}
|
from textwrap import dedent
import pytest
from bokeh.models import FuncTickFormatter, Slider
flexx = pytest.importorskip("flexx")
def test_functickformatter_from_py_func_no_args():
    """A zero-argument python function is wrapped as ``return formatter();``."""
    def convert_to_minutes():
        return tick * 60 # noqa
    formatter = FuncTickFormatter.from_py_func(convert_to_minutes)
    # Strip the transpiled function body to leave only the call wrapper.
    transpiled = flexx.pyscript.py2js(convert_to_minutes, 'formatter')
    wrapper = formatter.code.replace(transpiled, '')
    assert wrapper == "return formatter();\n"
def test_functickformatter_from_py_func_with_args():
    """A Model-valued keyword default is exposed via formatter.args and passed to the wrapper."""
    slider = Slider()
    def convert_to_minutes(x=slider):
        return tick * 60 # noqa
    formatter = FuncTickFormatter.from_py_func(convert_to_minutes)
    # Strip the transpiled function body to leave only the call wrapper.
    transpiled = flexx.pyscript.py2js(convert_to_minutes, 'formatter')
    wrapper = formatter.code.replace(transpiled, '')
    assert wrapper == "return formatter(x);\n"
    assert formatter.args['x'] is slider
def test_functickformatter_bad_pyfunc_formats():
    """from_py_func rejects positional arguments and non-Model keyword defaults."""
    def has_positional_arg(x):
        return None
    def has_positional_arg_with_kwargs(y, x=5):
        return None
    def has_non_Model_keyword_argument(x=10):
        return None
    for bad_func in (has_positional_arg,
                     has_positional_arg_with_kwargs,
                     has_non_Model_keyword_argument):
        with pytest.raises(ValueError):
            FuncTickFormatter.from_py_func(bad_func)
def test_functickformatter_from_coffeescript_no_arg():
    """CoffeeScript without args compiles to the expected JS and leaves args empty."""
    coffee_code = dedent("""
        square = (x) -> x * x
        return square(tick)
        """)
    formatter = FuncTickFormatter.from_coffeescript(code=coffee_code)
    expected_js = dedent("""\
        "use strict";
        var square;
        square = function (x) {
            return x * x;
        };
        return square(tick);
        """)
    assert formatter.code == expected_js
    assert formatter.args == {}
def test_functickformatter_from_coffeescript_with_args():
    """CoffeeScript with args compiles correctly and retains the args mapping."""
    slider = Slider()
    coffee_code = dedent("""
        return slider.get("value") // 2 + tick
        """)
    formatter = FuncTickFormatter.from_coffeescript(code=coffee_code, args={"slider": slider})
    expected_js = dedent("""\
        "use strict";
        return Math.floor(slider.get("value") / 2) + tick;
        """)
    assert formatter.code == expected_js
    assert formatter.args == {"slider": slider}
|
{
"content_hash": "177eccd7467e36d97427302ce83767a3",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 94,
"avg_line_length": 29.195121951219512,
"alnum_prop": 0.6524644945697577,
"repo_name": "azjps/bokeh",
"id": "f610ba0b8ffdb004c63f46a79e31d5b533c5f60b",
"size": "2394",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bokeh/models/tests/test_formatters.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1710"
},
{
"name": "CSS",
"bytes": "92582"
},
{
"name": "CoffeeScript",
"bytes": "1051340"
},
{
"name": "HTML",
"bytes": "46812"
},
{
"name": "JavaScript",
"bytes": "34439"
},
{
"name": "Jupyter Notebook",
"bytes": "3981"
},
{
"name": "Makefile",
"bytes": "1164"
},
{
"name": "Python",
"bytes": "2152481"
},
{
"name": "Shell",
"bytes": "13140"
},
{
"name": "TypeScript",
"bytes": "87868"
}
],
"symlink_target": ""
}
|
import os, shutil
import plistlib
import errno
from sys import argv
from mod_pbxproj import XcodeProject
# Command-line arguments (presumably supplied by Unity's post-build hook --
# confirm against the invoking build script):
#   argv[1] = path to the generated Xcode project
#   argv[2] = path to the folder containing the SDK frameworks
projectPath = argv[1]
frameworkPath = argv[2]
def log(x):
    """Append a single line to the build-process log file in the current directory."""
    with open('GBiOSBuildProcessLog.txt', 'a') as log_file:
        log_file.write(x + "\n")
# --- Banner: record the inputs received from the build hook ---------------
log('------------------------------------------------------------\n')
log(' Start post_process.py \n')
log('------------------------------------------------------------\n')
log('Unity Project Path --> ' + projectPath)
log('FrameWork Path --> ' + frameworkPath)
log('------------------------------------------------------------\n')
log(' 1. Register SNS info \n')
log('------------------------------------------------------------\n')
# Load the generated Info.plist so SNS / SDK keys can be injected.
# NOTE(review): plistlib.readPlist/writePlist are the legacy API and were
# removed in Python 3.9 -- confirm which interpreter runs this script.
plist_path = os.path.join(projectPath, 'Info.plist')
plist = plistlib.readPlist(plist_path)
# usage
# 1. Facebook login
# - Add CFBundleURLSchemes / fb{FACEBOOK_APP_ID}
# - Add FacebookAppID
# - Add FacebookDisplayName
# 2. Google Plus
# - Add CFBundleURLSchemes / ${Bundle identifier}
# 3. Twitter
# - Add CFBundleURLSchemes / tw.{$Bundle identifier}
bundle_identifier = plist["CFBundleIdentifier"]
facebookAppID = "379212652436292"
# URL type so iOS can route the fb<app-id>:// login callback back to the app.
sns_setting = [{
    "CFBundleTypeRole" : "Editor",
    "CFBundleURLName" : "%s" % (bundle_identifier),
    "CFBundleURLSchemes" : ["fb%s" % facebookAppID]
}]
plist["CFBundleURLTypes"] = sns_setting
'''
<key>LSApplicationQueriesSchemes</key>
<array>
<string>fbapi</string>
<string>fbapi20130214</string>
<string>fbapi20130410</string>
<string>fbapi20130702</string>
<string>fbapi20131010</string>
<string>fbapi20131219</string>
<string>fbapi20140410</string>
<string>fbapi20140116</string>
<string>fbapi20150313</string>
<string>fbapi20150629</string>
<string>fbapi20160328</string>
<string>fbauth</string>
<string>fbauth2</string>
<string>fb-messenger-api20140430</string>
</array>
'''
# Schemes the app is allowed to probe via canOpenURL: (required since iOS 9).
# NOTE(review): "fb_schmes" looks like a typo for "fb_schemes"; it is a local
# name, so renaming would be safe, but it is left unchanged here.
fb_schmes = [
    "fbauth", "fbauth2", "fbapi",
]
plist["LSApplicationQueriesSchemes"] = fb_schmes
if len(facebookAppID) > 0:
    plist["FacebookAppID"] = facebookAppID
plist["AppLovinSdkKey"] = "wsGT89gFuGFIZrLsp6MrS_TQaRU_HuBCkSftbL6UcMnAB61_DOqgOI5zkaz0S9CAbt2CC8gqUS_gZ0fnPURonX"
plistlib.writePlist(plist, plist_path)
log('------------------------------------------------------------\n')
log(' 2. Add library (Framework) in Project \n')
log('------------------------------------------------------------\n')
# Open the generated pbxproj and attach every framework the plugin bundles.
project = XcodeProject.Load(projectPath + '/Unity-iPhone.xcodeproj/project.pbxproj')
log('Loaded project.pbxproj.')
result = project.add_file(frameworkPath + 'GBSdk.framework', tree='SDKROOT')
log('Added GBSdk SDK Framework')
project.add_file(frameworkPath + 'GoogleMobileAds.framework', tree='SDKROOT')
project.add_file(frameworkPath + 'UnityAds.framework', tree='SDKROOT')
project.add_file(frameworkPath + 'AppLovinSDK.framework', tree='SDKROOT')
project.add_file(frameworkPath + 'Bolts.framework', tree='SDKROOT')
project.add_file(frameworkPath + 'FBSDKCoreKit.framework', tree='SDKROOT')
project.add_file(frameworkPath + 'FBSDKLoginKit.framework', tree='SDKROOT')
project.add_file(frameworkPath + 'VungleSDK.framework', tree='SDKROOT')
project.add_file('System/Library/Frameworks/AdSupport.framework', tree='SDKROOT')
project.add_framework_search_paths(frameworkPath)
log('------------------------------------------------------------\n')
log(' 2-1. iOS9 Delete / Changed Library path \n')
log('------------------------------------------------------------\n')
# .tbd stubs replaced the older .dylib names starting with iOS 9.
project.add_file('usr/lib/libz.tbd', tree='SDKROOT')
project.add_file('usr/lib/libsqlite3.tbd', tree='SDKROOT')
log('------------------------------------------------------------\n')
log(' 3. Set Flag in Project Build Setting \n')
log('------------------------------------------------------------\n')
# Build settings adjusted for the bundled SDKs.
project.add_other_ldflags('-ObjC')
project.add_single_valued_flag('ENABLE_BITCODE', 'NO')
project.add_single_valued_flag('CLANG_ENABLE_MODULES', 'YES')
project.save()
log('------------------------------\n'
    ' Saved Project. \n'
    '------------------------------')
|
{
"content_hash": "826c99c249f79f6f81f78e0ec5225950",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 114,
"avg_line_length": 35.24786324786325,
"alnum_prop": 0.5834141610087293,
"repo_name": "nairs77/GB-Unity-Plugin",
"id": "da5f3116d8e57fe2ceca7ce11a5df3c412113208",
"size": "4519",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Assets/GB/Editor/post_process.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4410"
},
{
"name": "C#",
"bytes": "1683767"
},
{
"name": "Objective-C",
"bytes": "780856"
},
{
"name": "Objective-C++",
"bytes": "29102"
},
{
"name": "Python",
"bytes": "78247"
},
{
"name": "Shell",
"bytes": "254"
}
],
"symlink_target": ""
}
|
import py.test
import unittest
class MyPlufinSchedulerTester(unittest.TestCase):
    """Skeleton test case for the plugin scheduler; every fixture is currently a no-op."""

    @classmethod
    def setUpClass(cls):
        # One-time setup before any test in this class runs (not needed yet).
        pass

    @classmethod
    def tearDownClass(cls):
        # One-time teardown after all tests in this class ran (not needed yet).
        pass

    def setUp(self):
        # Per-test setup (not needed yet).
        pass

    def tearDown(self):
        # Per-test teardown (not needed yet).
        pass
|
{
"content_hash": "7eaae84a5fe488ad3bd38a0a6c31a020",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 49,
"avg_line_length": 16.11764705882353,
"alnum_prop": 0.6204379562043796,
"repo_name": "mrmcmuffinz/rpi.alarm",
"id": "5ac74ed0878cc6e9c944be3521161f8a9427452e",
"size": "274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_plugin_scheduler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17173"
}
],
"symlink_target": ""
}
|
'''
Design a stack that supports push, pop, top, and retrieving the minimum element in constant time.
push(x) -- Push element x onto stack.
pop() -- Removes the element on top of the stack.
top() -- Get the top element.
getMin() -- Retrieve the minimum element in the stack.
'''
# Approach 1: a single list whose entries are (value, min-so-far) pairs
class MinStack:
    """Stack supporting push, pop, top and getMin in O(1).

    Single-list variant: each entry is a tuple (value, minimum of the
    stack up to and including that entry), so the current minimum is
    always available at the top.
    """

    # initialize your data structure here.
    def __init__(self):
        self.q = []

    # @param x, an integer
    # @return nothing
    def push(self, x):
        minvalue = self.getMin()
        # Fixed: use `is None` (identity) rather than `== None`; the latter is
        # unidiomatic and can misbehave with values defining a custom __eq__.
        if minvalue is None or x < minvalue:
            minvalue = x
        self.q.append((x, minvalue))

    # @return nothing
    def pop(self):
        if self.q:
            self.q.pop()

    # @return an integer (top value), or None if the stack is empty
    def top(self):
        if self.q:
            return self.q[-1][0]

    # @return an integer (current minimum), or None if the stack is empty
    def getMin(self):
        if not self.q:
            return None
        else:
            return self.q[-1][1]
# Approach 2: two lists -- one for the values, one tracking the running minimum
class MinStack:
    """Stack with O(1) getMin, implemented with two parallel lists.

    list1 holds every pushed value; list2 holds candidate minima, its top
    being the current minimum of the stack.
    """

    def __init__(self):
        self.list1 = []  # all stacked values
        self.list2 = []  # running minima; top entry is the current minimum

    def push(self, x):
        self.list1.append(x)
        # Must be >=, not >: duplicated minima each need their own entry.
        # Test case: push 0, push 1, push 0, getMin, pop, getMin.
        if not self.list2 or self.list2[-1] >= x:
            self.list2.append(x)

    # @return nothing
    def pop(self):
        if not self.list1:
            return
        removed = self.list1.pop()
        if self.list2 and self.list2[-1] == removed:
            self.list2.pop()

    # @return an integer
    def top(self):
        if self.list1:
            return self.list1[-1]

    # @return an integer
    def getMin(self):
        if self.list2:
            return self.list2[-1]
|
{
"content_hash": "f6f96ee4e71c20ffd6433eef2d202901",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 117,
"avg_line_length": 24.36111111111111,
"alnum_prop": 0.5347776510832383,
"repo_name": "UmassJin/Leetcode",
"id": "5170e968e7c4fd426417e932d638972e848f4142",
"size": "1754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Array/Min_Stack.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "717672"
}
],
"symlink_target": ""
}
|
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('..'))
# standard imports
import configargparse as argparse
import numpy as np
import h5py
import vigra
import time
import glob
import logging
from skimage.external import tifffile
def get_num_frames(options):
    """Return the number of timesteps: the dataset's time axis for a single
    HDF5 file, otherwise simply the number of per-frame input files."""
    if len(options.input_files) != 1:
        return len(options.input_files)
    with h5py.File(options.input_files[0], 'r') as in_h5:
        return in_h5[options.label_image_path].shape[0]
def get_frame_label_image(timestep, options):
    """Load and squeeze the label image for the given timestep."""
    if len(options.input_files) == 1:
        # Single-file layout: time is the first axis; the trailing channel
        # axis is dropped -- assumes a (t, ..., c) dataset, TODO confirm.
        with h5py.File(options.input_files[0], 'r') as in_h5:
            return np.array(in_h5[options.label_image_path][timestep, ..., 0]).squeeze()
    # Per-frame layout: the file for this timestep holds just one image.
    with h5py.File(options.input_files[timestep], 'r') as in_h5:
        return np.array(in_h5[options.label_image_path]).squeeze()
def get_frame_dataset(timestep, dataset, options):
    """Read the tracking dataset (e.g. "Moves"/"Splits") for a timestep.

    Returns an empty array when the dataset is absent.
    """
    if len(options.input_files) == 1:
        # Single-file layout: tracking/<zero-padded timestep>/<dataset>.
        h5_filename = options.input_files[0]
        ds_name = 'tracking/' + format(timestep, "0{}".format(options.h5group_zero_padding)) + '/' + dataset
    else:
        # Per-frame layout: tracking/<dataset> inside the frame's own file.
        h5_filename = options.input_files[timestep]
        ds_name = 'tracking/' + dataset
    with h5py.File(h5_filename, 'r') as in_h5:
        if ds_name in in_h5:
            return np.array(in_h5[ds_name])
    return np.zeros(0)
def save_frame_to_tif(timestep, label_image, options):
    """Write one frame as a 16-bit tif (man_trackNNN.tif for the single-file
    ground-truth case, maskNNN.tif for per-frame results)."""
    prefix = '/man_track' if len(options.input_files) == 1 else '/mask'
    filename = options.output_dir + prefix + format(timestep, "0{}".format(options.filename_zero_padding)) + '.tif'
    # Swap the first two axes -- presumably x/y to row/column order, TODO confirm.
    label_image = np.swapaxes(label_image, 0, 1)
    if len(label_image.shape) == 2:
        # 2D image.
        vigra.impex.writeImage(label_image.astype('uint16'), filename)
    else:
        # 3D volume: move the third axis to the front before writing.
        label_image = np.transpose(label_image, axes=[2, 0, 1])
        tifffile.imsave(filename, label_image.astype('uint16'))
def save_tracks(tracks, num_frames, options):
    """Write the CTC track file: one "<id> <begin> <end> <parent>" line per track.

    Tracks still open (only [parent, begin] recorded) are closed at the
    last frame. Note: this appends to the `tracks` values in place.
    """
    if len(options.input_files) == 1:
        track_filename = options.output_dir + '/man_track.txt'
    else:
        track_filename = options.output_dir + '/res_track.txt'
    with open(track_filename, 'wt') as f:
        for track_id, info in tracks.items():
            if len(info) == 2:
                # Track never ended explicitly -> it lives until the last frame.
                info.append(num_frames - 1)
            # info is stored as [parent, begin, end]; CTC wants "id begin end parent".
            f.write("{} {} {} {}\n".format(track_id, info[1], info[2], info[0]))
def remap_label_image(label_image, mapping):
    """Return a copy of `label_image` with pixel values remapped via `mapping`.

    `mapping` maps old pixel value -> new pixel value; pixels whose value
    has no entry in `mapping` become 0 (the array starts zero-filled).
    """
    remapped = np.zeros_like(label_image)
    for old_value, new_value in mapping.items():
        remapped[label_image == old_value] = new_value
    return remapped
def convert_label_volume(options):
    """Convert an HDF5 event-tracking solution into Cell Tracking Challenge output.

    Writes one remapped 16-bit label image per frame plus a track file where
    each track is recorded internally as [parent, begin(, end)] and written
    as "id begin end parent" by save_tracks().
    """
    num_frames = get_num_frames(options)
    if num_frames == 0:
        logging.getLogger('hdf5_to_ctc.py').error("Cannot work on empty set")
        return
    # for each track, indexed by first label, store [parent, begin, end]
    tracks = {}
    old_mapping = {} # mapping from label_id to track_id
    new_track_id = 1
    # handle frame 0 -> only add those nodes that are referenced from frame 1 events
    label_image = get_frame_label_image(0, options)
    label_image_indices = np.unique(label_image)
    logging.getLogger('hdf5_to_ctc.py').debug("Processing frame 0 of shape {}".format(label_image.shape))
    moves = get_frame_dataset(1, "Moves", options)
    splits = get_frame_dataset(1, "Splits", options)
    # splits could be empty
    # Column 0 of both datasets holds the source label in the previous frame.
    if len(splits) == 0:
        if len(moves) == 0:
            referenced_labels = set([])
        else:
            referenced_labels = set(moves[:, 0])
    elif len(moves) == 0:
        referenced_labels = set(splits[:, 0])
    else:
        referenced_labels = set(moves[:, 0]) | set(splits[:, 0]) # set union
    # Assign a fresh track id to every frame-0 label referenced by frame-1 events;
    # label 0 is background and labels absent from the image are skipped.
    for l in referenced_labels:
        if l == 0 or not l in label_image_indices:
            continue
        old_mapping[l] = new_track_id
        tracks[new_track_id] = [0, 0]
        new_track_id += 1
    remapped_label_image = remap_label_image(label_image, old_mapping)
    save_frame_to_tif(0, remapped_label_image, options)
    logging.getLogger('hdf5_to_ctc.py').debug("Tracks in first frame: {}".format(new_track_id))
    # handle all further frames by remapping their indices
    for frame in range(1, num_frames):
        old_label_image = label_image
        old_label_image_indices = np.unique(old_label_image)
        start_time = time.time()
        label_image = get_frame_label_image(frame, options)
        label_image_indices = np.unique(label_image)
        logging.getLogger('hdf5_to_ctc.py').debug("Processing frame {} of shape {}".format(frame, label_image.shape))
        mapping = {}
        moves = get_frame_dataset(frame, "Moves", options)
        splits = get_frame_dataset(frame, "Splits", options)
        # find the continued tracks
        for src, dest in moves:
            if src == 0 or dest == 0 or not src in old_label_image_indices or not dest in label_image_indices:
                continue
            # see whether this was a track continuation or the first leg of a new track
            if src in old_mapping.keys():
                mapping[dest] = old_mapping[src]
            elif len(splits)==0 or src not in list(splits[:,0]):
                mapping[dest] = new_track_id
                tracks[new_track_id] = [0, frame]
                new_track_id += 1
        # find all divisions
        for s in range(splits.shape[0]):
            # end parent track
            parent = splits[s, 0]
            if parent in old_mapping.keys():
                tracks[old_mapping[parent]].append(frame - 1)
            elif not parent in old_label_image_indices:
                logging.getLogger('hdf5_to_ctc.py').warning("Found division where parent id was not present in previous frame")
                parent = 0
                old_mapping[parent] = 0
            else:
                # insert a track of length 1 as parent of the new track
                old_mapping[parent] = new_track_id
                tracks[new_track_id] = [0, frame - 1, frame - 1]
                new_track_id += 1
                logging.getLogger('hdf5_to_ctc.py').warning("Adding single-node-track parent of division with id {}".format(new_track_id - 1))
                # Re-save the previous frame so the late-added parent track shows up in it.
                remapped_label_image = remap_label_image(old_label_image, old_mapping)
                save_frame_to_tif(frame-1, remapped_label_image, options)
            # create new tracks for all children
            for c in splits[s, 1:]:
                if c in label_image_indices:
                    tracks[new_track_id] = [old_mapping[parent], frame]
                    mapping[c] = new_track_id
                    new_track_id += 1
                else:
                    logging.getLogger('hdf5_to_ctc.py').warning("Discarding child {} of parent track {} because it is not present in image".format(c, parent))
        # find all tracks that ended (so not in a move or split (-> is parent))
        disappeared_indices = set(old_mapping.values()) - set(mapping.values())
        for idx in disappeared_indices:
            tracks[idx].append(frame - 1)
        # create a new label image with remapped indices (only those of tracks) and save it
        remapped_label_image = remap_label_image(label_image, mapping)
        save_frame_to_tif(frame, remapped_label_image, options)
        # save for next iteration
        old_mapping = mapping
        logging.getLogger('hdf5_to_ctc.py').debug("\tFrame done in {} secs".format(time.time() - start_time))
        logging.getLogger('hdf5_to_ctc.py').debug("Track count is now at {}".format(new_track_id))
    logging.getLogger('hdf5_to_ctc.py').info("Done processing frames, saving track info...")
    # done, save tracks
    save_tracks(tracks, num_frames, options)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Convert H5 event tracking solution to CTC format',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c', '--config', is_config_file=True, help='config file path')
    parser.add_argument('--ctc-output-dir', type=str, dest='output_dir', required=True,
                        help='Folder where to save the label images starting with man_track00.tif, as well as a file man_track.txt')
    parser.add_argument('--h5-event-input-file-pattern', type=str, dest='input_file_pattern', required=True,
                        help='HDF5 file of ground truth, or file pattern matching individual frames')
    parser.add_argument('--h5-event-label-image-path', type=str, dest='label_image_path', default='label_image',
                        help='Path inside the HDF5 file(s) to the label image')
    # String defaults are fine here: argparse applies type=int to string defaults.
    parser.add_argument('--ctc-filename-zero-pad-length', type=int, dest='filename_zero_padding', default='3')
    parser.add_argument('--h5-group-zero-pad-length', type=int, dest='h5group_zero_padding', default='4')
    parser.add_argument("--verbose", dest='verbose', action='store_true', default=False)
    # parse command line
    args, unknown = parser.parse_known_args()
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    logging.getLogger('hdf5_to_ctc.py').debug("Ignoring unknown parameters: {}".format(unknown))
    # find all files matching the pattern
    args.input_files = glob.glob(args.input_file_pattern)
    args.input_files.sort()
    # NOTE(review): this uses the root logger, unlike the named
    # 'hdf5_to_ctc.py' logger used everywhere else -- probably an oversight.
    logging.info("Found {} files".format(len(args.input_files)))
    # make sure output directory exists
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    convert_label_volume(args)
|
{
"content_hash": "f38f4a20a6f6f4a1b350f39a616e7981",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 158,
"avg_line_length": 43.61206896551724,
"alnum_prop": 0.6181063451274955,
"repo_name": "chaubold/hytra",
"id": "237acdb30d83199e6ceeb9ee883a1ae03638e89f",
"size": "10233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/ctc/hdf5_to_ctc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "34455"
},
{
"name": "Matlab",
"bytes": "1724"
},
{
"name": "Python",
"bytes": "1074567"
},
{
"name": "Shell",
"bytes": "14137"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render_to_response
from django.template import RequestContext
def angular_view(request):
    """Render the single-page application shell (index.html) with the request context."""
    return render_to_response('index.html', context_instance=RequestContext(request))
|
{
"content_hash": "63b9ec3e8c50f9e9da35b055fe5ffaaa",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 69,
"avg_line_length": 37.666666666666664,
"alnum_prop": 0.8008849557522124,
"repo_name": "Laimiux/mydeatree",
"id": "7eb20cc5c142ce2834555e3244b71dea484595b3",
"size": "226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/views.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "244905"
},
{
"name": "Erlang",
"bytes": "4276"
},
{
"name": "JavaScript",
"bytes": "3885310"
},
{
"name": "Python",
"bytes": "4957038"
},
{
"name": "Shell",
"bytes": "627"
}
],
"symlink_target": ""
}
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import norm
from matplotlib_scalebar.scalebar import ScaleBar
def plot_morph(ax, df_paths, view, plot_axon, plot_basal_dendrites, plot_apical_dendrites):
    """Plot a 2D projection of a neuron morphology onto `ax`, soma at the origin.

    :param ax: matplotlib axis to draw on.
    :param df_paths: DataFrame with columns `type` (structure id), `path`
        (array of 3D coordinates) and `branch_order`.
    :param view: projection plane, one of 'xy', 'xz', 'yz'.
    :param plot_axon: whether to draw axonal paths.
    :param plot_basal_dendrites: whether to draw basal dendrites.
    :param plot_apical_dendrites: whether to draw apical dendrites.
    :return: the axis `ax`.
    """
    # Choose the two coordinate columns for the requested projection.
    # NOTE(review): any other `view` value leaves axis0/axis1 undefined and
    # fails later with NameError -- consider validating the argument.
    if view == 'xy':
        axis0 = 0
        axis1 = 1
    elif view == 'xz':
        axis0 = 0
        axis1 = 2
    elif view == 'yz':
        axis0 = 1
        axis1 = 2
    # Type codes follow the SWC convention: 1 soma, 2 axon, 3 basal, 4 apical.
    soma = df_paths[df_paths.type == 1].path[0][0]
    axon = df_paths[df_paths.type == 2]
    basal_dendrites = df_paths[df_paths.type == 3]
    apical_dendrites = df_paths[df_paths.type == 4]
    # Soma marker at the origin; all paths below are shifted relative to it.
    ax.scatter(0, 0, s=280, color='grey')
    if plot_basal_dendrites and len(basal_dendrites)>0:
        # Color each path by its branch order (Reds colormap, reversed).
        bdcolors_idx = np.linspace(0, 200, max(basal_dendrites.branch_order)+1).astype(int)
        bdcolors = np.vstack(plt.cm.Reds_r(bdcolors_idx))[:, :3]
        for row in basal_dendrites.iterrows():
            path_id = row[0]
            path = row[1]['path'] - soma
            order = row[1]['branch_order']
            bpt = path[0]  # first point of the path -- the branch point
            dend_plot = ax.plot(path[:, axis0], path[:, axis1], color=bdcolors[int(order)])
            ax.scatter(bpt[axis0], bpt[axis1], color=bdcolors[int(order)], zorder=1)
    if plot_apical_dendrites and len(apical_dendrites)>0:
        # Same scheme with the Purples colormap.
        adcolors_idx = np.linspace(0, 200, max(apical_dendrites.branch_order)+1).astype(int)
        adcolors = np.vstack(plt.cm.Purples_r(adcolors_idx))[:, :3]
        for row in apical_dendrites.iterrows():
            path_id = row[0]
            path = row[1]['path'] - soma
            order = row[1]['branch_order']
            bpt = path[0]
            dend_plot = ax.plot(path[:, axis0], path[:, axis1], color=adcolors[int(order)])
            ax.scatter(bpt[axis0], bpt[axis1], color=adcolors[int(order)], zorder=1)
    if plot_axon and len(axon)>0:
        # Same scheme with the Blues colormap.
        acolors_idx = np.linspace(0, 200, max(axon.branch_order)+1).astype(int)
        acolors = np.vstack(plt.cm.Blues_r(acolors_idx))[:, :3]
        for row in axon.iterrows():
            path_id = row[0]
            path = row[1]['path'] - soma
            order = row[1]['branch_order']
            bpt = path[0]
            axon_plot = ax.plot(path[:, axis0], path[:, axis1], color=acolors[int(order)])
            ax.scatter(bpt[axis0], bpt[axis1], color=acolors[int(order)], zorder=1)
    # Symmetric square limits rounded out to a multiple of 20.
    # NOTE(review): DataFrame column .as_matrix() was removed in pandas 1.0;
    # use .to_numpy() when upgrading.
    lim_max = int(np.ceil((np.vstack(df_paths.path.as_matrix()) - soma).max() / 20) * 20)
    lim_min = int(np.floor((np.vstack(df_paths.path.as_matrix()) - soma).min() / 20) * 20)
    lim = max(abs(lim_max), abs(lim_min))
    ax.set_xlim(-lim, lim)
    ax.set_ylim(-lim, lim)
    scalebar = ScaleBar(1, units='um', location='lower left', box_alpha=0)
    ax.add_artist(scalebar)
    ax.set_title('{}'.format(view))
    ax.axis('off')
    return ax
def plot_persistence_diagram(data, ax):
    """
    Plots the persistence diagram defined in data as a scatter plot.
    :param data: pandas.DataFrame holding the persistence data.
    :param ax: axis for the data to be plotted on.
    :return: axis ax of the plot
    """
    birth, death = data['birth'], data['death']
    ax.scatter(birth, death, s=4, c='k', alpha=.4)
    # Diagonal reference line from the origin to the data maxima.
    ax.plot([0, np.max(birth)], [0, np.max(death)])
    sns.despine()
    ax.set_xlabel('birth [dist from soma in um]')
    ax.set_ylabel('death [dist from soma in um]')
    ax.set_title('persistence diagram')
    return ax
def plot_persistence_image_1d(data, ax):
    """
    Plots the persistence as a 1 dimensional persistence image as defined in _Metrics for comparing neuronal tree
    shapes based on persistent homology_ Y. Li, D. Wang, G. Ascoli et al. , 2017.
    The 1D persistence image is a sum of Gaussian kernels, one per branch, centered at the
    branch's birth time and weighted by its lifetime (|birth - death|).
    Formally:
    $p_D(x) = \sum_{i=1}^{k} |y_i - x_i|\cdot K_t(x,x_i)$
    where $K_t(x,x_i)$ denotes a Gaussian kernel centered at $x_i$ with width $t$. Here $t$ is chosen to be $50$ as
    in the original paper.
    :param data: pandas.DataFrame holding the persistence data.
    :param ax: axis for the data to be plotted on.
    :return: axis ax of the plot
    """
    steps = 100
    t = 50  # kernel width, as in the original paper
    xs = np.linspace(0, np.max(data['birth']), steps)
    ys = np.zeros((steps,))
    for _, branch in data.iterrows():
        lifetime = np.abs(branch['birth'] - branch['death'])
        ys += lifetime * norm.pdf(xs, loc=branch['birth'], scale=t)
    ax.plot(xs, ys)
    ax.set_xlabel('birth [dist from soma in um]')
    ax.set_ylabel('persistence p_D(x)')
    sns.despine()
    ax.set_title('persistence image 1D \n (Wang et al., 2017)')
    return ax
def plot_persistence_image_2d(data, ax):
    """
    Plots a 2d Gaussian kernel density estimate of the persistence diagram.
    :param data: pandas.DataFrame holding the persistence data.
    :param ax: axis for the data to be plotted on.
    :return: ax
    """
    birth, death = data['birth'], data['death']
    # KDE contours first, raw points scattered on top.
    sns.kdeplot(birth, death, ax=ax)
    ax.scatter(birth, death, s=4, c='k', alpha=.4)
    ax.set_xlabel('birth [dist from soma in um]')
    ax.set_ylabel('death [dist from soma in um]')
    sns.despine()
    ax.set_title('persistence image 2D \n(Kanari et al., 2016) \n https://arxiv.org/abs/1603.08432')
    return ax
|
{
"content_hash": "24df4b9af79adc8ca69b4302fda2d0d3",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 119,
"avg_line_length": 34.38562091503268,
"alnum_prop": 0.6027371222201102,
"repo_name": "huangziwei/MorphoPy",
"id": "54af9ab4217b3231660ae3f418d5a867c26610a8",
"size": "5261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "morphopy/_utils/visualize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39973"
}
],
"symlink_target": ""
}
|
"""Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin, MultiOutputMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model._ridge import _solve_cholesky_kernel
from .utils.validation import check_is_fitted, _check_sample_weight
from .utils.validation import _deprecate_positional_args
from .utils.deprecation import deprecated
class KernelRidge(MultiOutputMixin, RegressorMixin, BaseEstimator):
    """Kernel ridge regression.
    Kernel ridge regression (KRR) combines ridge regression (linear least
    squares with l2-norm regularization) with the kernel trick. It thus
    learns a linear function in the space induced by the respective kernel and
    the data. For non-linear kernels, this corresponds to a non-linear
    function in the original space.
    The form of the model learned by KRR is identical to support vector
    regression (SVR). However, different loss functions are used: KRR uses
    squared error loss while support vector regression uses epsilon-insensitive
    loss, both combined with l2 regularization. In contrast to SVR, fitting a
    KRR model can be done in closed-form and is typically faster for
    medium-sized datasets. On the other hand, the learned model is non-sparse
    and thus slower than SVR, which learns a sparse model for epsilon > 0, at
    prediction-time.
    This estimator has built-in support for multi-variate regression
    (i.e., when y is a 2d-array of shape [n_samples, n_targets]).
    Read more in the :ref:`User Guide <kernel_ridge>`.
    Parameters
    ----------
    alpha : float or array-like of shape (n_targets,), default=1.0
        Regularization strength; must be a positive float. Regularization
        improves the conditioning of the problem and reduces the variance of
        the estimates. Larger values specify stronger regularization.
        Alpha corresponds to ``1 / (2C)`` in other linear models such as
        :class:`~sklearn.linear_model.LogisticRegression` or
        :class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are
        assumed to be specific to the targets. Hence they must correspond in
        number. See :ref:`ridge_regression` for formula.
    kernel : string or callable, default="linear"
        Kernel mapping used internally. This parameter is directly passed to
        :class:`~sklearn.metrics.pairwise.pairwise_kernel`.
        If `kernel` is a string, it must be one of the metrics
        in `pairwise.PAIRWISE_KERNEL_FUNCTIONS`.
        If `kernel` is "precomputed", X is assumed to be a kernel matrix.
        Alternatively, if `kernel` is a callable function, it is called on
        each pair of instances (rows) and the resulting value recorded. The
        callable should take two rows from X as input and return the
        corresponding kernel value as a single number. This means that
        callables from :mod:`sklearn.metrics.pairwise` are not allowed, as
        they operate on matrices, not single samples. Use the string
        identifying the kernel instead.
    gamma : float, default=None
        Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
        and sigmoid kernels. Interpretation of the default value is left to
        the kernel; see the documentation for sklearn.metrics.pairwise.
        Ignored by other kernels.
    degree : float, default=3
        Degree of the polynomial kernel. Ignored by other kernels.
    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels.
        Ignored by other kernels.
    kernel_params : mapping of string to any, default=None
        Additional parameters (keyword arguments) for kernel function passed
        as callable object.
    Attributes
    ----------
    dual_coef_ : ndarray of shape (n_samples,) or (n_samples, n_targets)
        Representation of weight vector(s) in kernel space
    X_fit_ : {ndarray, sparse matrix} of shape (n_samples, n_features)
        Training data, which is also required for prediction. If
        kernel == "precomputed" this is instead the precomputed
        training matrix, of shape (n_samples, n_samples).
    References
    ----------
    * Kevin P. Murphy
      "Machine Learning: A Probabilistic Perspective", The MIT Press
      chapter 14.4.3, pp. 492-493
    See Also
    --------
    sklearn.linear_model.Ridge : Linear ridge regression.
    sklearn.svm.SVR : Support Vector Regression implemented using libsvm.
    Examples
    --------
    >>> from sklearn.kernel_ridge import KernelRidge
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> rng = np.random.RandomState(0)
    >>> y = rng.randn(n_samples)
    >>> X = rng.randn(n_samples, n_features)
    >>> clf = KernelRidge(alpha=1.0)
    >>> clf.fit(X, y)
    KernelRidge(alpha=1.0)
    """
    @_deprecate_positional_args
    def __init__(self, alpha=1, *, kernel="linear", gamma=None, degree=3,
                 coef0=1, kernel_params=None):
        self.alpha = alpha
        self.kernel = kernel
        self.gamma = gamma
        self.degree = degree
        self.coef0 = coef0
        self.kernel_params = kernel_params
    def _get_kernel(self, X, Y=None):
        # Kernel matrix between X and Y (or X with itself when Y is None).
        # A callable kernel receives only the user-supplied kernel_params;
        # for string kernels the gamma/degree/coef0 hyperparameters are
        # forwarded and pairwise_kernels drops the irrelevant ones
        # (filter_params=True).
        if callable(self.kernel):
            params = self.kernel_params or {}
        else:
            params = {"gamma": self.gamma,
                      "degree": self.degree,
                      "coef0": self.coef0}
        return pairwise_kernels(X, Y, metric=self.kernel,
                                filter_params=True, **params)
    def _more_tags(self):
        # estimator tag: X is a pairwise (kernel) matrix when precomputed
        return {'pairwise': self.kernel == 'precomputed'}
    # TODO: Remove in 0.26
    # mypy error: Decorated property not supported
    @deprecated("Attribute _pairwise was deprecated in "  # type: ignore
                "version 0.24 and will be removed in 0.26.")
    @property
    def _pairwise(self):
        return self.kernel == "precomputed"
    def fit(self, X, y, sample_weight=None):
        """Fit Kernel Ridge regression model
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data. If kernel == "precomputed" this is instead
            a precomputed kernel matrix, of shape (n_samples, n_samples).
        y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target values
        sample_weight : float or array-like of shape (n_samples,), default=None
            Individual weights for each sample, ignored if None is passed.
        Returns
        -------
        self : returns an instance of self.
        """
        # Convert data
        X, y = self._validate_data(X, y, accept_sparse=("csr", "csc"),
                                   multi_output=True, y_numeric=True)
        # a scalar float weight is passed through unchanged; only array-like
        # weights need per-sample validation against X
        if sample_weight is not None and not isinstance(sample_weight, float):
            sample_weight = _check_sample_weight(sample_weight, X)
        K = self._get_kernel(X)
        # alpha may be a scalar or one penalty per target; normalize to 1d
        alpha = np.atleast_1d(self.alpha)
        ravel = False
        if len(y.shape) == 1:
            # solver works on 2d targets; remember to flatten back below
            y = y.reshape(-1, 1)
            ravel = True
        # NOTE(review): copy=True for a precomputed kernel presumably protects
        # the user-supplied Gram matrix from in-place modification by the
        # solver — confirm against _solve_cholesky_kernel.
        copy = self.kernel == "precomputed"
        self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
                                                 sample_weight,
                                                 copy)
        if ravel:
            self.dual_coef_ = self.dual_coef_.ravel()
        self.X_fit_ = X
        return self
    def predict(self, X):
        """Predict using the kernel ridge model
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Samples. If kernel == "precomputed" this is instead a
            precomputed kernel matrix, shape = [n_samples,
            n_samples_fitted], where n_samples_fitted is the number of
            samples used in the fitting for this estimator.
        Returns
        -------
        C : ndarray of shape (n_samples,) or (n_samples, n_targets)
            Returns predicted values.
        """
        check_is_fitted(self)
        X = self._validate_data(X, accept_sparse=("csr", "csc"), reset=False)
        # kernel between the new samples and the training data stored at fit
        K = self._get_kernel(X, self.X_fit_)
        return np.dot(K, self.dual_coef_)
|
{
"content_hash": "4d7200d0a6b8ef40c8667509c77499a2",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 79,
"avg_line_length": 40.26190476190476,
"alnum_prop": 0.6347723240685985,
"repo_name": "ndingwall/scikit-learn",
"id": "119b27e9084ae34b13c479834671b239c1078d6d",
"size": "8455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sklearn/kernel_ridge.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "416843"
},
{
"name": "C++",
"bytes": "140261"
},
{
"name": "Makefile",
"bytes": "1630"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "6794973"
},
{
"name": "Shell",
"bytes": "13442"
}
],
"symlink_target": ""
}
|
NAME = 'Edgecast / Verizon Digital media'

def is_waf(self):
    # Edgecast edge nodes answer with a Server header shaped like
    # "ECD (...)" or "ECS (...)"; either signature identifies the WAF.
    for server_signature in ('^ECD \\(.*?\\)$', '^ECS \\(.*?\\)$'):
        if self.matchheader(('Server', server_signature)):
            return True
    return False
|
{
"content_hash": "80e503991b59e0cb1d0b1d812d955f7d",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 55,
"avg_line_length": 28.625,
"alnum_prop": 0.5502183406113537,
"repo_name": "sandrogauci/wafw00f",
"id": "bff05da117299233274c4b965c389c4856d6afd1",
"size": "252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wafw00f/plugins/edgecast.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "434"
},
{
"name": "Python",
"bytes": "55796"
}
],
"symlink_target": ""
}
|
"""Package of Sodoku solvers as a graph coloring."""
__all__ = ['io', 'coloring', 'coloring']
|
{
"content_hash": "59c6bb7094d8ca64ea970e35e200debe",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 52,
"avg_line_length": 47,
"alnum_prop": 0.6276595744680851,
"repo_name": "jlramalheira/sudoku",
"id": "e4321125c50e3168c171ba453df62eeb72022c4d",
"size": "94",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sudoku/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19932"
}
],
"symlink_target": ""
}
|
from tempfile import NamedTemporaryFile
from airflow.contrib.hooks.gcs_hook import (GoogleCloudStorageHook,
_parse_gcs_url)
from airflow.contrib.operators.s3_list_operator import S3ListOperator
from airflow.exceptions import AirflowException
from airflow.hooks.S3_hook import S3Hook
from airflow.utils.decorators import apply_defaults
class S3ToGoogleCloudStorageOperator(S3ListOperator):
    """
    Synchronizes an S3 key, possibly a prefix, with a Google Cloud Storage
    destination path.
    :param bucket: The S3 bucket where to find the objects. (templated)
    :type bucket: str
    :param prefix: Prefix string which filters objects whose name begin with
        such prefix. (templated)
    :type prefix: str
    :param delimiter: the delimiter marks key hierarchy. (templated)
    :type delimiter: str
    :param aws_conn_id: The source S3 connection
    :type aws_conn_id: str
    :param verify: Whether or not to verify SSL certificates for S3 connection.
        By default SSL certificates are verified.
        You can provide the following values:
        - False: do not validate SSL certificates. SSL will still be used
                 (unless use_ssl is False), but SSL certificates will not be
                 verified.
        - path/to/cert/bundle.pem: A filename of the CA cert bundle to uses.
                 You can specify this argument if you want to use a different
                 CA cert bundle than the one used by botocore.
    :type verify: bool or str
    :param dest_gcs_conn_id: The destination connection ID to use
        when connecting to Google Cloud Storage.
    :type dest_gcs_conn_id: str
    :param dest_gcs: The destination Google Cloud Storage bucket and prefix
        where you want to store the files. (templated)
    :type dest_gcs: str
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param replace: Whether you want to replace existing destination files
        or not.
    :type replace: bool
    **Example**:
    .. code-block:: python
       s3_to_gcs_op = S3ToGoogleCloudStorageOperator(
            task_id='s3_to_gcs_example',
            bucket='my-s3-bucket',
            prefix='data/customers-201804',
            dest_gcs_conn_id='google_cloud_default',
            dest_gcs='gs://my.gcs.bucket/some/customers/',
            replace=False,
            dag=my-dag)
    Note that ``bucket``, ``prefix``, ``delimiter`` and ``dest_gcs`` are
    templated, so you can use variables in them if you wish.
    """
    template_fields = ('bucket', 'prefix', 'delimiter', 'dest_gcs')
    ui_color = '#e09411'
    @apply_defaults
    def __init__(self,
                 bucket,
                 prefix='',
                 delimiter='',
                 aws_conn_id='aws_default',
                 verify=None,
                 dest_gcs_conn_id=None,
                 dest_gcs=None,
                 delegate_to=None,
                 replace=False,
                 *args,
                 **kwargs):
        # the parent S3ListOperator handles the S3 side of the sync
        super(S3ToGoogleCloudStorageOperator, self).__init__(
            bucket=bucket,
            prefix=prefix,
            delimiter=delimiter,
            aws_conn_id=aws_conn_id,
            *args,
            **kwargs)
        self.dest_gcs_conn_id = dest_gcs_conn_id
        self.dest_gcs = dest_gcs
        self.delegate_to = delegate_to
        self.replace = replace
        self.verify = verify
        # fail fast at instantiation: destination must be a "directory"
        # (empty or ending with '/'), because object names are later built
        # by simple prefix concatenation in execute()
        if dest_gcs and not self._gcs_object_is_directory(self.dest_gcs):
            self.log.info(
                'Destination Google Cloud Storage path is not a valid '
                '"directory", define a path that ends with a slash "/" or '
                'leave it empty for the root of the bucket.')
            raise AirflowException('The destination Google Cloud Storage path '
                                   'must end with a slash "/" or be empty.')
    def execute(self, context):
        # use the super method to list all the files in an S3 bucket/key
        files = super(S3ToGoogleCloudStorageOperator, self).execute(context)
        gcs_hook = GoogleCloudStorageHook(
            google_cloud_storage_conn_id=self.dest_gcs_conn_id,
            delegate_to=self.delegate_to)
        if not self.replace:
            # if we are not replacing -> list all files in the GCS bucket
            # and only keep those files which are present in
            # S3 and not in Google Cloud Storage
            bucket_name, object_prefix = _parse_gcs_url(self.dest_gcs)
            existing_files_prefixed = gcs_hook.list(
                bucket_name, prefix=object_prefix)
            existing_files = []
            if existing_files_prefixed:
                # Remove the object prefix itself, an empty directory was found
                if object_prefix in existing_files_prefixed:
                    existing_files_prefixed.remove(object_prefix)
                # Remove the object prefix from all object string paths
                for f in existing_files_prefixed:
                    if f.startswith(object_prefix):
                        existing_files.append(f[len(object_prefix):])
                    else:
                        existing_files.append(f)
            # set difference: S3 keys that have no GCS counterpart yet
            files = set(files) - set(existing_files)
            if len(files) > 0:
                self.log.info('{0} files are going to be synced: {1}.'.format(
                    len(files), files))
            else:
                self.log.info(
                    'There are no new files to sync. Have a nice day!')
        if files:
            hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
            # NOTE(review): `file` shadows the Python builtin of the same name
            for file in files:
                # GCS hook builds its own in-memory file so we have to create
                # and pass the path
                file_object = hook.get_key(file, self.bucket)
                with NamedTemporaryFile(mode='wb', delete=True) as f:
                    # stage the S3 object on local disk, flush so the upload
                    # below sees the complete content
                    file_object.download_fileobj(f)
                    f.flush()
                    dest_gcs_bucket, dest_gcs_object_prefix = _parse_gcs_url(
                        self.dest_gcs)
                    # There will always be a '/' before file because it is
                    # enforced at instantiation time
                    dest_gcs_object = dest_gcs_object_prefix + file
                    # Sync is sequential and the hook already logs too much
                    # so skip this for now
                    # self.log.info(
                    #     'Saving file {0} from S3 bucket {1} in GCS bucket {2}'
                    #     ' as object {3}'.format(file, self.bucket,
                    #                             dest_gcs_bucket,
                    #                             dest_gcs_object))
                    gcs_hook.upload(dest_gcs_bucket, dest_gcs_object, f.name)
            self.log.info(
                "All done, uploaded %d files to Google Cloud Storage",
                len(files))
        else:
            self.log.info(
                'In sync, no files needed to be uploaded to Google Cloud'
                'Storage')
        return files
    # Following functionality may be better suited in
    # airflow/contrib/hooks/gcs_hook.py
    @staticmethod
    def _gcs_object_is_directory(object):
        """Return True if the gs:// URL has an empty or '/'-terminated path.

        NOTE(review): the parameter name shadows the ``object`` builtin.
        """
        bucket, blob = _parse_gcs_url(object)
        return len(blob) == 0 or blob.endswith('/')
|
{
"content_hash": "7f6446494ad4ed9b77e89baa9469a3b7",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 80,
"avg_line_length": 40.516129032258064,
"alnum_prop": 0.5691348195329087,
"repo_name": "sid88in/incubator-airflow",
"id": "5dd355a6fd3317b2d225070706c563878cbbd1c1",
"size": "8348",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "airflow/contrib/operators/s3_to_gcs_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "69070"
},
{
"name": "Dockerfile",
"bytes": "2001"
},
{
"name": "HTML",
"bytes": "280685"
},
{
"name": "JavaScript",
"bytes": "1385622"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "4686260"
},
{
"name": "Shell",
"bytes": "34088"
}
],
"symlink_target": ""
}
|
"""
=====================================
Cross-Correlation (Phase Correlation)
=====================================
In this example, we use phase correlation to identify the relative shift
between two similar-sized images.
The ``register_translation`` function uses cross-correlation in Fourier space,
optionally employing an upsampled matrix-multiplication DFT to achieve
arbitrary subpixel precision. [1]_
.. [1] Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
"Efficient subpixel image registration algorithms," Optics Letters 33,
156-158 (2008).
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.feature import register_translation
from skimage.feature.register_translation import _upsampled_dft
from scipy.ndimage import fourier_shift
image = data.camera()
shift = (-2.4, 1.32)
# (-2.4, 1.32) pixel offset relative to reference coin
offset_image = fourier_shift(np.fft.fftn(image), shift)
offset_image = np.fft.ifftn(offset_image)
print("Known offset (y, x):")
print(shift)
# pixel precision first
shift, error, diffphase = register_translation(image, offset_image)
fig = plt.figure(figsize=(8, 3))
ax1 = plt.subplot(1, 3, 1, adjustable='box-forced')
ax2 = plt.subplot(1, 3, 2, sharex=ax1, sharey=ax1, adjustable='box-forced')
ax3 = plt.subplot(1, 3, 3)
ax1.imshow(image)
ax1.set_axis_off()
ax1.set_title('Reference image')
ax2.imshow(offset_image.real)
ax2.set_axis_off()
ax2.set_title('Offset image')
# View the output of a cross-correlation to show what the algorithm is
# doing behind the scenes
image_product = np.fft.fft2(image) * np.fft.fft2(offset_image).conj()
cc_image = np.fft.fftshift(np.fft.ifft2(image_product))
ax3.imshow(cc_image.real)
ax3.set_axis_off()
ax3.set_title("Cross-correlation")
plt.show()
print("Detected pixel offset (y, x):")
print(shift)
# subpixel precision
shift, error, diffphase = register_translation(image, offset_image, 100)
fig = plt.figure(figsize=(8, 3))
ax1 = plt.subplot(1, 3, 1, adjustable='box-forced')
ax2 = plt.subplot(1, 3, 2, sharex=ax1, sharey=ax1, adjustable='box-forced')
ax3 = plt.subplot(1, 3, 3)
ax1.imshow(image)
ax1.set_axis_off()
ax1.set_title('Reference image')
ax2.imshow(offset_image.real)
ax2.set_axis_off()
ax2.set_title('Offset image')
# Calculate the upsampled DFT, again to show what the algorithm is doing
# behind the scenes. Constants correspond to calculated values in routine.
# See source code for details.
cc_image = _upsampled_dft(image_product, 150, 100, (shift*100)+75).conj()
ax3.imshow(cc_image.real)
ax3.set_axis_off()
ax3.set_title("Supersampled XC sub-area")
plt.show()
print("Detected subpixel offset (y, x):")
print(shift)
|
{
"content_hash": "74aa392d6b7b814896464ddc4b976766",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 78,
"avg_line_length": 29.857142857142858,
"alnum_prop": 0.7165991902834008,
"repo_name": "ClinicalGraphics/scikit-image",
"id": "c558e107fa387ec51b3f4d05bb785cbd611856f0",
"size": "2717",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "doc/examples/transform/plot_register_translation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "76670"
},
{
"name": "Makefile",
"bytes": "571"
},
{
"name": "Python",
"bytes": "2323768"
}
],
"symlink_target": ""
}
|
import sys
# This module exists to give users an indication that they need to have
# a version of python compatible with the RLBot framework.
# Otherwise people might find out by in-the-guts error messages
# after quite a while of the runner launching.
# Lowest interpreter version the RLBot framework supports.
minimum_python_version = (3, 6)
# Deliberately using old string formatting for compatibility.
# (f-strings would be a SyntaxError on the very interpreters this message is
# meant to reach, so the %-formatting below must stay.)
error_string = """You appear to be using an old version of Python: %s
RLBot requires Python %d.%d or later.
After installing, ensure your environment point to the new Python version, then run setup.bat""" % (
    (sys.version,) + minimum_python_version)
def check_python_version():
    # NOTE: assert is stripped when running with `python -O`; acceptable here
    # since this is a best-effort startup check, not security validation.
    assert sys.version_info >= minimum_python_version, error_string
|
{
"content_hash": "36fa6fffef7139d8053586a649bf9a7b",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 101,
"avg_line_length": 41.35294117647059,
"alnum_prop": 0.7553342816500711,
"repo_name": "drssoccer55/RLBot",
"id": "9d2ecaf45d4e2347dfba18c3b82505e08c98dd83",
"size": "703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/python/rlbot/utils/python_version_check.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "13520"
},
{
"name": "Python",
"bytes": "48042"
}
],
"symlink_target": ""
}
|
from bokeh.layouts import column
from bokeh.models import CustomJS, ColumnDataSource, Slider
from bokeh.plotting import Figure, output_file, show
output_file("callback.html")
x = [x*0.005 for x in range(0, 200)]
y = x
source = ColumnDataSource(data=dict(x=x, y=y))
plot = Figure(plot_width=400, plot_height=400)
plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)
def callback(source=source, window=None):
data = source.get('data')
f = cb_obj.get('value')
x, y = data['x'], data['y']
for i in range(len(x)):
y[i] = window.Math.pow(x[i], f)
source.trigger('change');
slider = Slider(start=0.1, end=4, value=1, step=.1, title="power",
callback=CustomJS.from_py_func(callback))
layout = column(slider, plot)
show(layout)
|
{
"content_hash": "dadad6ecbd840c667d67455d54371821",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 66,
"avg_line_length": 27.964285714285715,
"alnum_prop": 0.6602809706257982,
"repo_name": "ptitjano/bokeh",
"id": "bf5865171113dea39a1ceaa282c3e4aa73c47f5f",
"size": "783",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sphinx/source/docs/user_guide/source_examples/interaction_callbacks_in_python.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1710"
},
{
"name": "CSS",
"bytes": "406989"
},
{
"name": "CoffeeScript",
"bytes": "1073573"
},
{
"name": "HTML",
"bytes": "45510"
},
{
"name": "JavaScript",
"bytes": "12173"
},
{
"name": "Jupyter Notebook",
"bytes": "3981"
},
{
"name": "Makefile",
"bytes": "1161"
},
{
"name": "Python",
"bytes": "2083050"
},
{
"name": "Shell",
"bytes": "15584"
},
{
"name": "TypeScript",
"bytes": "25843"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build the weak Blood Razor pirate (male Zabrak) creature object."""
    creature = Creature()
    creature.template = "object/mobile/shared_dressed_blood_razor_pirate_weak_zab_m.iff"
    creature.attribute_template_id = 9
    creature.stfName("npc_name","zabrak_base_male")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return creature
|
{
"content_hash": "69c1a23650f69096e546a1bdaaff15e3",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 83,
"avg_line_length": 24.46153846153846,
"alnum_prop": 0.6981132075471698,
"repo_name": "anhstudios/swganh",
"id": "f4954a45b7414aec264b4dab368cdda0cd855cd4",
"size": "463",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/mobile/shared_dressed_blood_razor_pirate_weak_zab_m.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
import sys
from django.conf import settings
from django.core import exceptions
from base import BaseBackend
# mostly for backend compatibility
default_backends = (
    ("email", "notification.backends.email.EmailBackend"),
)
def load_backends():
    # Instantiate the notification backends configured in
    # settings.NOTIFICATION_BACKENDS (falling back to default_backends) and
    # return a dict mapping (medium_id, label) -> backend instance.
    # NOTE: this module uses Python 2 syntax (`raise Cls, msg`,
    # `except Exc, e`) and will not import on Python 3.
    backends = []
    for medium_id, bits in enumerate(getattr(settings, "NOTIFICATION_BACKENDS", default_backends)):
        # each entry is (label, path) or (label, path, spam_sensitivity)
        if len(bits) == 2:
            label, backend_path = bits
            spam_sensitivity = None
        elif len(bits) == 3:
            label, backend_path, spam_sensitivity = bits
        else:
            raise exceptions.ImproperlyConfigured, "NOTIFICATION_BACKENDS does not contain enough data."
        # split "pkg.module.ClassName" into module path and class name
        dot = backend_path.rindex(".")
        backend_mod, backend_class = backend_path[:dot], backend_path[dot+1:]
        try:
            # import the module and get the module from sys.modules
            __import__(backend_mod)
            mod = sys.modules[backend_mod]
        except ImportError, e:
            raise exceptions.ImproperlyConfigured, 'Error importing notification backend %s: "%s"' % (backend_mod, e)
        # add the backend label and an instantiated backend class to the
        # backends list.
        backend_instance = getattr(mod, backend_class)(medium_id, spam_sensitivity)
        backends.append(((medium_id, label), backend_instance))
    return dict(backends)
|
{
"content_hash": "c43d878985bdc9cffe6af684d63aab9f",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 117,
"avg_line_length": 39.22857142857143,
"alnum_prop": 0.6525855790240349,
"repo_name": "brosner/django-notification",
"id": "c1bd97155a11d6ffac9a254106efc7a72405d117",
"size": "1374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notification/backends/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71437"
}
],
"symlink_target": ""
}
|
import collections
from paddle.trainer_config_helpers.layers import LayerOutput
from paddle.v2.layer import parse_network
from paddle.proto import TrainerConfig_pb2
__all__ = ["dump_v2_config"]
def dump_v2_config(topology, save_path, binary=False):
    """ Dump the network topology to a specified file.
    This function is only used to dump network defined by using PaddlePaddle V2
    APIs. This function will NOT dump configurations related to PaddlePaddle
    optimizer.
    :param topology: The output layers (can be more than one layers given in a
                     Python List or Tuple) of the entire network. Using the
                     specified layers (if more than one layer is given) as root,
                     traversing back to the data layer(s), all the layers
                     connected to the specified output layers will be dumped.
                     Layers not connected to the specified will not be dumped.
    :type topology: LayerOutput|List|Tuple
    :param save_path: The path to save the dumped network topology.
    :type save_path: str
    :param binary: Whether to dump the serialized network topology or not.
                   The default value is false. NOTE that, if you call this
                   function to generate network topology for PaddlePaddle C-API,
                   a serialized version of network topology is required. When
                   using PaddlePaddle C-API, this flag MUST be set to True.
    :type binary: bool
    """
    if isinstance(topology, LayerOutput):
        topology = [topology]
    elif isinstance(topology, collections.Sequence):
        for out_layer in topology:
            assert isinstance(out_layer, LayerOutput), (
                "The type of each element in the parameter topology "
                "should be LayerOutput.")
    else:
        raise RuntimeError("Error input type for parameter topology.")
    model_str = parse_network(topology)
    if binary:
        # FIX: SerializeToString() returns raw protobuf bytes, so the file
        # must be opened in binary mode — a text-mode handle corrupts the
        # dump via newline translation (and raises on Python 3).
        with open(save_path, "wb") as fout:
            fout.write(model_str.SerializeToString())
    else:
        with open(save_path, "w") as fout:
            fout.write(str(model_str))
|
{
"content_hash": "d839a687e9ade4d1f00fdf31e7051432",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 80,
"avg_line_length": 43.224489795918366,
"alnum_prop": 0.6543909348441926,
"repo_name": "putcn/Paddle",
"id": "5dc2111e379fd39b40e1e9bcf2e577b57b101a68",
"size": "2727",
"binary": false,
"copies": "7",
"ref": "refs/heads/develop",
"path": "python/paddle/utils/dump_v2_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "272910"
},
{
"name": "C++",
"bytes": "7598375"
},
{
"name": "CMake",
"bytes": "269313"
},
{
"name": "Cuda",
"bytes": "1078779"
},
{
"name": "Go",
"bytes": "109501"
},
{
"name": "Perl",
"bytes": "11456"
},
{
"name": "Python",
"bytes": "3637137"
},
{
"name": "Shell",
"bytes": "157071"
}
],
"symlink_target": ""
}
|
"""
base.py
-------------
The base class for `Visual` objects
"""
import abc
from ..util import ABC
class Visuals(ABC):
    """
    Parent of Visual classes: the interface every visual
    implementation must provide.
    """
    # `abc.abstractproperty` has been deprecated since Python 3.3; stacking
    # @property over @abc.abstractmethod is the supported spelling and is
    # enforced identically on subclasses.
    @property
    @abc.abstractmethod
    def kind(self):
        pass
    @abc.abstractmethod
    def update_vertices(self):
        pass
    @abc.abstractmethod
    def update_faces(self):
        pass
    @abc.abstractmethod
    def concatenate(self, other):
        pass
    @abc.abstractmethod
    def crc(self):
        pass
    @abc.abstractmethod
    def copy(self):
        pass
    def __add__(self, other):
        """
        Concatenate two ColorVisuals objects into a single object.
        Parameters
        -----------
        other : Visuals
          Other visual to concatenate
        Returns
        -----------
        result : Visuals
          Object containing information from current
          object and other in the order (self, other)
        """
        return self.concatenate(other)
|
{
"content_hash": "545f96ccb90f8b1e8d31bb8d3de1e8bb",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 66,
"avg_line_length": 17.571428571428573,
"alnum_prop": 0.5548780487804879,
"repo_name": "dajusc/trimesh",
"id": "37424c4133237e69c605f2cdbf6be2dab2b44b42",
"size": "984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trimesh/visual/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "692521"
},
{
"name": "Shell",
"bytes": "3713"
}
],
"symlink_target": ""
}
|
import os
import json
import subprocess as sp
import re
import tempfile
from crossref.restful import Works, Etiquette
import bibtexparser
import papers
from papers.config import cached
from papers import logger
from papers.encoding import family_names, latex_to_unicode
my_etiquette = Etiquette('papers', papers.__version__, 'https://github.com/perrette/papers', 'mahe.perrette@gmail.com')
class DOIParsingError(ValueError):
    """Raised when no DOI can be parsed out of a piece of text."""
    pass
class DOIRequestError(ValueError):
    """Raised when a crossref request for a DOI fails (non-OK response)."""
    pass
# PDF parsing / crossref requests
# ===============================
def readpdf(pdf, first=None, last=None):
    """Extract text from a PDF with the ``pdftotext`` command-line tool.

    :param pdf: path to the PDF file
    :param first: first page to read (1-based), or None for the start
    :param last: last page to read, or None for the end
    :return: extracted text
    :raises ValueError: if *pdf* is not an existing file
    """
    if not os.path.isfile(pdf):
        raise ValueError(repr(pdf) + ": not a file")
    # mkstemp instead of the insecure/deprecated tempfile.mktemp: the file is
    # created atomically, so no other process can race us on the name.
    fd, tmptxt = tempfile.mkstemp(suffix='.txt')
    os.close(fd)  # pdftotext re-opens the path itself
    try:
        cmd = ['pdftotext']
        if first is not None:
            cmd.extend(['-f', str(first)])
        if last is not None:
            cmd.extend(['-l', str(last)])
        cmd.extend([pdf, tmptxt])
        logger.info(' '.join(cmd))
        sp.check_call(cmd)
        # context manager closes the handle instead of leaking it
        with open(tmptxt) as f:
            return f.read()
    finally:
        # clean up the temp file even when pdftotext fails
        os.remove(tmptxt)
def readpdf_image(pdf, first=None, last=None):
    """OCR text from a PDF by rendering it with ``pdftoppm`` and running
    ``tesseract`` on the resulting image.

    :param pdf: path to the PDF file
    :param first: first page to render (1-based), or None for the start
    :param last: last page to render, or None for the end
    :return: OCR-extracted text
    :raises ValueError: if *pdf* is not an existing file
    """
    if not os.path.isfile(pdf):
        raise ValueError(repr(pdf) + ": not a file")
    # mkstemp instead of the insecure/deprecated tempfile.mktemp
    fd, tmpbase = tempfile.mkstemp()
    os.close(fd)
    tmppng = tmpbase + '.png'
    tmptxt = tmpbase + '.txt'
    try:
        # 1st create a .png image from the uniq pdf file
        cmd = ['pdftoppm', '-singlefile', '-png', '-q']
        if first is not None:
            cmd.extend(['-f', str(first)])
        if last is not None:
            cmd.extend(['-l', str(last)])
        cmd.extend([pdf, tmpbase])
        logger.info(' '.join(cmd))
        sp.check_call(cmd)
        # 2nd extract text from .png using tesseract (writes tmpbase + '.txt')
        cmd = ["tesseract", tmppng, tmpbase, "-l", "eng", "quiet"]
        logger.info(' '.join(cmd))
        sp.check_call(cmd)
        # context manager closes the handle instead of leaking it
        with open(tmptxt) as f:
            return f.read()
    finally:
        # remove every intermediate file, even when a subprocess fails midway
        for path in (tmpbase, tmppng, tmptxt):
            if os.path.exists(path):
                os.remove(path)
REGEXP = re.compile(r'[doi,doi.org/][\s\.\:]{0,2}(10\.\d{4}[\d\:\.\-\/a-z]+)[A-Z\s,\n]')

def parse_doi(txt):
    """Return the first DOI found in *txt*.

    :raises DOIParsingError: when nothing DOI-like is found, or the candidate
        is too short to be plausible.
    """
    # regex background:
    # based on: https://doeidoei.wordpress.com/2009/10/22/regular-expression-to-match-a-doi-digital-object-identifier/
    # doi = r'[doi|DOI][\s\.\:]{0,2}(10\.\d{4}[\d\:\.\-\/a-z]+)[A-Z\s]'
    # maybe try that? (need to convert to python-regex)
    # https://www.crossref.org/blog/dois-and-matching-regular-expressions/
    # a. /^10.\d{4,9}/[-._;()/:A-Z0-9]+$/i
    # b. /^10.1002/[^\s]+$/i
    # c. /^10.\d{4}/\d+-\d+X?(\d+)\d+<[\d\w]+:[\d\w]*>\d+.\d+.\w+;\d$/i
    # d. /^10.1021/\w\w\d++$/i
    # e. /^10.1207/[\w\d]+\&\d+_\d+$/i
    # pad with spaces so boundary characters can match at either end
    candidates = REGEXP.findall(' '+txt.lower()+' ')
    if not candidates:
        raise DOIParsingError('parse_doi::no matches')
    # take the first hit and clean it up
    doi = candidates[0].replace('\n', '').strip('.')
    if doi.lower().endswith('.received'):
        doi = doi[:-len('.received')]
    # quality check: anything this short cannot be a real DOI
    if len(doi) <= 8:
        raise DOIParsingError('failed to extract doi: '+doi)
    return doi
def isvaliddoi(doi):
    """Return True if *doi* round-trips through :func:`parse_doi` unchanged
    (compared case-insensitively)."""
    try:
        doi2 = parse_doi('doi:'+doi)
    # `except Exception` instead of the original bare `except:` so that
    # KeyboardInterrupt/SystemExit are no longer swallowed; any parsing or
    # type error still simply means "not a valid DOI".
    except Exception:
        return False
    return doi.lower() == doi2.lower()
def pdfhead(pdf, maxpages=10, minwords=200, image=False):
    """Read the head of a PDF, page by page, until at least *minwords* words
    have been collected or *maxpages* pages have been read.

    :param image: when True, OCR each page instead of extracting embedded text.
    """
    reader = readpdf_image if image else readpdf
    collected = ''
    page = 0
    while len(collected.strip().split()) < minwords and page < maxpages:
        page += 1
        logger.debug('read pdf page: '+str(page))
        collected += reader(pdf, first=page, last=page)
    return collected
def extract_pdf_doi(pdf, image=False):
    """Parse a DOI out of the head of *pdf* (OCR'd when *image* is True)."""
    head = pdfhead(pdf, image=image)
    return parse_doi(head)
def query_text(txt, max_query_words=200):
    """Build a fulltext search query from *txt*.

    Drops paragraphs that look like a reference section, joins the rest, and
    truncates to *max_query_words* words. Asserts the result keeps at least
    3 words.
    """
    # keep every paragraph except reference sections
    kept = [
        paragraph for paragraph in re.split(r"\n\n", txt)
        if not paragraph.lower().startswith('reference')
    ]
    joined = ' '.join(kept)
    # limit overall length
    query_txt = ' '.join(joined.strip().split()[:max_query_words])
    assert len(query_txt.split()) >= 3, 'needs at least 3 query words, got: '+repr(query_txt)
    return query_txt
def extract_txt_metadata(txt, search_doi=True, search_fulltext=False, max_query_words=200, scholar=False):
    """extract metadata from text, by parsing and doi-query, or by fulltext query in google scholar
    """
    assert search_doi or search_fulltext, 'no search criteria specified for metadata'
    bibtex = None
    if search_doi:
        try:
            logger.debug('parse doi')
            doi = parse_doi(txt)
            logger.info('found doi:'+doi)
            logger.debug('query bibtex by doi')
            bibtex = fetch_bibtex_by_doi(doi)
            logger.debug('doi query successful')
        except DOIParsingError as error:
            # no DOI found in the text: fall through to the fulltext
            # search below (if enabled)
            logger.debug('doi parsing error: '+str(error))
        except DOIRequestError as error:
            # a DOI was parsed but crossref could not resolve it: return a
            # minimal bibtex entry built from the DOI alone
            return '''@misc{{{doi},
             doi = {{{doi}}},
             url = {{http://dx.doi.org/{doi}}},
            }}'''.format(doi=doi)
        except ValueError as error:
            # any other failure is re-raised unchanged
            raise
            # logger.debug(u'failed to obtained bibtex by doi search: '+str(error))
    if search_fulltext and not bibtex:
        logger.debug('query bibtex by fulltext')
        query_txt = query_text(txt, max_query_words)
        if scholar:
            bibtex = fetch_bibtex_by_fulltext_scholar(query_txt)
        else:
            bibtex = fetch_bibtex_by_fulltext_crossref(query_txt)
        logger.debug('fulltext query successful')
    if not bibtex:
        raise ValueError('failed to extract metadata')
    return bibtex
def extract_pdf_metadata(pdf, search_doi=True, search_fulltext=True, maxpages=10, minwords=200, image=False, **kw):
    """Extract bibtex metadata from a PDF: read its head, then delegate to
    extract_txt_metadata (extra keyword args are forwarded to it)."""
    head_txt = pdfhead(pdf, maxpages, minwords, image=image)
    return extract_txt_metadata(head_txt, search_doi, search_fulltext, **kw)
@cached('crossref-bibtex.json')
def fetch_bibtex_by_doi(doi):
    """Fetch the bibtex record for `doi` from the crossref REST API.

    Raises DOIRequestError when the HTTP request does not succeed.
    Results are cached on disk by the @cached decorator.
    """
    url = "http://api.crossref.org/works/"+doi+"/transform/application/x-bibtex"
    work = Works(etiquette=my_etiquette)
    headers = {'user-agent': str(work.etiquette)}
    response = work.do_http_request('get', url, custom_header=headers)
    if not response.ok:
        raise DOIRequestError(repr(doi)+': '+response.text)
    return response.text.strip()
@cached('crossref.json')
def fetch_json_by_doi(doi):
    """Fetch the crossref metadata for `doi` and return it parsed (a dict).

    Results are cached on disk by the @cached decorator.
    """
    url = "http://api.crossref.org/works/"+doi+"/transform/application/json"
    work = Works(etiquette=my_etiquette)
    jsontxt = work.do_http_request('get', url, custom_header={'user-agent': str(work.etiquette)}).text
    # BUG FIX: the original returned `jsontxt.dumps(json)`, which raises
    # AttributeError (str has no `dumps`; module and data were swapped).
    # Parse the JSON response text instead.
    return json.loads(jsontxt)
def _get_page_fast(pagerequest):
    """Return the data for a page on scholar.google.com"""
    import scholarly.scholarly as scholarly
    # reuse scholarly's session/headers/cookies, but skip its rate limiting
    resp = scholarly._SESSION.get(pagerequest, headers=scholarly._HEADERS, cookies=scholarly._COOKIES)
    if resp.status_code != 200:
        raise Exception('Error: {} {}'.format(resp.status_code, resp.reason))
    return resp.text
def _scholar_score(txt, bib):
    """Similarity score between query text and a scholar bib record
    (higher means more similar)."""
    from rapidfuzz.fuzz import token_set_ratio
    total = 0
    for field in ('title', 'author', 'abstract'):
        if field in bib:
            total += token_set_ratio(bib[field], txt)
    return total
@cached('scholar-bibtex.json', hashed_key=True)
def fetch_bibtex_by_fulltext_scholar(txt, assess_results=True):
    """Query google scholar with free text and return the best match's bibtex.

    txt : query text (e.g. the head of a paper)
    assess_results : if True and several results come back, rank them with
        _scholar_score and keep the highest-scoring one; otherwise keep the
        first result.

    Raises NotImplementedError when the chosen result has no bibtex link.
    NOTE(review): raises IndexError when the search returns no results —
    confirm callers expect that.
    """
    import scholarly.scholarly
    # NOTE(review): `import scholarly.scholarly` binds the top-level package
    # as `scholarly`, so the monkey-patch below lands on the package object —
    # confirm scholarly's internals actually use the patched attribute.
    scholarly._get_page = _get_page_fast # remove waiting time
    logger.debug(txt)
    search_query = scholarly.search_pubs_query(txt)
    # get the most likely match of the first results
    results = list(search_query)
    if len(results) > 1 and assess_results:
        maxscore = 0
        result = results[0]
        for res in results:
            score = _scholar_score(txt, res.bib)
            if score > maxscore:
                maxscore = score
                result = res
    else:
        result = results[0]
    # use url_scholarbib to get bibtex from google
    if getattr(result, 'url_scholarbib', ''):
        bibtex = scholarly._get_page(result.url_scholarbib).strip()
    else:
        raise NotImplementedError('no bibtex import link. Make crossref request using title?')
    return bibtex
def _crossref_get_author(res, sep='; '):
return sep.join([p.get('given','') + p['family'] for p in res.get('author',[]) if 'family' in p])
def _crossref_score(txt, r):
    """Similarity score between query text and a crossref record
    (higher means more similar)."""
    from rapidfuzz.fuzz import token_set_ratio
    total = 0
    if 'author' in r:
        family_names = [p['family'] for p in r.get('author', []) if 'family' in p]
        total += token_set_ratio(' '.join(family_names), txt)
    if 'title' in r:
        total += token_set_ratio(r['title'][0], txt)
    if 'abstract' in r:
        total += token_set_ratio(r['abstract'], txt)
    return total
def crossref_to_bibtex(r):
    """Convert a crossref JSON record `r` (a dict) to a bibtex string.

    Maps crossref fields (author, issued date, DOI, URL, title,
    container-title, volume/issue/page, publisher, type) onto one bibtex
    entry and serializes it with bibtexparser.  The entry key is
    '<FirstFamily>_<year>' (or just the year when no author is present).
    """
    bib = {}
    # authors as 'Family, Given' joined with ' and '; brace-protect
    # multi-word family names so bibtex keeps them intact
    if 'author' in r:
        family = lambda p: p['family'] if len(p['family'].split()) == 1 else '{'+p['family']+'}'
        bib['author'] = ' and '.join([family(p) + ', '+ p.get('given','')
            for p in r.get('author',[]) if 'family' in p])
    # publication date from 'issued' ('date-parts' is [[year, month, day]])
    k = 'issued'
    if k in r and 'date-parts' in r[k] and len(r[k]['date-parts'])>0:
        date = r[k]['date-parts'][0]
        bib['year'] = str(date[0])
        if len(date) >= 2:
            bib['month'] = str(date[1])
    # straightforward field mappings
    if 'DOI' in r: bib['doi'] = r['DOI']
    if 'URL' in r: bib['url'] = r['URL']
    if 'title' in r: bib['title'] = r['title'][0]
    if 'container-title' in r: bib['journal'] = r['container-title'][0]
    if 'volume' in r: bib['volume'] = r['volume']
    if 'issue' in r: bib['number'] = r['issue']
    if 'page' in r: bib['pages'] = r['page']
    if 'publisher' in r: bib['publisher'] = r['publisher']
    # entry type -- BUG FIX: the original read bib.get('type', ...), but
    # 'type' is never stored in bib, so every record silently mapped to
    # 'article'; read the type from the crossref record instead.  Also
    # renamed to avoid shadowing the builtin `type`.
    entry_type = r.get('type', 'journal-article')
    type_mapping = {'journal-article':'article'}
    bib['ENTRYTYPE'] = type_mapping.get(entry_type, entry_type)
    # bibtex key
    year = str(bib.get('year','0000'))
    if 'author' in r:
        ID = r['author'][0]['family'] + '_' + year
    else:
        ID = year
    bib['ID'] = ID
    db = bibtexparser.bibdatabase.BibDatabase()
    db.entries.append(bib)
    return bibtexparser.dumps(db)
# @cached('crossref-bibtex-fulltext.json', hashed_key=True)
def fetch_bibtex_by_fulltext_crossref(txt, **kw):
    """Query crossref with free text and return the best match as bibtex.

    txt : query text; **kw is forwarded to Works.query (e.g. author=, title=).
    Raises ValueError when the query yields no results.
    """
    work = Works(etiquette=my_etiquette)
    logger.debug('crossref fulltext seach:\n'+txt)
    # get the most likely match of the first results
    # results = []
    # for i, r in enumerate(work.query(txt).sort('score')):
    # results.append(r)
    # if i > 50:
    # break
    # single raw GET on the query URL instead of iterating the lazy query
    query = work.query(txt, **kw).sort('score')
    query_result = query.do_http_request('get', query.url, custom_header={'user-agent':str(query.etiquette)}).text
    results = json.loads(query_result)['message']['items']
    # keep the result whose author/title/abstract best match the query text
    if len(results) > 1:
        maxscore = 0
        result = results[0]
        for res in results:
            score = _crossref_score(txt, res)
            if score > maxscore:
                maxscore = score
                result = res
        logger.info('score: '+str(maxscore))
    elif len(results) == 0:
        raise ValueError('crossref fulltext: no results')
    else:
        result = results[0]
    # convert to bibtex
    return crossref_to_bibtex(result).strip()
def fetch_entry(e):
    """Fetch a fresh bibtex entry (as a dict) for an existing entry `e`.

    If `e` carries a valid DOI, the record is fetched by DOI; otherwise a
    crossref fulltext query is built from the author / title fields.

    Raises ValueError when neither a DOI nor author/title are available.
    """
    if 'doi' in e and isvaliddoi(e['doi']):
        bibtex = fetch_bibtex_by_doi(e['doi'])
    else:
        kw = {}
        if e.get('author',''):
            kw['author'] = latex_to_unicode(family_names(e['author']))
        if e.get('title',''):
            # NOTE(review): family_names() applied to the title looks
            # suspicious — confirm intent; behavior kept as-is.
            kw['title'] = latex_to_unicode(family_names(e['title']))
        if kw:
            bibtex = fetch_bibtex_by_fulltext_crossref('', **kw)
        else:
            # BUG FIX: the original constructed ValueError without raising
            # it, then crashed with NameError on the undefined `bibtex`.
            raise ValueError('no author nor title field')
    db = bibtexparser.loads(bibtex)
    return db.entries[0]
|
{
"content_hash": "75b95e01ae15a7d2147f19a93969dc77",
"timestamp": "",
"source": "github",
"line_count": 388,
"max_line_length": 119,
"avg_line_length": 30.6520618556701,
"alnum_prop": 0.5970739090221139,
"repo_name": "perrette/myref",
"id": "ab430e7c62c0181e5f12e9296edd465c7110e0b8",
"size": "11893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "papers/extract.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "314500"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.