from django.test import TestCase
from django_keycloak.factories import OpenIdConnectProfileFactory
from django_keycloak.tests.mixins import MockTestCaseMixin
from django_keycloak.auth.backends import KeycloakAuthorizationBase
class BackendsKeycloakAuthorizationBaseHasPermTestCase(
MockTestCaseMixin, TestCase):
def setUp(self):
self.backend = KeycloakAuthorizationBase()
self.profile = OpenIdConnectProfileFactory()
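# Mock the permissions that would normally be derived from the user's RPT
# (requesting party token) obtained from Keycloak.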
self.setup_mock(
'django_keycloak.auth.backends.KeycloakAuthorizationBase.'
'get_all_permissions',
return_value=[
{
'resource_set_name': 'Resource',
'scopes': [
'Read',
'Update'
]
},
{
'resource_set_name': 'Resource2'
}
]
)
def test_resource_scope_should_have_permission(self):
"""
Case: Permission is expected that is available to the user.
Expected: Permission granted.
"""
permission = self.backend.has_perm(
user_obj=self.profile.user, perm='Resource.Read')
self.assertTrue(permission)
def test_resource_no_scope_should_not_have_permission(self):
""""
Case: Permission is formatted as resource only which does not exist as
such in the RPT.
Expected: Permission denied.
"""
permission = self.backend.has_perm(
user_obj=self.profile.user, perm='Resource')
self.assertFalse(permission)
def test_resource_other_scope_should_not_have_permission(self):
""""
Case: Permission is expected with a scope that is not available to
the user according to the RPT.
Expected: Permission denied.
"""
permission = self.backend.has_perm(
user_obj=self.profile.user, perm='Resource.Create')
self.assertFalse(permission)
def test_other_resource_other_scope_should_not_have_permission(self):
""""
Case: Permission is expected that is not available to the user
according to the RPT.
Expected: Permission denied.
"""
permission = self.backend.has_perm(
user_obj=self.profile.user, perm='OtherResource.OtherScope')
self.assertFalse(permission)
def test_resource_no_scope_should_have_permission(self):
""""
Case: Permission is expected with no scope provided, but scope is
also not provided in the RPT.
Expected: Permission granted.
"""
permission = self.backend.has_perm(
user_obj=self.profile.user, perm='Resource2')
self.assertTrue(permission)
|
#!/usr/bin/env python3
##############################################################################
# Project: arrayfunc
# Module: benchmark_fma.py
# Purpose: Benchmark tests for 'arrayfunc' functions.
# Language: Python 3.5
# Date: 20-Dec-2018.
# Ver: 07-Sep-2021.
#
###############################################################################
#
# Copyright 2014 - 2021 Michael Griffin <m12.griffin@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##############################################################################
##############################################################################
import time
import array
import itertools
import math
import platform
import json
import collections
import argparse
import arrayfunc
##############################################################################
########################################################
def InitOptionData(arraycode, arraysize, funcname):
"""Initialise the data used only for some tests.
"""
odata = collections.namedtuple('optiondata', ['truediv_type', 'ldexp_y',
'compval', 'pycomp', 'startcycle', 'endcycle',
'invertmaxval', 'invertop', 'fidataout'])
optiondata = odata
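# Note: attributes are assigned directly on the namedtuple class object rather
# than on an instance; it is used here simply as a bag of named attributes.
# The same pattern is used for 'arraydata' in InitDataArrays below.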
# Ensure the data is in the right format for the array type.
if arraycode in ('f', 'd'):
optiondata.truediv_type = float
else:
optiondata.truediv_type = int
# Function ldexp needs a specific array type as the second parameter.
if funcname == 'ldexp':
ydata = [-2.0, -1.0, 1.0, 2.0]
optiondata.ldexp_y = int(ydata[-1])
else:
optiondata.ldexp_y = None
# This is used for some tests.
if arraycode in ('f', 'd'):
optiondata.compval = float(0)
else:
optiondata.compval = int(0)
# Used for compress.
if 'fma' == 'compress':
optiondata.compdata = array.array(arraycode, [1,0,1,0])
optiondata.pycomp = array.array(arraycode, (x for x,y in zip(itertools.cycle(optiondata.compdata), itertools.repeat(0, arraysize))))
else:
optiondata.compdata = None
optiondata.pycomp = None
# Used for cycle.
if 'fma' == 'cycle':
optiondata.startcycle = comptype(arraycode, 0)
optiondata.endcycle = comptype(arraycode, 127)
else:
optiondata.startcycle = None
optiondata.endcycle = None
# Used for invert.
if 'fma' == 'invert':
optiondata.invertmaxval = allinvertlimits[arraycode]
if arraycode in ('b', 'h', 'i', 'l', 'q'):
optiondata.invertop = invertpysigned
else:
optiondata.invertop = invertpyunsigned
else:
optiondata.invertmaxval = None
optiondata.invertop = None
# Used for findindices.
if 'fidataout' in ('dataout'):
optiondata.fidataout = array.array('q', itertools.repeat(0, arraysize))
else:
optiondata.fidataout = None
return optiondata
########################################################
def InitDataArrays(arraycode, arraysize):
"""Initialise the data arrays used to run the tests.
"""
adata = collections.namedtuple('arraydata', ['datax', 'dataout',
'yvalue', 'zvalue', 'arraylength'])
arraydata = adata
# Ensure the data is in the right format for the array type.
if arraycode in ('f', 'd'):
xdata = [float(x) for x in [0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0]]
else:
xdata = [int(x) for x in [0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0]]
arraydata.datax = array.array(arraycode, (x for x,y in zip(itertools.cycle(xdata), itertools.repeat(0, arraysize))))
assert len(arraydata.datax) == arraysize, 'datax is not expected length %d' % len(arraydata.datax)
arraydata.arraylength = len(arraydata.datax)
# Y data.
ydata = [-2.0, -1.0, 1.0, 2.0]
if len(ydata) > 0:
yvalue = abs(ydata[-1])
if arraycode in ('f', 'd'):
arraydata.yvalue = float(yvalue)
else:
arraydata.yvalue = int(yvalue)
else:
arraydata.yvalue = None
# Z data.
zdata = [-2.0, -1.0, 1.0, 2.0]
if len(zdata) > 0:
zvalue = abs(zdata[-1])
if arraycode in ('f', 'd'):
arraydata.zvalue = float(zvalue)
else:
arraydata.zvalue = int(zvalue)
else:
arraydata.zvalue = None
# Output array.
if 'dataout' in ('dataout'):
arraydata.dataout = array.array(arraycode, itertools.repeat(0, arraydata.arraylength))
assert len(arraydata.dataout) == arraysize, 'dataout is not expected length %d' % len(arraydata.dataout)
else:
arraydata.dataout = None
return arraydata
########################################################
def calibrateruntime(arraycode, arraysize, arraydata, optiondata, runtimetarget):
"""Calibrate the run time for Python and default ArrayFunc.
"""
pyitercounts = 1
afitercounts = 50
# First, do a timing calibration run.
# Python native time.
pytime = BenchmarkPython(pyitercounts, arraycode, arraysize, arraydata, optiondata)
# Arrayfunc time.
aftime = BenchmarkAF(afitercounts, arraycode, arraydata, optiondata)
# Now calculate the average execution time and adjust the iterations
# so that the tests will take approximately 0.1 seconds.
# The time returned by the benchmark function is per iteration, so
# we don't need to adjust for this again.
pyitercounts = int(runtimetarget / pytime)
afitercounts = int(runtimetarget / aftime)
# Make sure the iteration count is at least 1.
if pyitercounts < 1:
pyitercounts = 1
if afitercounts < 1:
afitercounts = 1
return pyitercounts, afitercounts
########################################################
def calibratesimdruntime(arraycode, arraydata, optiondata, runtimetarget):
"""Calibrate the run time with SIMD disabled.
"""
afitersidmcounts = 50
# Arrayfunc time without SIMD for functions with SIMD.
aftimenosimd = BenchmarkAFErrTrueSimdFalse(afitersidmcounts, arraycode, arraydata, optiondata)
afitersidmcounts = int(runtimetarget / aftimenosimd)
if afitersidmcounts < 1:
afitersidmcounts = 1
return afitersidmcounts
########################################################
def BenchmarkPython(pyitercounts, arraycode, arraysize, arraydata, optiondata):
"""Measure execution time of native Python code.
"""
# This is used for some tests only.
result = True
# We provide a local reference to the arrays to make the representation simpler.
datax = arraydata.datax
dataout = arraydata.dataout
yvalue = arraydata.yvalue
zvalue = arraydata.zvalue
arraylength = arraydata.arraylength
# Used for ldexp only.
ldexp_y = optiondata.ldexp_y
compval = optiondata.compval
truediv_type = optiondata.truediv_type
fidataout = optiondata.fidataout
startcycle = optiondata.startcycle
endcycle = optiondata.endcycle
pycomp = optiondata.pycomp
compdata = optiondata.compdata
invertmaxval = optiondata.invertmaxval
invertop = optiondata.invertop
# Time for python.
starttime = time.perf_counter()
if True:
for x in range(pyitercounts):
for i in range(arraylength):
dataout[i] = datax[i] * yvalue + zvalue
else:
for x in range(pyitercounts):
dataout[i] = datax[i] * yvalue + zvalue
endtime = time.perf_counter()
pythontime = (endtime - starttime) / pyitercounts
return pythontime
########################################################
def BenchmarkAF(afitercounts, arraycode, arraydata, optiondata):
"""Measure execution time for arrayfunc with defaults.
"""
# This is used for some tests only.
result = True
# We provide a local reference to the arrays to make the representation simpler.
datax = arraydata.datax
dataout = arraydata.dataout
yvalue = arraydata.yvalue
zvalue = arraydata.zvalue
# Used for ldexp only.
ldexp_y = optiondata.ldexp_y
compval = optiondata.compval
fidataout = optiondata.fidataout
startcycle = optiondata.startcycle
endcycle = optiondata.endcycle
pycomp = optiondata.pycomp
compdata = optiondata.compdata
# Time for arrayfunc version.
starttime = time.perf_counter()
for i in range(afitercounts):
arrayfunc.fma(datax, yvalue, zvalue, dataout)
endtime = time.perf_counter()
aftime = (endtime - starttime) / afitercounts
return aftime
########################################################
def BenchmarkAFErrTrueSimdTrue(afitercounts, arraycode, arraydata, optiondata):
"""Measure execution time for arrayfunc with MathErrors ignored and SIMD turned off.
"""
# This is used for some tests only.
result = True
# We provide a local reference to the arrays to make the representation simpler.
datax = arraydata.datax
dataout = arraydata.dataout
yvalue = arraydata.yvalue
zvalue = arraydata.zvalue
# Used for ldexp only.
ldexp_y = optiondata.ldexp_y
compval = optiondata.compval
fidataout = optiondata.fidataout
startcycle = optiondata.startcycle
endcycle = optiondata.endcycle
pycomp = optiondata.pycomp
compdata = optiondata.compdata
# Time for arrayfunc version.
starttime = time.perf_counter()
for i in range(afitercounts):
arrayfunc.fma(datax, yvalue, zvalue, dataout, matherrors=True)
endtime = time.perf_counter()
aftime = (endtime - starttime) / afitercounts
return aftime
########################################################
def BenchmarkAFErrFalseSimdTrue(afitercounts, arraycode, arraydata, optiondata):
"""Measure execution time for arrayfunc with SIMD turned off.
"""
# This is used for some tests only.
result = True
# We provide a local reference to the arrays to make the representation simpler.
datax = arraydata.datax
dataout = arraydata.dataout
yvalue = arraydata.yvalue
zvalue = arraydata.zvalue
# Used for ldexp only.
ldexp_y = optiondata.ldexp_y
compval = optiondata.compval
fidataout = optiondata.fidataout
startcycle = optiondata.startcycle
endcycle = optiondata.endcycle
pycomp = optiondata.pycomp
compdata = optiondata.compdata
# Time for arrayfunc version.
starttime = time.perf_counter()
for i in range(afitercounts):
arrayfunc.fma(datax, yvalue, zvalue, dataout)
endtime = time.perf_counter()
aftime = (endtime - starttime) / afitercounts
return aftime
########################################################
def BenchmarkAFErrTrueSimdFalse(afitercounts, arraycode, arraydata, optiondata):
"""Measure execution time for arrayfunc with matherrors=True.
"""
# This is used for some tests only.
result = True
# We provide a local reference to the arrays to make the representation simpler.
datax = arraydata.datax
dataout = arraydata.dataout
yvalue = arraydata.yvalue
zvalue = arraydata.zvalue
# Used for ldexp only.
ldexp_y = optiondata.ldexp_y
compval = optiondata.compval
fidataout = optiondata.fidataout
startcycle = optiondata.startcycle
endcycle = optiondata.endcycle
pycomp = optiondata.pycomp
compdata = optiondata.compdata
# Time for arrayfunc version.
starttime = time.perf_counter()
for i in range(afitercounts):
arrayfunc.fma(datax, yvalue, zvalue, dataout, matherrors=True)
endtime = time.perf_counter()
aftime = (endtime - starttime) / afitercounts
return aftime
##############################################################################
def GetCmdArguments():
""" Get any command line arguments. These modify the operation of the program.
rawoutput = If specified, will output raw data instead of a report.
mintest = If specified, will do a minimal test.
arraysize = Size of the array in elements.
runtimetarget = The target length of time in seconds to run a benchmark for.
"""
arraysize = 100000
runtimetarget = 0.1
# Get any command line arguments.
parser = argparse.ArgumentParser()
# Output just the raw data.
parser.add_argument('--rawoutput', action = 'store_true', help = 'Output raw data.')
# Do a minimal test. This will save time when full results are not required.
parser.add_argument('--mintest', action = 'store_true', help = 'Do minimal test.')
# Size of the test arrays.
parser.add_argument('--arraysize', type = int, default = arraysize,
help='Size of test arrays in number of elements.')
# The length of time to run each benchmark.
parser.add_argument('--runtimetarget', type = float, default = runtimetarget,
help='Target length of time to run each benchmark for.')
args = parser.parse_args()
return args
##############################################################################
CmdArgs = GetCmdArguments()
ArraySize = CmdArgs.arraysize
RunTimeTarget = CmdArgs.runtimetarget
##############################################################################
# Run the benchmarks.
funcname = 'fma'
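# fma is a fused multiply-add: dataout = (datax * yvalue) + zvalue, matching
# the pure Python reference loop in BenchmarkPython above.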
supportedarrays = ('f', 'd')
# True if platform supports SIMD.
PlatformHasSIMD = arrayfunc.simdsupport.hassimd
# Detect the hardware platform, and assign the correct platform data table to it.
def platformdetect():
""" Return a string containing the array codes if the machine supports
SIMD for this function. The results will vary depending upon which platform
it is running on.
"""
# These are the supported options for SIMD. The values depend on
# the particular function in question.
# i686 = 32 bit x86, this never has SIMD.
# x86_64 = 64 bit x86, supported on Linux with GCC only.
# armv7l = 32 bit ARM, for Raspberry Pi 3 with 32 bit Linux.
# aarch64 = 64 bit ARM, for Raspberry Pi 3 or 4 with 64 bit Linux.
# These values were derived from the platform data reported by the benchmark.
signatures = {
'i686' : '',
'x86_64' : '',
'armv7l' : '',
'aarch64' : '',
}
return signatures.get(platform.machine(), '')
if PlatformHasSIMD:
SIMDArrays = platformdetect()
else:
SIMDArrays = ''
# Uses SIMD on at least one array type.
HasSIMDOption = len(SIMDArrays) > 0
##############################################################################
# True if this benchmark allows math error detection to be turned off.
# We check a copy of the equation from the template in order to check this.
# Note: Need double quotes around the equation because some functions contain
# a string with single quotes, and this would cause a conflict if we used single
# quotes to enclose this.
HasMathErrorOption = 'matherrors' in "arrayfunc.fma(datax, yvalue, zvalue, dataout, matherrors=True)"
##############################################################################
# Used to collect the results.
PyData = {}
AfData = {}
AfDataErrTrueSimdTrue = {}
AfDataErrFalseSimdTrue = {}
AfDataErrTrueSimdFalse = {}
# Test using each array type.
for arraycode in supportedarrays:
# This array type supports SIMD. Some functions do not support SIMD at all,
# while others support it only for some array types on some platforms.
ArrayHasSIMD = arraycode in SIMDArrays
# Initialise the data arrays.
ArrayData = InitDataArrays(arraycode, ArraySize)
# Initialise the optional data elements that are only used for some tests.
OptionData = InitOptionData(arraycode, ArraySize, funcname)
# Calibrate the test runtime targets.
pyitercounts, afitercounts = calibrateruntime(arraycode, ArraySize, ArrayData, OptionData, RunTimeTarget)
if ArrayHasSIMD:
afitersidmcounts = calibratesimdruntime(arraycode, ArrayData, OptionData, RunTimeTarget)
# Benchmark the Python implementation.
PyData[arraycode] = BenchmarkPython(pyitercounts, arraycode, ArraySize, ArrayData, OptionData)
# Benchmark the Arrayfunc implementation with default parameters.
# This covers user requested minimal tests, plus functions which do not
# have either error checking or SIMD.
AfData[arraycode] = BenchmarkAF(afitercounts, arraycode, ArrayData, OptionData)
# A minimal test only involves the default parameters.
if not CmdArgs.mintest:
# Function has error checking but not SIMD. Test error checking turned off.
# The default case covers with error checking turned on.
if HasMathErrorOption and not ArrayHasSIMD:
AfDataErrTrueSimdTrue[arraycode] = BenchmarkAFErrTrueSimdTrue(afitercounts, arraycode, ArrayData, OptionData)
# Function does not have error checking but does have SIMD.
# Test SIMD turned off. The default case covers with SIMD turned on.
if (not HasMathErrorOption) and ArrayHasSIMD:
AfDataErrTrueSimdTrue[arraycode] = BenchmarkAFErrTrueSimdTrue(afitercounts, arraycode, ArrayData, OptionData)
# Function has both error checking and SIMD. Check for:
# error checking on and SIMD off,
# error checking off and SIMD off,
# error checking off and SIMD on
if HasMathErrorOption and ArrayHasSIMD:
AfDataErrFalseSimdTrue[arraycode] = BenchmarkAFErrFalseSimdTrue(afitercounts, arraycode, ArrayData, OptionData)
AfDataErrTrueSimdTrue[arraycode] = BenchmarkAFErrTrueSimdTrue(afitercounts, arraycode, ArrayData, OptionData)
AfDataErrTrueSimdFalse[arraycode] = BenchmarkAFErrTrueSimdFalse(afitersidmcounts, arraycode, ArrayData, OptionData)
##############################################################################
##############################################################################
# Report the benchmarks.
# The format string used to print out results in stand alone mode.
def sformatter(pos, val):
if val is None:
return 17 * ' '
elif (val is not None) and (1.0 <= val < 10.0):
return '{%d:>8.1f} ' % (pos + 1)
elif (val is not None) and (val < 1.0):
return '{%d:>8.2f} ' % (pos + 1)
else:
return '{%d:>8.0f} ' % (pos + 1)
def printline(label1, col2, col3, col4, col5):
lineresult = [col2, col3, col4, col5]
standformat = '{0:^7}' + ''.join([sformatter(x,y) for x,y in enumerate(lineresult)])
print(standformat.format(label1, col2, col3, col4, col5))
# Report labels will vary depending on the options available with this function.
if HasMathErrorOption and HasSIMDOption:
theaderlabels = 'Err on SIMD off Err off SIMD off Err off SIMD on'
elif HasMathErrorOption and (not HasSIMDOption):
theaderlabels = ' Error check off'
elif (not HasMathErrorOption) and HasSIMDOption:
theaderlabels = ' SIMD off'
else:
theaderlabels = ''
theader = """
Function = {0}
======= ================ ================ ================ ================
Array AF vs Python {1}
======= ================ ================ ================ ================""".format(funcname, theaderlabels)
tfooter = '======= ================ ================ ================ ================'
def calcstats(statscolumn):
"""Calculate the states for a column of data.
Return the average, max, and min.
If the data column is empty, return None for each value.
"""
if len(statscolumn) > 0:
return sum(statscolumn) / len(statscolumn), max(statscolumn), min(statscolumn)
else:
return None, None, None
########################################################
def outputstandalone():
"""Output the results for when the benchmark is run in standalone mode.
This outputs whatever data is present, and so inherently adapts
itself to functions which have varying test options.
"""
totalpyrel = []
totalmathnosimdrel = []
totalsimdvsnosimd = []
totalnoerrwithsimd = []
print(theader)
for x in supportedarrays:
# Default versus native Python.
pyafrel = PyData[x] / AfData[x]
totalpyrel.append(pyafrel)
# Default versus math error checking on and no SIMD.
# If the function doesn't use SIMD then comparing it with SIMD off
# is pointless. Also skip for array types which don't use SIMD or
# for minimal tests.
if x in AfDataErrFalseSimdTrue:
mathnosimdrel = AfData[x] / AfDataErrFalseSimdTrue[x]
totalmathnosimdrel.append(mathnosimdrel)
else:
mathnosimdrel = None
# Default versus no error checking and no SIMD.
# If the function doesn't use math error checking then comparing it
# with math error off is pointless. Also skip for minimal tests.
if x in AfDataErrTrueSimdTrue:
simdnoerrnosimdrel = AfData[x] / AfDataErrTrueSimdTrue[x]
totalsimdvsnosimd.append(simdnoerrnosimdrel)
else:
simdnoerrnosimdrel = None
# No data exists if SIMD is not available.
if x in AfDataErrTrueSimdFalse:
# Default versus error checking turned off but SIMD enabled.
noerrwithsimd = AfData[x] / AfDataErrTrueSimdFalse[x]
totalnoerrwithsimd.append(noerrwithsimd)
else:
noerrwithsimd = None
printline(x, pyafrel, mathnosimdrel, simdnoerrnosimdrel, noerrwithsimd)
print(tfooter)
print()
print(tfooter)
# Calculate stats.
# Default versus native Python.
col2avg, col2max, col2min = calcstats(totalpyrel)
# Default versus math error checking on and no SIMD.
col3avg, col3max, col3min = calcstats(totalmathnosimdrel)
# Default versus no error checking and no SIMD.
col4avg, col4max, col4min = calcstats(totalsimdvsnosimd)
# Default versus error checking turned off but SIMD enabled.
col5avg, col5max, col5min = calcstats(totalnoerrwithsimd)
printline('avg', col2avg, col3avg, col4avg, col5avg)
printline('max', col2max, col3max, col4max, col5max)
printline('min', col2min, col3min, col4min, col5min)
print(tfooter)
########################################################
# If raw data is requested, output the raw numbers as JSON.
# This will normally be used by a parent process which called this
# benchmark as a child process.
if CmdArgs.rawoutput:
# Called by another process, return data as json.
testresults = {'pydata' : PyData,
'afdata' : AfData,
'afdataerrtruesimdtrue' : AfDataErrTrueSimdTrue,
'afdataerrtruesimdfalse' : AfDataErrTrueSimdFalse,
'afdataerrfalsesimdtrue' : AfDataErrFalseSimdTrue,
'benchname' : 'arrayfunc',
}
print(json.dumps(testresults))
else:
# If standalone, print out data in readable format.
outputstandalone()
##############################################################################
|
import os
import pandas as pd
import sys
from sentence_transformers import SentenceTransformer, InputExample, models
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, BinaryClassificationEvaluator
# Get the label from the second character of the annotation string (e.g., '(0, 5)' returns '0')
def get_lab(x):
return x[1]
# Binarize the labels as in the original paper: on the training set, discard label 2
# (values above are positive, values below are negative); on the test set, discard
# label 3 (values above are positive, values below are negative). The test set is
# labeled by experts; the train set is not.
def get_binary_label_train(x):
label = int(x)
if label>=3:
return 1
elif label<=1:
return 0
else:
return -1
def get_binary_label_test(x):
label = int(x)
if label>=4:
return 1
elif label<=2:
return 0
else:
return -1
# Normalize labels to [0, 1]
def get_cont_label(x):
label = float(x)
return label/5.
model_name = sys.argv[1]
batch_size = 512
# Load train, dev and test sets
df_train = pd.read_csv('SemEval-PIT2015-github/data/train.data',
sep='\t', header=None)
df_train['x'] = df_train[4].apply(get_lab)
df_train['cont_label'] = df_train['x'].apply(get_cont_label)
df_train['bin_label'] = df_train['x'].apply(get_binary_label_train)
df_dev = pd.read_csv('SemEval-PIT2015-github/data/dev.data',
sep='\t', header=None)
df_dev['x'] = df_dev[4].apply(get_lab)
df_dev['cont_label'] = df_dev['x'].apply(get_cont_label)
df_dev['bin_label'] = df_dev['x'].apply(get_binary_label_train)
df_test = pd.read_csv('SemEval-PIT2015-github/data/test.data',
sep='\t', header=None)
df_test['cont_label'] = df_test[4].apply(get_cont_label)
df_test['bin_label'] = df_test[4].apply(get_binary_label_test)
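# In the .values rows used below, x[2] and x[3] are the two sentences of the
# pair, while the higher indices (x[7], x[8], x[9]) refer to the label columns
# appended above.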
binary_train_examples = [InputExample(texts=[x[2], x[3]], label=x[9])
for x in df_train[df_train['bin_label']!=-1].values]
cont_train_examples = [InputExample(texts=[x[2], x[3]], label=x[8])
for x in df_train.values]
binary_dev_examples = [InputExample(texts=[x[2], x[3]], label=x[9])
for x in df_dev[df_dev['bin_label']!=-1].values]
cont_dev_examples = [InputExample(texts=[x[2], x[3]], label=x[8])
for x in df_dev.values]
binary_test_examples = [InputExample(texts=[x[2], x[3]], label=x[8])
for x in df_test[df_test['bin_label']!=-1].values]
cont_test_examples = [InputExample(texts=[x[2], x[3]], label=x[7])
for x in df_test.values]
train_binary_evaluator = BinaryClassificationEvaluator.from_input_examples(binary_train_examples,
batch_size=batch_size,
name='PIT-binary_train')
train_cont_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(cont_train_examples,
batch_size=batch_size,
name='PIT-cont_train')
dev_binary_evaluator = BinaryClassificationEvaluator.from_input_examples(binary_dev_examples,
batch_size=batch_size,
name='PIT-binary_dev')
dev_cont_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(cont_dev_examples,
batch_size=batch_size,
name='PIT-cont_dev')
test_binary_evaluator = BinaryClassificationEvaluator.from_input_examples(binary_test_examples,
batch_size=batch_size,
name='PIT-binary_test')
test_cont_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(cont_test_examples,
batch_size=batch_size,
name='PIT-cont_test')
# Load the model
if model_name[:4] == 'orig': # If it is not already a SentenceTransformer model
try:
os.mkdir(model_name)
except OSError:
print ("Creation of the directory %s failed" % model_name)
word_embedding_model = models.Transformer(model_name[5:], max_seq_length=512)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
else:
model = SentenceTransformer(model_name)
if model_name[:2] != 'my':
model_name = 'my-'+model_name
output_path = model_name.replace('/', '-')
# Test the model on binarized and continuous data
train_binary_evaluator(model, output_path=output_path)
train_cont_evaluator(model, output_path=output_path)
dev_binary_evaluator(model, output_path=output_path)
dev_cont_evaluator(model, output_path=output_path)
test_binary_evaluator(model, output_path=output_path)
test_cont_evaluator(model, output_path=output_path)
|
# -*- coding: utf-8 -*-
import hashlib
from .restful import check_res, session_get, session_post, session_delete, session_put
from data import SessionModel
def calPassword(pwd: str):
sha = hashlib.sha256()
sha.update(bytes(pwd, encoding='utf8'))
return sha.hexdigest()
class UserService:
@staticmethod
def query_users(session: SessionModel, userid: int = None, name: str = None):
data = {}
if userid:
data["userid"] = userid
if name:
data["name"] = name
r = session_get(session, "user", "user", params=data) if data else session_get(session, "user", "user")
check_res(r)
data = r["data"]
return data if data is not None else []
@staticmethod
def add_user(session: SessionModel, name: str, password: str):
r = session_post(session, "user", "user", json={"user": name, "password": calPassword(password)})
check_res(r)
return r
@staticmethod
def remove_user(session: SessionModel, userid):
r = session_delete(session, "user", "user", json={"userid": userid})
check_res(r)
return r
@staticmethod
def reset_password(session: SessionModel, userid):
r = session_put(session, "user", "reset_password", json={"userid": userid})
check_res(r)
return r
@staticmethod
def change_password(session: SessionModel, old_pwd, new_pwd, confirm_pwd):
r = session_put(
session,
"user",
"change_password",
json=dict(old=calPassword(old_pwd), new=calPassword(new_pwd), confirm=calPassword(confirm_pwd))
)
check_res(r)
return r
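# Illustrative usage sketch (assumes an authenticated SessionModel instance
# named `session` obtained elsewhere; the values below are only examples):
#
#   users = UserService.query_users(session, name="alice")
#   UserService.add_user(session, name="bob", password="initial-password")
#   UserService.reset_password(session, userid=42)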
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 4 20:48:38 2018
@author: James Jiang
"""
all_lines = [line.rstrip('\n') for line in open('Data.txt')]
all_containers = [int(i) for i in all_lines]
all_containers.sort(reverse=True)
total_number = 2**(len(all_containers))
count = 0
min_num = len(all_containers)
i = 0
while i < total_number:
number_bin = bin(i)[2:].zfill(len(all_containers))
sum_containers = 0
for j in range(len(all_containers)):
if number_bin[j] == '1':
sum_containers += all_containers[j]
if sum_containers > 150:
break
if sum_containers == 150:
if number_bin.count('1') == min_num:
count += 1
i += 2**number_bin[::-1].index('1')
elif number_bin.count('1') < min_num:
count = 0
i = 0
min_num = number_bin.count('1')
else:
i += 1
else:
i += 1
print(count)
|
class CandlestickInterval:
MIN1 = "1min"
MIN5 = "5min"
MIN15 = "15min"
MIN30 = "30min"
MIN60 = "60min"
HOUR4 = "4hour"
DAY1 = "1day"
MON1 = "1mon"
WEEK1 = "1week"
YEAR1 = "1year"
INVALID = None
class OrderSide:
BUY = "buy"
SELL = "sell"
INVALID = None
class TradeDirection:
BUY = "buy"
SELL = "sell"
INVALID = None
class OrderType:
SELL_LIMIT = "sell-limit"
BUY_LIMIT = "buy-limit"
BUY_MARKET = "buy-market"
SELL_MARKET = "sell-market"
BUY_IOC = "buy-ioc"
SELL_IOC = "sell-ioc"
BUY_LIMIT_MAKER = "buy-limit-maker"
SELL_LIMIT_MAKER = "sell-limit-maker"
BUY_STOP_LIMIT = "buy-stop-limit"
SELL_STOP_LIMIT = "sell-stop-limit"
BUY_LIMIT_FOK = "buy-limit-fok"
SELL_LIMIT_FOK = "sell-limit-fok"
BUY_STOP_LIMIT_FOK = "buy-stop-limit-fok"
SELL_STOP_LIMIT_FOK = "sell-stop-limit-fok"
INVALID = None
class AlgoOrderType:
LIMIT = "limit"
MARKET = "market"
class AlgoOrderStatus:
CANCELED = "canceled"
REJECTED = "rejected"
TRIGGERED = "triggered"
class AccountType:
SPOT = "spot"
MARGIN = "margin"
OTC = "otc"
POINT = "point"
MINEPOLL = "minepool"
ETF = "etf"
AGENCY = "agency"
SUPER_MARGIN = "super-margin"
INVALID = None
class AccountState:
WORKING = "working"
LOCK = "lock"
INVALID = None
class AccountPointState:
WORKING = "working"
LOCK = "lock"
INVALID = None
FL_SYS = "fl-sys"
FL_MGT = "fl-mgt"
FL_END = "fl-end"
FL_NEGATIVE = "fl-negative"
class AccountBalanceUpdateType:
TRADE = "trade"
FROZEN = "frozen"
LOAN = "loan"
INTEREST = "interest"
LOAN_AVAILABLE = "loan-available"
TRANSFER_OUT_AVAILABLE = "transfer-out-available"
INVALID = None
class WithdrawState:
SUBMITTED = "submitted"
REEXAMINE = "reexamine"
CANCELED = "canceled"
PASS = "pass"
REJECT = "reject"
PRETRANSFER = "pre-transfer"
WALLETTRANSFER = "wallet-transfer"
WALEETREJECT = "wallet-reject"
CONFIRMED = "confirmed"
CONFIRMERROR = "confirm-error"
REPEALED = "repealed"
VERIFYING = "verifying"
FAILED = "failed"
INVALID = None
class DepositWithdraw:
DEPOSIT = "deposit"
WITHDRAW = "withdraw"
class DepositState:
CONFIRMING = "confirming"
SAFE = "safe"
CONFIRMED = "confirmed"
ORPHAN = "orphan"
INVALID = None
class LoanOrderState:
CREATED = "created"
ACCRUAL = "accrual"
CLEARED = "cleared"
FAILED = "failed"
INVALID = None
class OrderSource:
SYS = "sys"
WEB = "web"
API = "api"
APP = "app"
FL_SYS = "fl-sys"
FL_MGT = "fl-mgt"
SPOT_WEB = "spot-web"
SPOT_API = "spot-api"
SPOT_APP = "spot-app"
MARGIN_API = "margin-api"
MARGIN_WEB = "margin-web"
MARGIN_APP = "margin-app"
SUPER_MARGIN_API = "super-margin-api"
SUPER_MARGIN_WEB = "super-margin-web"
SUPER_MARGIN_APP = "super-margin-app"
SUPER_MARGIN_FL_SYS = "super-margin-fl-sys"
SUPER_MARGIN_FL_MGT = "super-margin-fl-mgt"
INVALID = None
class OrderState:
CREATED = "created" # for stop loss order
PRE_SUBMITTED = "pre-submitted"
SUBMITTING = "submitting"
SUBMITTED = "submitted"
PARTIAL_FILLED = "partial-filled"
CANCELLING = "cancelling"
PARTIAL_CANCELED = "partial-canceled"
FILLED = "filled"
CANCELED = "canceled"
FAILED = "failed"
PLACE_TIMEOUT = "place_timeout"
INVALID = None
class TimeInForceType:
IOC = "ioc"
FOK = "fok"
BOC = "boc"
GTC = "gtc"
class TransferMasterType:
IN = "master-transfer-in"
OUT = "master-transfer-out"
POINT_IN = "master-point-transfer-in"
POINT_OUT = "master-point-transfer-out"
INVALID = None
class EtfStatus:
NORMAL = "1"
REBALANCING_START = "2"
CREATION_AND_REDEMPTION_SUSPEND = "3"
CREATION_SUSPEND = "4"
REDEMPTION_SUSPEND = "5"
INVALID = None
class EtfSwapType:
IN = "1"
OUT = "2"
INVALID = None
class AccountChangeType:
NEWORDER = "order.place"
TRADE = "order.match"
REFUND = "order.refund"
CANCELORDER = "order.cancel"
FEE = "order.fee-refund"
TRANSFER = "margin.transfer"
LOAN = "margin.loan"
INTEREST = "margin.interest"
REPAY = "margin.repay"
OTHER = "other"
INVALID = None
class BalanceMode:
AVAILABLE = "0"
TOTAL = "1"
INVALID = None
class AccountBalanceMode:
BALANCE = "0"
TOTAL = "1"
INVALID = None
class OperateMode:
PING = "ping"
PONG = "pong"
INVALID = None
class QueryDirection:
PREV = "prev"
NEXT = "next"
INVALID = None
class TransferFuturesPro:
TO_PRO = "futures-to-pro"
TO_FUTURES = "pro-to-futures"
class MatchRole:
MAKER = "maker"
TAKER = "taker"
class DepthStep:
STEP0 = "step0"
STEP1 = "step1"
STEP2 = "step2"
STEP3 = "step3"
STEP4 = "step4"
STEP5 = "step5"
class DepthSize:
SIZE5 = 5
SIZE10 = 10
SIZE20 = 20
class MbpLevel:
MBP5 = 5
MBP10 = 10
MBP20 = 20
MBP150 = 150
class ChainDepositStatus:
ALLOWED = "allowed"
PROHIBITED = "prohibited"
INVALID = None
class ChainWithdrawStatus:
ALLOWED = "allowed"
PROHIBITED = "prohibited"
INVALID = None
class InstrumentStatus:
NORMAL = "normal"
DELISTED = "delisted"
INVALID = None
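# Note: the AccountChangeType class below redefines (and therefore overrides)
# the class of the same name declared earlier, using dashed values instead of
# dotted ones.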
class AccountChangeType:
ORDER_PLACE = "order-place"
ORDER_MATCH = "order-match"
ORDER_REFUND = "order-refund"
ORDER_CANCEL = "order-cancel"
ORDER_FEE_REFUND = "order-fee-refund"
MARGIN_TRANSFER = "margin-transfer"
MARGIN_LOAN = "margin-loan"
MARGIN_INTEREST = "margin-interest"
MARGIN_REPAY = "margin-repay"
OTHER = "other"
DEPOSIT = "deposit"
WITHDRAW = "withdraw"
INVALID = None
class FeeDeductType:
DEDUCT_BY_HT = "ht"
DEDUCT_BY_POINT = "point"
INVALID = None
class SubUidAction:
UNLOCK = "unlock"
LOCK = "lock"
INVALID = None
class SubUidState:
NORMAL = "normal"
LOCK = "lock"
INVALID = None
class OrderUpdateEventType:
CREATION = "creation"
TRADE = "trade"
CANCELLATION = "cancellation"
INVALID = None
class AccountTransactType:
TRADE = "trade"
ETF = "etf"
TRANSACT_FEE = "transact-fee"
DEDUCTION = "deduction"
TRANSFER = "transfer"
CREDIT = "credit"
LIQUIDATION = "liquidation"
INTEREST = "interest"
DEPOSIT = "deposit"
WITHDRAW = "withdraw"
WITHDRAW_FEE = "withdraw-fee"
EXCHANGE = "exchange"
OTHER = "other-types"
class SortDesc:
ASC = "asc"
DESC = "desc"
class SubuserTradePrivilegeType:
MARGIN = "isolated-margin"
SUPER_MARGIN = "cross-margin"
class SubUserTradeStatus:
ACTIVATED = "activated"
DEACTIVATED = "deactivated"
class MarketStatus:
NORMAL = 1
HALTED = 2
CANCEL_ONLY = 3
class HaltReason:
EMERGENCY_MAINTENANCE = 2
SCHEDULED_MAINTENANCE = 3
|
import errno
import json
import subprocess
import six
from . import constants
from . import errors
from .utils import create_environment_dict
from .utils import find_executable
class Store(object):
def __init__(self, program, environment=None):
""" Create a store object that acts as an interface to
perform the basic operations for storing, retrieving
and erasing credentials using `program`.
"""
self.program = constants.PROGRAM_PREFIX + program
self.exe = find_executable(self.program)
self.environment = environment
if self.exe is None:
raise errors.InitializationError(
'{} not installed or not available in PATH'.format(self.program))
def get(self, server):
""" Retrieve credentials for `server`.
If no credentials are found,
a `StoreError` will be raised.
"""
if not isinstance(server, six.binary_type):
server = server.encode('utf-8')
data = self._execute('get', server)
result = json.loads(data.decode('utf-8'))
# docker-credential-pass will return an object for inexistent servers
# whereas other helpers will exit with returncode != 0. For
# consistency, if no significant data is returned,
# raise CredentialsNotFound
if result['Username'] == '' and result['Secret'] == '':
raise errors.CredentialsNotFound('No matching credentials in {}'.format(
self.program))
return result
def store(self, server, username, secret):
""" Store credentials for `server`.
Raises a `StoreError` if an error
occurs.
"""
data_input = json.dumps({
'ServerURL': server,
'Username': username,
'Secret': secret
}).encode('utf-8')
return self._execute('store', data_input)
def erase(self, server):
""" Erase credentials for `server`.
Raises a `StoreError` if an error
occurs.
"""
if not isinstance(server, six.binary_type):
server = server.encode('utf-8')
self._execute('erase', server)
def list(self):
""" List stored credentials.
Requires v0.4.0+ of the helper.
"""
data = self._execute('list', None)
return json.loads(data.decode('utf-8'))
def _execute(self, subcmd, data_input):
output = None
env = create_environment_dict(self.environment)
try:
if six.PY3:
output = subprocess.check_output(
[self.exe, subcmd],
input=data_input,
env=env,
)
else:
process = subprocess.Popen(
[self.exe, subcmd],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
env=env,
)
output, _ = process.communicate(data_input)
if process.returncode != 0:
raise subprocess.CalledProcessError(
returncode=process.returncode, cmd='', output=output)
except subprocess.CalledProcessError as e:
raise errors.process_store_error(e, self.program)
except OSError as e:
if e.errno == errno.ENOENT:
raise errors.StoreError(
'{} not installed or not available in PATH'.format(self.program))
else:
raise errors.StoreError('Unexpected OS error "{}", errno={}'.format(
e.strerror, e.errno))
return output
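# Illustrative usage sketch (assumes a credential helper such as
# "docker-credential-pass" is installed; the server URL is only an example):
#
#   store = Store('pass')
#   store.store(server='https://index.docker.io/v1/', username='alice', secret='token')
#   creds = store.get('https://index.docker.io/v1/')
#   store.erase('https://index.docker.io/v1/')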
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Settings/Master/PlayerLevelSettings.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Settings/Master/PlayerLevelSettings.proto',
package='POGOProtos.Settings.Master',
syntax='proto3',
serialized_pb=_b('\n4POGOProtos/Settings/Master/PlayerLevelSettings.proto\x12\x1aPOGOProtos.Settings.Master\"\x9d\x01\n\x13PlayerLevelSettings\x12\x10\n\x08rank_num\x18\x01 \x03(\x05\x12\x1b\n\x13required_experience\x18\x02 \x03(\x05\x12\x15\n\rcp_multiplier\x18\x03 \x03(\x02\x12\x1c\n\x14max_egg_player_level\x18\x04 \x01(\x05\x12\"\n\x1amax_encounter_player_level\x18\x05 \x01(\x05\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_PLAYERLEVELSETTINGS = _descriptor.Descriptor(
name='PlayerLevelSettings',
full_name='POGOProtos.Settings.Master.PlayerLevelSettings',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rank_num', full_name='POGOProtos.Settings.Master.PlayerLevelSettings.rank_num', index=0,
number=1, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='required_experience', full_name='POGOProtos.Settings.Master.PlayerLevelSettings.required_experience', index=1,
number=2, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cp_multiplier', full_name='POGOProtos.Settings.Master.PlayerLevelSettings.cp_multiplier', index=2,
number=3, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_egg_player_level', full_name='POGOProtos.Settings.Master.PlayerLevelSettings.max_egg_player_level', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_encounter_player_level', full_name='POGOProtos.Settings.Master.PlayerLevelSettings.max_encounter_player_level', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=85,
serialized_end=242,
)
DESCRIPTOR.message_types_by_name['PlayerLevelSettings'] = _PLAYERLEVELSETTINGS
PlayerLevelSettings = _reflection.GeneratedProtocolMessageType('PlayerLevelSettings', (_message.Message,), dict(
DESCRIPTOR = _PLAYERLEVELSETTINGS,
__module__ = 'POGOProtos.Settings.Master.PlayerLevelSettings_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Settings.Master.PlayerLevelSettings)
))
_sym_db.RegisterMessage(PlayerLevelSettings)
# @@protoc_insertion_point(module_scope)
|
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## This is a sample controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
#########################################################################
import slugiot_setup
def _get_device_id():
"""
Check for device ID being present.
It makes two checks:
1. Whether there's a setting called device_id or not
2. Whether the setting device_id has any value or not
:return:
:rtype:
"""
device_id = slugiot_setup.settings.get_device_id()
# device_id_row = db(db.settings.setting_name == 'device_id').select().first()
# device_id = device_id_row.setting_value if device_id_row is not None else None
return device_id
def index():
"""
Index page for the client.
Shows option to set/view a device ID based on whether the ID exists or not
"""
# Check if settings exist already
device_id = _get_device_id()
return dict(device_id=device_id)
def settings():
"""
Settings page for the device to set device ID.
"""
device_id = _get_device_id()
# If the device_id is None, we have to enter one.
# Otherwise, we offer a form, which is view-only in general,
# and can be turned into an edit form if needed.
request_edit = request.vars.edit == 'y'
is_edit = device_id is None or request_edit
form = SQLFORM.factory(Field('device_id', default=device_id), readonly=not is_edit)
if is_edit:
form.add_button('Cancel', URL('default', 'settings'), _class='btn btn-warning')
edit_button = None if is_edit else A(T('Edit'), _href=URL('default', 'settings', vars={'edit': 'y'}), _class='btn btn-primary')
form.vars.device_id = device_id
if form.process().accepted:
slugiot_setup.settings.set_device_id(form.vars.device_id)
redirect(URL('default', 'index'))
return dict(form=form, edit_button=edit_button)
@request.restful()
def initialization():
"""
This endpoint is used to manage the initialization of the client's
device_id. Making a GET request should detail either what the
device_id is, or how to set it. Making a POST request is how it
is actually set.
Currently, this does not actually call the server to set the value,
but that shouldn't be too difficult to manage.
tpesout: I don't know how to set up a web2py form. Maybe someone
else can do that and use the functionality I added here to actually
do it?
"""
if (server_url == None):
return "Please configure your server_url field in applications/private/appconfig.ini"
def GET(*args, **vars):
device_id = slugiot_setup.settings.get_device_id()
if (device_id == None):
return "Please POST to this url the desired identifier for your device with the 'device_id' parameter"
return "Your device_id is: " + device_id
def POST(*args, **vars):
if (vars == None or not vars.has_key('device_id')):
return "Please POST to this url the desired identifier for your device with the 'device_id' parameter"
device_id = vars['device_id']
slugiot_setup.settings.set_device_id(device_id)
return "Your device_id has been set to: " + device_id
return locals()
def user():
"""
exposes:
http://..../[app]/default/user/login
http://..../[app]/default/user/logout
http://..../[app]/default/user/register
http://..../[app]/default/user/profile
http://..../[app]/default/user/retrieve_password
http://..../[app]/default/user/change_password
http://..../[app]/default/user/bulk_register
use @auth.requires_login()
@auth.requires_membership('group name')
@auth.requires_permission('read','table name',record_id)
to decorate functions that need access control
also notice there is http://..../[app]/appadmin/manage/auth to allow administrator to manage users
"""
return dict(form=auth())
@cache.action()
def download():
"""
allows downloading of uploaded files
http://..../[app]/default/download/[filename]
"""
return response.download(request, ramdb)
def call():
"""
exposes services. for example:
http://..../[app]/default/call/jsonrpc
decorate with @services.jsonrpc the functions to expose
supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
"""
return service()
|
# Generated by Django 2.0.4 on 2018-04-25 14:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('zoo', '0002_exhibit'),
]
operations = [
migrations.CreateModel(
name='Animal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Enter Animal Name', max_length=200)),
('exhibit', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='zoo.Exhibit')),
],
),
]
|
import itertools
import copy
import re
import sys
from locality_part import *
from street_part import *
from number_part import *
from type_part import *
# location qualifiers, eg "Main Hall", "Smith Oval"
from location_qualifier_part import *
# floor/level part, eg "First Floor", "Basement Level"
from level_part import *
from postal_part import *
from street_corner_part import *
from business_part import *
from other_part import *
from parish_part import *
# List of all possible address part types to compare against
part_types = [NumberPart, StreetPart, StreetCornerPart, LocalityPart, TypePart, LocationQualifierPart,
LevelPart, OtherPart, AllotmentPart, SectionPart, ParishPart, PostalPart, BusinessPart]
class AddressVariantPart:
"""A section of an address and a type to check it against"""
def __init__(self, part, type):
self.part = part['substring'].strip()
self.position = part['position']
self.type = type(self)
# Check if this part is a possible match for the specified type
# Eg, SMITH ST isn't a possible house number
self.cost = 0
self.penalties = []
if self.type.check():
self.valid = True
else:
self.valid = False
def __repr__(self):
return str(self.as_dict())
def as_dict(self):
return {'part': self.part, 'position': self.position, 'type': self.type.name(), 'cost': self.cost, 'penalties': self.penalties}
def get_part(self):
return self.part
def get_type(self):
return self.type
def get_position(self):
return self.position
def set_path(self, path):
self.path = path
def next_part(self):
try:
return self.path[self.position + 1]
except:
return None
def prev_part(self):
try:
return self.path[self.position - 1]
except:
return None
def get_cost(self):
# need to get position in path
cost = self.type.score()
self.cost = cost
self.penalties = self.type.get_penalties()
return cost
def get_penalties(self):
return self.penalties
def is_valid(self):
return self.valid
class AddressVariant:
"""A possible combination of parts for an address"""
def __init__(self, variant):
global part_types
self.variant = variant
self.cost = 0
self.parts = []
# Calculate costs for all possible interpretations of each part of the
# address variant
for part in self.variant:
parts = []
# For every part of the address, check it against every possible type
#(eg if the part is "SMITH ST", check it as a house number, street name, locality name, etc)
for type in part_types:
part_variant = AddressVariantPart(part, type)
# Is this a possible type for this part of the address? (eg,
# "SMITH ST" can't be a house number)
if part_variant.is_valid():
parts.append(part_variant)
self.parts.append(parts)
def calc_cost(self, path, max_cost=100000):
cost = 0
for v in path:
v.set_path(path)
cost += v.get_cost()
if cost > max_cost:
return None
return cost
def least_cost(self, max_cost):
"""Calculates the combination of parts and types with lowest cost"""
cheapest_cost = max_cost
possible_paths = []
# Get every combination of parts and types
combinations = [[h for h in p if h] for p in self.parts]
for path in itertools.product(*combinations):
p = copy.deepcopy(path)
# Calculate total cost for this path, but break if the cost is more
# than the current cheapest cost
cost = self.calc_cost(p, cheapest_cost)
if cost is None:
continue
# Check if path has duplicated types
notes = ''
type_count = {}
type_dupe_cost = {}
for type in [v.get_type() for v in p]:
type_count[type.name()] = type_count.get(type.name(), 0) + 1
type_dupe_cost[type.name()] = type.dupe_part_cost()
for type, counts in type_count.items():
if counts > 1:
# Having multiple values for the same part type (eg two
# street names) costs 50 for each duplicate part
cost += type_dupe_cost[type] * (counts - 1)
notes += 'duplicate ' + type + \
' (' + str(type_dupe_cost[type] * (counts - 1)) + ')'
if cost < cheapest_cost:
cheapest_cost = cost
current_path = {'path': p, 'cost': cost, 'notes': notes}
possible_paths.append(current_path)
if not possible_paths:
return None, None, None
# Sort possible path list by cost
cheapest = sorted(possible_paths, key=lambda v: v['cost'])[0]
cheapest_path = cheapest['path']
cheapest_cost = cheapest['cost']
cheapest_notes = cheapest['notes']
# Penalise more complex splits with more parts, prefer simple
cheapest_cost += len(cheapest_path) * 2
cheapest_notes += "path length cost " + str(len(cheapest_path) * 2)
return cheapest_path, cheapest_cost, cheapest_notes
def __repr__(self):
return str(self.parts) + ": " + str(self.cost)
class Address:
def __init__(self, address_string):
self.address_string = address_string
# Clean up address string prior to processing
self.preprocess()
self.variants = []
self.parse()
def __repr__(self):
return self.address_string
def parse(self, show_all=False):
"""Parses the address and gets all possible variants"""
# Generate a list of all possible breakdowns for this address
# Start with the entire string
part_combinations = [
[{'position': 0, 'substring': self.address_string}]]
# Add all possible combinations of address string
for x in self.break_down():
part_combinations.append(x)
number_paths = len(part_combinations)
# Create address variants for all part combinations
self.variants = []
current_percent = 0
for index, combination in enumerate(part_combinations):
current_rounded_percent = int(20 * float(index) / number_paths)
if current_rounded_percent != current_percent:
sys.stdout.write(".")
current_percent = current_rounded_percent
self.variants.append(AddressVariant(combination))
#self.variants = [AddressVariant(combination) for combination in part_combinations]
# Calculate costs for all variants
self.costed_variants = []
max_cost = 1000000
current_percent = 0
number_variants = len(self.variants)
for index, variant in enumerate(self.variants):
current_rounded_percent = int(20 * float(index) / number_variants)
if current_rounded_percent != current_percent:
sys.stdout.write(".")
current_percent = current_rounded_percent
cheapest_variant, cost, notes = variant.least_cost(max_cost)
if cheapest_variant is None:
continue
if cost < max_cost and not show_all:
max_cost = cost
self.costed_variants.append(
{'variant': cheapest_variant, 'cost': cost, 'notes': notes})
# Sort list with cheapest variants first
self.costed_variants.sort(key=lambda v: v['cost'])
def get_costed_variants(self, show_all=False):
"""Returns all costed variants"""
self.parse(show_all)
return self.costed_variants
def get_best_variant(self):
"""Returns the best (cheapest) costed variant"""
best_variant = self.costed_variants[0]
# Further refine results by formatting the best variant
# For example, this replaces "STREET" with "ST", splits street names into parts (name/type/suffix), etc
# Also converts values to uppercase
result = {part.get_type().name(): part.get_part().upper()
for part in best_variant['variant']}
result2 = {}
for part in best_variant['variant']:
breakdown = part.get_type().breakdown()
if breakdown:
for b, v in breakdown.items():
if result2.get(b):
result2[b] = result2[b] + ', ' + v
else:
result2[b] = v
else:
result2[part.get_type().name()] = part.get_part().upper()
# result.update(breakdown)
# Make result into a dictionary
result2.update({k: str(v).upper() for k, v in result2.items()})
for key in result2:
result2[key] = re.sub(r'^[\.]', '', result2[key])
best_variant['result'] = result2
return best_variant
def break_down(self):
"""Breaks down a string a words into all possible combinations"""
# Position of break characters:
# At the beginning of the string
breaks = [-1]
# On special characters
breaks.extend([m.start() for m in re.finditer(
r"[;, \/\\\-]+", self.address_string)])
# At the end of string
breaks.append(len(self.address_string))
ns = range(1, len(breaks) - 1)
for n in ns:
for indices in itertools.combinations(ns, n):
# Split string at all possible combinations of break
# characters, and return a list of dicts with 'position' and
# 'substring'
yield [{'position': a[0], 'substring': a[1]} for a in enumerate([self.address_string[breaks[i] + 1:breaks[j]] for i, j in zip((0,) + indices, indices + (-1,))])]
def preprocess(self):
"""Cleans up an input string prior to processing"""
# Replace multiple spaces with single space
processed = re.sub(r"\s+", " ", self.address_string)
# Remove any spaces around ' / '
processed = re.sub(r"\s+\/\s+", "/", processed)
# Convert to lowercase and remove leading/trailing spaces
self.address_string = processed.strip().lower()
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^(?P<code>[0-9]+)/annual_report$', views.annual_report, name='annual_report'),
url(r'^stock_list$', views.stock_list, name='stock_list'),
url(r'^(?P<code>[0-9]+)/tick_data$', views.tick_data, name='tick_data'),
url(r'^(?P<code>[0-9]+)/basic_info$', views.basic_info, name='basic_info'),
url(r'^(?P<code>[0-9]+)/level_0$', views.level_0, name='level_0'),
url(r'^(?P<code>[0-9]+)/level_1$', views.level_1, name='level_1'),
url(r'^(?P<code>[0-9]+)/notices$', views.notices, name='notices'),
url(r'^(?P<code>[0-9]+)/history$', views.history, name='history'),
url(r'^money_flow$', views.money_flow, name='money_flow'),
url(r'^money_flow_percent$', views.money_flow_percent, name='money_flow_percent'),
url(r'^join_interest$', views.join_interest, name='join_interest'),
url(r'^leave_interest$', views.leave_interest, name='leave_interest'),
url(r'^interest_list$', views.interest_list, name='interest_list'),
url(r'^add_comments$', views.add_comments, name='add_comments'),
url(r'^del_comments$', views.del_comments, name='del_comments'),
url(r'^update_comments$', views.update_comments, name='update_comments'),
url(r'^comments_list$', views.comments_list, name='comments_list'),
url(r'^join_position$', views.join_position, name='join_position'),
url(r'^leave_position$', views.leave_position, name='leave_position'),
url(r'^position_list$', views.position_list, name='position_list'),
url(r'^rise_fail_stats$', views.rise_fail_stats, name='rise_fail_stats'),
url(r'^szzs$', views.szzs, name='szzs'),
url(r'^bdi$', views.bdi, name='bdi'),
url(r'^shibor$', views.shibor, name='shibor'),
]
|
import numpy as np
from ..util import max_range
from .interface import Interface, DataError
class MultiInterface(Interface):
"""
MultiInterface allows wrapping around a list of tabular datasets
including dataframes, the columnar dictionary format or 2D tabular
NumPy arrays. Using the split method the list of tabular data can
be split into individual datasets.
The interface makes a list of tabular datasets appear as
a single dataset. The length, shape and values methods therefore
make the data appear like a single array of concatenated subpaths,
separated by NaN values.
"""
types = ()
datatype = 'multitabular'
subtypes = ['dictionary', 'dataframe', 'array', 'dask']
multi = True
@classmethod
def init(cls, eltype, data, kdims, vdims):
new_data = []
dims = {'kdims': eltype.kdims, 'vdims': eltype.vdims}
if kdims is not None:
dims['kdims'] = kdims
if vdims is not None:
dims['vdims'] = vdims
if not isinstance(data, list):
raise ValueError('MultiInterface data must be a list of tabular data types.')
prev_interface, prev_dims = None, None
for d in data:
d, interface, dims, _ = Interface.initialize(eltype, d, kdims, vdims,
datatype=cls.subtypes)
if prev_interface:
if prev_interface != interface:
raise DataError('MultiInterface subpaths must all have matching datatype.', cls)
if dims['kdims'] != prev_dims['kdims']:
raise DataError('MultiInterface subpaths must all have matching kdims.', cls)
if dims['vdims'] != prev_dims['vdims']:
raise DataError('MultiInterface subpaths must all have matching vdims.', cls)
new_data.append(d)
prev_interface, prev_dims = interface, dims
return new_data, dims, {}
@classmethod
def validate(cls, dataset, vdims=True):
if not dataset.data:
return
ds = cls._inner_dataset_template(dataset)
for d in dataset.data:
ds.data = d
ds.interface.validate(ds, vdims)
@classmethod
def _inner_dataset_template(cls, dataset):
"""
Returns a Dataset template used as a wrapper around the data
contained within the multi-interface dataset.
"""
from . import Dataset
vdims = dataset.vdims if getattr(dataset, 'level', None) is None else []
return Dataset(dataset.data[0], datatype=cls.subtypes,
kdims=dataset.kdims, vdims=vdims)
@classmethod
def dimension_type(cls, dataset, dim):
if not dataset.data:
# Note: Required to make empty datasets work at all (should fix)
# Other interfaces declare equivalent of empty array
# which defaults to float type
return float
ds = cls._inner_dataset_template(dataset)
return ds.interface.dimension_type(ds, dim)
@classmethod
def range(cls, dataset, dim):
if not dataset.data:
return (None, None)
ranges = []
ds = cls._inner_dataset_template(dataset)
# Backward compatibility for Contours/Polygons level
level = getattr(dataset, 'level', None)
dim = dataset.get_dimension(dim)
if level is not None and dim is dataset.vdims[0]:
return (level, level)
for d in dataset.data:
ds.data = d
ranges.append(ds.interface.range(ds, dim))
return max_range(ranges)
@classmethod
def isscalar(cls, dataset, dim):
"""
Tests if dimension is scalar in each subpath.
"""
if not dataset.data:
return True
ds = cls._inner_dataset_template(dataset)
isscalar = []
for d in dataset.data:
ds.data = d
isscalar.append(ds.interface.isscalar(ds, dim))
return all(isscalar)
@classmethod
def select(cls, dataset, selection_mask=None, **selection):
"""
Applies the selection to all the subpaths.
"""
if not dataset.data:
return []
ds = cls._inner_dataset_template(dataset)
data = []
for d in dataset.data:
ds.data = d
sel = ds.interface.select(ds, **selection)
data.append(sel)
return data
@classmethod
def select_paths(cls, dataset, selection):
"""
Allows selecting paths with the usual NumPy slicing syntax.
"""
return [s[0] for s in np.array([{0: p} for p in dataset.data])[selection]]
@classmethod
def aggregate(cls, columns, dimensions, function, **kwargs):
raise NotImplementedError('Aggregation currently not implemented')
@classmethod
def groupby(cls, columns, dimensions, container_type, group_type, **kwargs):
raise NotImplementedError('Grouping currently not implemented')
@classmethod
def sample(cls, columns, samples=[]):
raise NotImplementedError('Sampling operation on subpaths not supported')
@classmethod
def shape(cls, dataset):
"""
Returns the shape of all subpaths, making it appear like a
single array of concatenated subpaths separated by NaN values.
"""
if not dataset.data:
return (0, len(dataset.dimensions()))
rows, cols = 0, 0
ds = cls._inner_dataset_template(dataset)
for d in dataset.data:
ds.data = d
r, cols = ds.interface.shape(ds)
rows += r
return rows+len(dataset.data)-1, cols
@classmethod
def length(cls, dataset):
"""
Returns the length of the multi-tabular dataset making it appear
like a single array of concatenated subpaths separated by NaN
values.
"""
if not dataset.data:
return 0
length = 0
ds = cls._inner_dataset_template(dataset)
for d in dataset.data:
ds.data = d
length += ds.interface.length(ds)
return length+len(dataset.data)-1
@classmethod
def nonzero(cls, dataset):
return bool(dataset.data)
@classmethod
def redim(cls, dataset, dimensions):
if not dataset.data:
return dataset.data
new_data = []
ds = cls._inner_dataset_template(dataset)
for d in dataset.data:
ds.data = d
new_data.append(ds.interface.redim(ds, dimensions))
return new_data
@classmethod
def values(cls, dataset, dimension, expanded, flat):
"""
Returns a single concatenated array of all subpaths separated
by NaN values. If expanded keyword is False an array of arrays
is returned.
"""
if not dataset.data:
return np.array([])
values = []
ds = cls._inner_dataset_template(dataset)
for d in dataset.data:
ds.data = d
dvals = ds.interface.values(ds, dimension, expanded, flat)
if not len(dvals):
continue
elif expanded:
values.append(dvals)
values.append([np.NaN])
else:
values.append(dvals)
if not values:
return np.array([])
elif expanded:
return np.concatenate(values[:-1])
else:
return np.concatenate(values)
@classmethod
def split(cls, dataset, start, end, datatype, **kwargs):
"""
Splits a multi-interface Dataset into regular Datasets using
regular tabular interfaces.
"""
objs = []
if datatype is None:
for d in dataset.data[start: end]:
objs.append(dataset.clone(d, datatype=cls.subtypes))
return objs
elif not dataset.data:
return objs
ds = cls._inner_dataset_template(dataset)
for d in dataset.data:
ds.data = d
if datatype == 'array':
obj = ds.array(**kwargs)
elif datatype == 'dataframe':
obj = ds.dframe(**kwargs)
elif datatype == 'columns':
if ds.interface.datatype == 'dictionary':
obj = dict(d)
else:
obj = ds.columns(**kwargs)
else:
raise ValueError("%s datatype not support" % datatype)
objs.append(obj)
return objs
@classmethod
def add_dimension(cls, dataset, dimension, dim_pos, values, vdim):
if not len(dataset.data):
return dataset.data
elif values is None or np.isscalar(values):
values = [values]*len(dataset.data)
elif not len(values) == len(dataset.data):
raise ValueError('Added dimension values must be scalar or '
'match the length of the data.')
new_data = []
template = cls._inner_dataset_template(dataset)
array_type = template.interface.datatype == 'array'
for d, v in zip(dataset.data, values):
template.data = d
if array_type:
ds = template.clone(template.columns())
else:
ds = template
new_data.append(ds.interface.add_dimension(ds, dimension, dim_pos, v, vdim))
return new_data
Interface.register(MultiInterface)
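# Illustration (added, not part of the original module) of the NaN-separated
# view described in the MultiInterface docstring: with two subpaths of
# x-values, values()/length()/shape() behave as if the data were
#   np.concatenate([np.array([0., 1., 2.]), [np.NaN], np.array([5., 6.])])
# i.e. a single array of length len(sub1) + len(sub2) + (n_subpaths - 1) == 6.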
|
from django.db import models
# Create your models here.
class Driverstanding(models.Model):
driverStandingsId = models.IntegerField(primary_key=True)
raceId = models.ForeignKey('races.Race', on_delete=models.CASCADE)
driverId = models.ForeignKey('drivers.Driver', on_delete=models.CASCADE)
points = models.FloatField()
position = models.IntegerField(null=True)
positionText = models.TextField(null=True)
wins = models.IntegerField()
|
from pyroute2 import netns, NDB, netlink, NSPopen
from contextlib import contextmanager
import ipaddress
import subprocess
import os
import os.path
"""
TODO: Add an introduction to network namespaces, veth interfaces, and bridges,
and explain why we use them here.
"""
BRIDGE_NF_CALL_IPTABLES = "/proc/sys/net/bridge/bridge-nf-call-iptables"
COMMAND_TIMEOUT = 60
@contextmanager
def managed_nspopen(*args, **kwds):
proc = NSPopen(*args, **kwds)
try:
yield proc
finally:
if proc.poll() is None:
# send SIGKILL to the process and wait for it to die if it's still
# running
proc.kill()
# If it's not dead after 2 seconds we throw an error
proc.communicate(timeout=2)
# release proxy process resources
proc.release()
class VirtualLAN:
"""
Helper class to create a network of virtual nodes simulating a virtual LAN.
IP addresses are assigned automatically to the nodes from a private IP range.
The IP address of a virtual node can be accessed via the node.address field.
Internally, this is a network of Linux network namespaces connected by a
bridge.
TODO: explain more details and add an example.
"""
def __init__(self, namePrefix, subnet):
ipnet = ipaddress.ip_network(subnet)
self.availableHosts = ipnet.hosts()
self.prefixLen = ipnet.prefixlen
self.namePrefix = namePrefix
self.nodes = []
# create the bridge
self.bridgeName = "%s-br" % (namePrefix,)
self.bridgeAddress = next(self.availableHosts)
self._add_bridge(self.bridgeName, self.bridgeAddress, self.prefixLen)
# Don't pass bridged IPv4 traffic to iptables' chains, so namespaces
# can communicate irrespective of the host machines iptables. This is
# needed in some docker instances (e.g. travis), where traffic was
# filtered at bridge level. See
# https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt
try:
with open(BRIDGE_NF_CALL_IPTABLES, "r") as f:
self.saved_bridge_nf_call_iptables = f.read()
with open(BRIDGE_NF_CALL_IPTABLES, "w") as f:
f.write("0\n")
except FileNotFoundError:
# In some environments this variable doesn't exist, we are ok with
# no changes in this case.
self.saved_bridge_nf_call_iptables = None
def create_node(self):
"""
Creates a VirtualNode which can access/be accessed from other nodes in
the virtual network.
"""
namespace = "%s-%s" % (self.namePrefix, len(self.nodes))
address = next(self.availableHosts)
node = VirtualNode(namespace, address, self.prefixLen)
self._add_interface_to_bridge(self.bridgeName, node.vethPeer)
self.nodes.append(node)
return node
def destroy(self):
"""
Destroys the objects created for the virtual network.
"""
for node in self.nodes:
node.destroy()
_remove_interface_if_exists(self.bridgeName)
if self.saved_bridge_nf_call_iptables is not None:
with open(BRIDGE_NF_CALL_IPTABLES, "w") as f:
f.write(self.saved_bridge_nf_call_iptables)
def _add_bridge(self, name, address, prefixLen):
"""
Creates a bridge with the given name, address, and netmask prefix length.
"""
_remove_interface_if_exists(name)
with NDB() as ndb:
(
ndb.interfaces.create(ifname=name, kind="bridge", state="up")
.add_ip("%s/%s" % (address, prefixLen))
.commit()
)
def _add_interface_to_bridge(self, bridge, interface):
"""
Adds the given interface to the bridge. In our use case, this interface
is usually the peer end of a veth pair with the other end inside a
network namespace, in which case after calling this function the namespace
will be able to communicate with the other nodes in the virtual network.
"""
with NDB() as ndb:
ndb.interfaces[bridge].add_port(interface).commit()
ndb.interfaces[interface].set(state="up").commit()
class VirtualNode:
"""
A virtual node inside a virtual network.
Internally, this corresponds to a Linux network namespace.
"""
def __init__(self, namespace, address, prefixLen):
self.namespace = namespace
self.address = address
self.prefixLen = prefixLen
self.vethPeer = namespace + "p"
self._add_namespace(namespace, address, prefixLen)
def destroy(self):
"""
Removes all objects created for the virtual node.
"""
_remove_interface_if_exists(self.vethPeer)
try:
netns.remove(self.namespace)
except Exception:
# Namespace doesn't exist. Return silently.
pass
def run(self, command, user=os.getenv("USER")):
"""
Executes a command under the given user from this virtual node. Returns
a context manager that yields an NSPopen object to control the process.
NSPopen has the same API as subprocess.Popen.
"""
sudo_command = [
"sudo",
"-E",
"-u",
user,
"env",
"PATH=" + os.getenv("PATH"),
] + command
return managed_nspopen(
self.namespace,
sudo_command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
start_new_session=True,
)
def run_unmanaged(self, command, user=os.getenv("USER")):
"""
Executes a command under the given user from this virtual node. Returns
an NSPopen object to control the process. NSPopen has the same API as
subprocess.Popen. This NSPopen object needs to be released manually; in
general you should prefer run(), where this is done automatically by the
context manager.
"""
sudo_command = [
"sudo",
"-E",
"-u",
user,
"env",
"PATH=" + os.getenv("PATH"),
] + command
return NSPopen(
self.namespace,
sudo_command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
start_new_session=True,
)
def run_and_wait(self, command, name, timeout=COMMAND_TIMEOUT):
"""
Waits for the command to exit successfully. If it exits with an error or
times out, raises an exception containing the stdout and stderr of the process.
"""
with self.run(command) as proc:
try:
out, err = proc.communicate(timeout=timeout)
if proc.returncode > 0:
raise Exception(
"%s failed, out: %s\n, err: %s" % (name, out, err)
)
return out, err
except subprocess.TimeoutExpired:
proc.kill()
out, err = proc.communicate()
raise Exception(
"%s timed out after %d seconds. out: %s\n, err: %s"
% (name, timeout, out, err)
)
def _add_namespace(self, name, address, netmaskLength):
"""
Creates a namespace with the given name, and creates a veth interface
with one endpoint inside the namespace which has the given address and
netmask length. The peer end of veth interface can be used to connect the
namespace to a bridge.
"""
self._remove_namespace_if_exists(name)
netns.create(name)
veth_name = "veth0"
_remove_interface_if_exists(self.vethPeer)
with NDB() as ndb:
#
# Add netns to the NDB sources
#
# ndb.interfaces["lo"] is a short form of
# ndb.interfaces[{"target": "localhost", "ifname": "lo"}]
#
# To address interfaces/addresses/routes within a netns, use
# ndb.interfaces[{"target": netns_name, "ifname": "lo"}]
ndb.sources.add(netns=name)
#
# Create veth
(
ndb.interfaces.create(
ifname=veth_name,
kind="veth",
peer=self.vethPeer,
state="up",
)
.commit()
.set(net_ns_fd=name)
.commit()
)
#
# .interfaces.wait() returns an interface object when
# it becomes available on the specified source
(
ndb.interfaces.wait(target=name, ifname=veth_name)
.set(state="up")
.add_ip("%s/%s" % (address, netmaskLength))
.commit()
)
#
(
ndb.interfaces[{"target": name, "ifname": "lo"}]
.set(state="up")
.commit()
)
def _remove_namespace_if_exists(self, name):
"""
If the given namespace exists, removes it. Otherwise just returns
silently.
"""
try:
netns.remove(name)
except Exception:
# Namespace doesn't exist. Return silently.
pass
def ifdown(self):
"""
Bring the network interface down for this node
"""
with NDB() as ndb:
# bring it down and wait until success
ndb.interfaces[self.vethPeer].set(state="down").commit()
def ifup(self):
"""
Bring the network interface up for this node
"""
with NDB() as ndb:
# bring it up and wait until success
ndb.interfaces[self.vethPeer].set(state="up").commit()
def _remove_interface_if_exists(name):
"""
If the given interface exists, brings it down and removes it. Otherwise
just returns silently. A bridge is also an interface, so this can be
used for removing bridges too.
"""
with NDB() as ndb:
if name in ndb.interfaces:
try:
ndb.interfaces[name].remove().commit()
except netlink.exceptions.NetlinkError:
pass
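# Hedged usage sketch (added for illustration; not part of the original
# module, and it requires root privileges to create namespaces and bridges):
#
#   lan = VirtualLAN("demo", "10.200.0.0/24")
#   try:
#       node_a = lan.create_node()
#       node_b = lan.create_node()
#       # ping node_b from node_a across the bridge
#       node_a.run_and_wait(["ping", "-c", "1", str(node_b.address)], "ping")
#   finally:
#       lan.destroy()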
|
# 020 - The same teacher from the previous exercise wants to draw the presentation order of the students. Write a program that reads the names of four students and shows the drawn order.
'''
from random import shuffle
aluno1 = input('Primeiro aluno: ')
aluno2 = input('Segundo aluno: ')
aluno3 = input('Terceiro aluno: ')
aluno4 = input('Quarto aluno: ')
alunos = [aluno1, aluno2, aluno3, aluno4]
shuffle(alunos)
print("Sorteio - aluno n°1: {}".format(alunos[0]))
print("Sorteio - aluno n°2: {}".format(alunos[1]))
print("Sorteio - aluno n°3: {}".format(alunos[2]))
print("Sorteio - aluno n°4: {}".format(alunos[3]))
'''
'''
import random
n1 = str(input('Primeiro aluno: '))
n2 = str(input('Segundo aluno: '))
n3 = str(input('Terceiro aluno: '))
n4 = str(input('Quarto aluno: '))
lista = [n1, n2, n3, n4]
random.shuffle(lista)
print("A ordem de apresentação será: ")
print(lista)
'''
# importing only what will be used
from random import shuffle
n1 = str(input('Primeiro aluno: '))
n2 = str(input('Segundo aluno: '))
n3 = str(input('Terceiro aluno: '))
n4 = str(input('Quarto aluno: '))
lista = [n1, n2, n3, n4]
shuffle(lista)
print("A ordem de apresentação será: ")
print(lista)
|
"""
sphinx.domains.changeset
~~~~~~~~~~~~~~~~~~~~~~~~
The changeset domain.
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from collections import namedtuple
from typing import cast
from docutils import nodes
from sphinx import addnodes
from sphinx import locale
from sphinx.deprecation import DeprecatedDict, RemovedInSphinx30Warning
from sphinx.domains import Domain
from sphinx.locale import _
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import set_source_info
if False:
# For type annotation
from typing import Any, Dict, List # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
versionlabels = {
'versionadded': _('New in version %s'),
'versionchanged': _('Changed in version %s'),
'deprecated': _('Deprecated since version %s'),
}
versionlabel_classes = {
'versionadded': 'added',
'versionchanged': 'changed',
'deprecated': 'deprecated',
}
locale.versionlabels = DeprecatedDict(
versionlabels,
'sphinx.locale.versionlabels is deprecated. '
'Please use sphinx.domains.changeset.versionlabels instead.',
RemovedInSphinx30Warning
)
# TODO: move to typing.NamedTuple after dropping py35 support (see #5958)
ChangeSet = namedtuple('ChangeSet',
['type', 'docname', 'lineno', 'module', 'descname', 'content'])
class VersionChange(SphinxDirective):
"""
Directive to describe a change/addition/deprecation in a specific version.
"""
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {} # type: Dict
def run(self):
# type: () -> List[nodes.Node]
node = addnodes.versionmodified()
node.document = self.state.document
set_source_info(self, node)
node['type'] = self.name
node['version'] = self.arguments[0]
text = versionlabels[self.name] % self.arguments[0]
if len(self.arguments) == 2:
inodes, messages = self.state.inline_text(self.arguments[1],
self.lineno + 1)
para = nodes.paragraph(self.arguments[1], '', *inodes, translatable=False)
set_source_info(self, para)
node.append(para)
else:
messages = []
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
classes = ['versionmodified', versionlabel_classes[self.name]]
if len(node):
if isinstance(node[0], nodes.paragraph) and node[0].rawsource:
content = nodes.inline(node[0].rawsource, translatable=True)
content.source = node[0].source
content.line = node[0].line
content += node[0].children
node[0].replace_self(nodes.paragraph('', '', content, translatable=False))
para = cast(nodes.paragraph, node[0])
para.insert(0, nodes.inline('', '%s: ' % text, classes=classes))
else:
para = nodes.paragraph('', '',
nodes.inline('', '%s.' % text,
classes=classes),
translatable=False)
node.append(para)
domain = cast(ChangeSetDomain, self.env.get_domain('changeset'))
domain.note_changeset(node)
ret = [node] # type: List[nodes.Node]
ret += messages
return ret
class ChangeSetDomain(Domain):
"""Domain for changesets."""
name = 'changeset'
label = 'changeset'
initial_data = {
'changes': {}, # version -> list of ChangeSet
} # type: Dict
def clear_doc(self, docname):
# type: (str) -> None
for version, changes in self.data['changes'].items():
for changeset in changes[:]:
if changeset.docname == docname:
changes.remove(changeset)
def merge_domaindata(self, docnames, otherdata):
# type: (List[str], Dict) -> None
# XXX duplicates?
for version, otherchanges in otherdata['changes'].items():
changes = self.data['changes'].setdefault(version, [])
for changeset in otherchanges:
if changeset.docname in docnames:
changes.append(changeset)
def process_doc(self, env, docname, document):
# type: (BuildEnvironment, str, nodes.document) -> None
pass # nothing to do here. All changesets are registered on calling directive.
def note_changeset(self, node):
# type: (addnodes.versionmodified) -> None
version = node['version']
module = self.env.ref_context.get('py:module')
objname = self.env.temp_data.get('object')
changeset = ChangeSet(node['type'], self.env.docname, node.line,
module, objname, node.astext())
self.data['changes'].setdefault(version, []).append(changeset)
def get_changesets_for(self, version):
# type: (str) -> List[ChangeSet]
return self.data['changes'].get(version, [])
def setup(app):
# type: (Sphinx) -> Dict[str, Any]
app.add_domain(ChangeSetDomain)
app.add_directive('deprecated', VersionChange)
app.add_directive('versionadded', VersionChange)
app.add_directive('versionchanged', VersionChange)
return {
'version': 'builtin',
'env_version': 1,
'parallel_read_safe': True,
'parallel_write_safe': True,
}
|
# Generated by Django 2.1.15 on 2020-11-29 18:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('flightApp', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='reservation',
name='flight',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='flightApp.Flight'),
),
]
|
"""Plot intensity profile of theoretical beam patterns."""
import matplotlib.pyplot as plt
import numpy as np
from frbpoppy.survey import Survey
PATTERNS = ['perfect', 'gaussian', 'airy-0', 'airy-4']
SURVEY = 'apertif'
MIN_Y = 1e-6
n = 500000
for pattern in PATTERNS:
n_sidelobes = 1
p = pattern
z = 0
if pattern.startswith('airy'):
n_sidelobes = int(pattern[-1])
p = 'airy'
if n_sidelobes == 0:
z = 10
s = Survey(SURVEY, gain_pattern=p, n_sidelobes=n_sidelobes)
int_pro, offset = s.intensity_profile(n_gen=n)
# Sort the values
sorted_int = np.argsort(offset)
int_pro = int_pro[sorted_int]
offset = offset[sorted_int]
# Clean up lower limit
offset = offset[int_pro > MIN_Y]
int_pro = int_pro[int_pro > MIN_Y]
# Offset in degrees
offset = offset/60.
print(s.beam_size_fwhm, s.beam_size)
plt.plot(offset, int_pro, label=pattern, zorder=z)
plt.xlabel(r'Offset ($\degree$)')
plt.ylabel('Intensity Profile')
plt.yscale('log')
plt.legend()
plt.tight_layout()
plt.savefig('plots/int_pro_theory.pdf')
|
import cv2
class SimplePreprocessor:
def __init__(self, width, height, inter=cv2.INTER_AREA):
self.width = width
self.height = height
self.inter = inter
def preprocess(self, image):
# Resize the image to a fixed size and ignore the aspect ratio
return cv2.resize(image, (self.width, self.height), interpolation=self.inter)
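# Minimal usage sketch (added for illustration; "example.jpg" is a
# hypothetical file name):
#   sp = SimplePreprocessor(32, 32)
#   image = cv2.imread("example.jpg")
#   resized = sp.preprocess(image)  # 32x32 pixels, aspect ratio ignored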
|
import pandas as pd
import matplotlib.cm as cm
import numpy as np
import matplotlib.pyplot as plt
def plot(problemVariants, *, zero, outfile, numThreads):
columns = ['Problem', 'NotTriedYet', 'Scheduled', 'Success', 'Timeout', 'Stopped', 'Ended']
colors = ['w', 'tab:purple', 'tab:green', 'tab:orange', 'tab:red', 'w']
problems = {}
for problemVariant in problemVariants:
problem = problemVariant.problem
if not (problem.filePattern in problems):
problems[problem.filePattern] = problem
variants = {}
for problemVariant in problemVariants:
v = problemVariant.variant
if not (v in variants):
variants[v] = []
'''
Overall time used
'''
t_max = 0
for problemVariant in problemVariants:
t_max = max(t_max, problemVariant.process.timer.getEnd(zero))
for k, problem in sorted(problems.items(), reverse=True):
for v in sorted(variants.keys(), reverse=True):
if not (v in problem.variants):
variants[v].append([
problem.filePattern,
t_max, # time waiting
0, # time scheduled
0, # time success
0, # time timeout
0, # time error
0, # time ended
])
else:
problemVariant = problem.variants[v]
scheduled = problemVariant.process.timer.getScheduled(zero)
started = problemVariant.process.timer.getStart(zero)
ended = problemVariant.process.timer.getEnd(zero)
if problemVariant.isSuccessful():
state = 'Success'
elif problemVariant.szsStatus == 'Timeout':
state = 'Timeout'
else:
state = 'Stopped'
variants[v].append([
problem.filePattern,
scheduled, # time waiting
started - scheduled, # time scheduled
ended - started if state == 'Success' else 0, # time success
ended - started if state == 'Timeout' else 0, # time timeout
ended - started if state == 'Stopped' else 0, # time error
t_max - ended,
])
dfs = []
labels = []
for v, vd in variants.items():
df = pd.DataFrame(vd,
columns=columns,
).set_index('Problem')
dfs.append(df)
labels.append("v"+v)
ax = plot_grouped_stacked_bar(dfs, labels, title='LTB Scheduler - Problem Timings using {} Threads'.format(numThreads), color=colors)
ax.set_ylabel("Problems")
ax.set_xlabel("Time in s")
fig = ax.get_figure()
fig.set_size_inches(15, 1*len(problems))
fig.savefig(outfile)
def plot_grouped_stacked_bar(dfall, labels, *, title, H="/", **kwargs):
'''
Given a list of dataframes, with identical columns and index, create a clustered stacked bar plot.
Args:
* labels: a list of names for the dataframes, used for the legend
* title: a string for the title of the plot
* H: the hatch pattern used to tell the different dataframes apart
Shamelessly taken and modified from https://stackoverflow.com/a/22845857 - thank you jrjc
'''
n_df = len(dfall)
n_col = len(dfall[0].columns)
n_ind = len(dfall[0].index)
axe = plt.subplot(111)
for df in dfall:
axe = df.plot(
kind="barh",
linewidth=0,
stacked=True,
ax=axe,
legend=False,
grid=False,
**kwargs
) # single bar plots
h,l = axe.get_legend_handles_labels()
for i in range(0, n_df * n_col, n_col): # len(h) = n_col * n_df
for j, pa in enumerate(h[i:i+n_col]):
for rect in pa.patches: # for each index
rect.set_y(rect.get_y() + 1 / float(n_df + 1) * i / float(n_col))
rect.set_hatch(H * int(i / n_col)) #edited part
rect.set_height(1 / float(n_df + 1))
axe.set_yticks((np.arange(0, 2 * n_ind, 2) + 1 / float(n_df + 1)) / 2.)
axe.set_yticklabels(df.index, rotation = 0)
axe.set_title(title)
# Add invisible data to add another legend
n=[]
for i in range(n_df):
n.append(axe.bar(0, 0, color="gray", hatch=H * i))
l1 = axe.legend(h[:n_col], l[:n_col], loc=[1.01, 0.5])
if labels is not None:
l2 = plt.legend(n, labels, loc=[1.01, 0.1])
axe.add_artist(l1)
return axe
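# A hedged, self-contained usage sketch (added for illustration; the data
# below is made up). Two frames with identical index/columns produce one
# horizontal bar group per index entry and one hatched sub-bar per frame.
if __name__ == "__main__":
    _idx, _cols = ["p1", "p2"], ["Scheduled", "Success"]
    _df_a = pd.DataFrame([[1, 2], [2, 1]], index=_idx, columns=_cols)
    _df_b = pd.DataFrame([[2, 1], [1, 3]], index=_idx, columns=_cols)
    _ax = plot_grouped_stacked_bar([_df_a, _df_b], ["v1", "v2"], title="demo")
    _ax.get_figure().savefig("grouped_stacked_demo.png")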
|
#! /usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
cmdline utility to perform cluster reconnaissance
"""
from __future__ import print_function
from eventlet.green import urllib2
from swift.common.utils import SWIFT_CONF_FILE
from swift.common.ring import Ring
from urlparse import urlparse
try:
import simplejson as json
except ImportError:
import json
from hashlib import md5
import eventlet
import optparse
import time
import sys
import os
def seconds2timeunit(seconds):
elapsed = seconds
unit = 'seconds'
if elapsed >= 60:
elapsed = elapsed / 60.0
unit = 'minutes'
if elapsed >= 60:
elapsed = elapsed / 60.0
unit = 'hours'
if elapsed >= 24:
elapsed = elapsed / 24.0
unit = 'days'
return elapsed, unit
def size_suffix(size):
suffixes = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
for suffix in suffixes:
if size < 1000:
return "%s %s" % (size, suffix)
size = size / 1000
return "%s %s" % (size, suffix)
class Scout(object):
"""
Obtain swift recon information
"""
def __init__(self, recon_type, verbose=False, suppress_errors=False,
timeout=5):
self.recon_type = recon_type
self.verbose = verbose
self.suppress_errors = suppress_errors
self.timeout = timeout
def scout_host(self, base_url, recon_type):
"""
Perform the actual HTTP request to obtain swift recon telemetry.
:param base_url: the base url of the host you wish to check. str of the
format 'http://127.0.0.1:6000/recon/'
:param recon_type: the swift recon check to request.
:returns: tuple of (recon url used, response body, and status)
"""
url = base_url + recon_type
try:
body = urllib2.urlopen(url, timeout=self.timeout).read()
content = json.loads(body)
if self.verbose:
print("-> %s: %s" % (url, content))
status = 200
except urllib2.HTTPError as err:
if not self.suppress_errors or self.verbose:
print("-> %s: %s" % (url, err))
content = err
status = err.code
except urllib2.URLError as err:
if not self.suppress_errors or self.verbose:
print("-> %s: %s" % (url, err))
content = err
status = -1
return url, content, status
def scout(self, host):
"""
Obtain telemetry from a host running the swift recon middleware.
:param host: host to check
:returns: tuple of (recon url used, response body, and status)
"""
base_url = "http://%s:%s/recon/" % (host[0], host[1])
url, content, status = self.scout_host(base_url, self.recon_type)
return url, content, status
class SwiftRecon(object):
"""
Retrieve and report cluster info from hosts running recon middleware.
"""
def __init__(self):
self.verbose = False
self.suppress_errors = False
self.timeout = 5
self.pool_size = 30
self.pool = eventlet.GreenPool(self.pool_size)
self.check_types = ['account', 'container', 'object']
self.server_type = 'object'
def _gen_stats(self, stats, name=None):
"""Compute various stats from a list of values."""
cstats = [x for x in stats if x is not None]
if len(cstats) > 0:
ret_dict = {'low': min(cstats), 'high': max(cstats),
'total': sum(cstats), 'reported': len(cstats),
'number_none': len(stats) - len(cstats), 'name': name}
ret_dict['average'] = \
ret_dict['total'] / float(len(cstats))
ret_dict['perc_none'] = \
ret_dict['number_none'] * 100.0 / len(stats)
else:
ret_dict = {'reported': 0}
return ret_dict
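# Worked example (added for illustration):
#   _gen_stats([1, 2, None, 3], name='async_pending') ->
#   {'low': 1, 'high': 3, 'total': 6, 'reported': 3, 'number_none': 1,
#    'name': 'async_pending', 'average': 2.0, 'perc_none': 25.0}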
def _print_stats(self, stats):
"""
print out formatted stats to console
:param stats: dict of stats generated by _gen_stats
"""
print('[%(name)s] low: %(low)d, high: %(high)d, avg: '
'%(average).1f, total: %(total)d, '
'Failed: %(perc_none).1f%%, no_result: %(number_none)d, '
'reported: %(reported)d' % stats)
def _ptime(self, timev=None):
"""
:param timev: a unix timestamp or None
:returns: a pretty string of the current time or provided time
"""
if timev:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timev))
else:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
def _md5_file(self, path):
"""
Get the MD5 checksum of a file.
:param path: path to file
:returns: MD5 checksum, hex encoded
"""
md5sum = md5()
with open(path, 'rb') as f:
block = f.read(4096)
while block:
md5sum.update(block)
block = f.read(4096)
return md5sum.hexdigest()
def get_devices(self, region_filter, zone_filter, swift_dir, ring_name):
"""
Get a list of hosts in the ring
:param region_filter: Only list regions matching given filter
:param zone_filter: Only list zones matching given filter
:param swift_dir: Directory of swift config, usually /etc/swift
:param ring_name: Name of the ring, such as 'object'
:returns: a set of tuples containing the ip and port of hosts
"""
ring_data = Ring(swift_dir, ring_name=ring_name)
devs = [d for d in ring_data.devs if d]
if region_filter is not None:
devs = [d for d in devs if d['region'] == region_filter]
if zone_filter is not None:
devs = [d for d in devs if d['zone'] == zone_filter]
return set((d['ip'], d['port']) for d in devs)
def get_ringmd5(self, hosts, swift_dir):
"""
Compare ring md5sums with those on remote hosts
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
:param swift_dir: The local directory with the ring files.
"""
matches = 0
errors = 0
ring_names = set()
for server_type in ('account', 'container'):
ring_name = '%s.ring.gz' % server_type
ring_names.add(ring_name)
# include any other object ring files
for ring_name in os.listdir(swift_dir):
if ring_name.startswith('object') and \
ring_name.endswith('ring.gz'):
ring_names.add(ring_name)
rings = {}
for ring_name in ring_names:
md5sum = md5()
with open(os.path.join(swift_dir, ring_name), 'rb') as f:
block = f.read(4096)
while block:
md5sum.update(block)
block = f.read(4096)
ring_sum = md5sum.hexdigest()
rings[ring_name] = ring_sum
recon = Scout("ringmd5", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking ring md5sums" % self._ptime())
if self.verbose:
for ring_file, ring_sum in rings.items():
print("-> On disk %s md5sum: %s" % (ring_file, ring_sum))
for url, response, status in self.pool.imap(recon.scout, hosts):
if status != 200:
errors = errors + 1
continue
success = True
for remote_ring_file, remote_ring_sum in response.items():
remote_ring_name = os.path.basename(remote_ring_file)
ring_sum = rings.get(remote_ring_name, None)
if remote_ring_sum != ring_sum:
success = False
print("!! %s (%s => %s) doesn't match on disk md5sum" % (
url, remote_ring_name, remote_ring_sum))
if not success:
errors += 1
continue
matches += 1
if self.verbose:
print("-> %s matches." % url)
print("%s/%s hosts matched, %s error[s] while checking hosts." % (
matches, len(hosts), errors))
print("=" * 79)
def get_swiftconfmd5(self, hosts, printfn=print):
"""
Compare swift.conf md5sum with that on remote hosts
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
:param printfn: function to print text; defaults to print()
"""
matches = 0
errors = 0
conf_sum = self._md5_file(SWIFT_CONF_FILE)
recon = Scout("swiftconfmd5", self.verbose, self.suppress_errors,
self.timeout)
printfn("[%s] Checking swift.conf md5sum" % self._ptime())
if self.verbose:
printfn("-> On disk swift.conf md5sum: %s" % (conf_sum,))
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
if response[SWIFT_CONF_FILE] != conf_sum:
printfn("!! %s (%s) doesn't match on disk md5sum" %
(url, response[SWIFT_CONF_FILE]))
else:
matches = matches + 1
if self.verbose:
printfn("-> %s matches." % url)
else:
errors = errors + 1
printfn("%s/%s hosts matched, %s error[s] while checking hosts."
% (matches, len(hosts), errors))
printfn("=" * 79)
def async_check(self, hosts):
"""
Obtain and print async pending statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
scan = {}
recon = Scout("async", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking async pendings" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
scan[url] = response['async_pending']
stats = self._gen_stats(scan.values(), 'async_pending')
if stats['reported'] > 0:
self._print_stats(stats)
else:
print("[async_pending] - No hosts returned valid data.")
print("=" * 79)
def umount_check(self, hosts):
"""
Check for and print unmounted drives
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
unmounted = {}
errors = {}
recon = Scout("unmounted", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Getting unmounted drives from %s hosts..." %
(self._ptime(), len(hosts)))
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
unmounted[url] = []
errors[url] = []
for i in response:
if not isinstance(i['mounted'], bool):
errors[url].append(i['device'])
else:
unmounted[url].append(i['device'])
for host in unmounted:
node = urlparse(host).netloc
for entry in unmounted[host]:
print("Not mounted: %s on %s" % (entry, node))
for host in errors:
node = urlparse(host).netloc
for entry in errors[host]:
print("Device errors: %s on %s" % (entry, node))
print("=" * 79)
def expirer_check(self, hosts):
"""
Obtain and print expirer statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
stats = {'object_expiration_pass': [], 'expired_last_pass': []}
recon = Scout("expirer/%s" % self.server_type, self.verbose,
self.suppress_errors, self.timeout)
print("[%s] Checking on expirers" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
stats['object_expiration_pass'].append(
response.get('object_expiration_pass'))
stats['expired_last_pass'].append(
response.get('expired_last_pass'))
for k in stats:
if stats[k]:
computed = self._gen_stats(stats[k], name=k)
if computed['reported'] > 0:
self._print_stats(computed)
else:
print("[%s] - No hosts returned valid data." % k)
else:
print("[%s] - No hosts returned valid data." % k)
print("=" * 79)
def replication_check(self, hosts):
"""
Obtain and print replication statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
stats = {'replication_time': [], 'failure': [], 'success': [],
'attempted': []}
recon = Scout("replication/%s" % self.server_type, self.verbose,
self.suppress_errors, self.timeout)
print("[%s] Checking on replication" % self._ptime())
least_recent_time = 9999999999
least_recent_url = None
most_recent_time = 0
most_recent_url = None
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
stats['replication_time'].append(
response.get('replication_time'))
repl_stats = response['replication_stats']
if repl_stats:
for stat_key in ['attempted', 'failure', 'success']:
stats[stat_key].append(repl_stats.get(stat_key))
last = response.get('replication_last', 0)
if last < least_recent_time:
least_recent_time = last
least_recent_url = url
if last > most_recent_time:
most_recent_time = last
most_recent_url = url
for k in stats:
if stats[k]:
if k != 'replication_time':
computed = self._gen_stats(stats[k],
name='replication_%s' % k)
else:
computed = self._gen_stats(stats[k], name=k)
if computed['reported'] > 0:
self._print_stats(computed)
else:
print("[%s] - No hosts returned valid data." % k)
else:
print("[%s] - No hosts returned valid data." % k)
if least_recent_url is not None:
host = urlparse(least_recent_url).netloc
if not least_recent_time:
print('Oldest completion was NEVER by %s.' % host)
else:
elapsed = time.time() - least_recent_time
elapsed, elapsed_unit = seconds2timeunit(elapsed)
print('Oldest completion was %s (%d %s ago) by %s.' % (
time.strftime('%Y-%m-%d %H:%M:%S',
time.gmtime(least_recent_time)),
elapsed, elapsed_unit, host))
if most_recent_url is not None:
host = urlparse(most_recent_url).netloc
elapsed = time.time() - most_recent_time
elapsed, elapsed_unit = seconds2timeunit(elapsed)
print('Most recent completion was %s (%d %s ago) by %s.' % (
time.strftime('%Y-%m-%d %H:%M:%S',
time.gmtime(most_recent_time)),
elapsed, elapsed_unit, host))
print("=" * 79)
def object_replication_check(self, hosts):
"""
Obtain and print replication statistics from object servers
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
stats = {}
recon = Scout("replication", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking on replication" % self._ptime())
least_recent_time = 9999999999
least_recent_url = None
most_recent_time = 0
most_recent_url = None
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
stats[url] = response['object_replication_time']
last = response.get('object_replication_last', 0)
if last < least_recent_time:
least_recent_time = last
least_recent_url = url
if last > most_recent_time:
most_recent_time = last
most_recent_url = url
times = [x for x in stats.values() if x is not None]
if len(stats) > 0 and len(times) > 0:
computed = self._gen_stats(times, 'replication_time')
if computed['reported'] > 0:
self._print_stats(computed)
else:
print("[replication_time] - No hosts returned valid data.")
else:
print("[replication_time] - No hosts returned valid data.")
if least_recent_url is not None:
host = urlparse(least_recent_url).netloc
if not least_recent_time:
print('Oldest completion was NEVER by %s.' % host)
else:
elapsed = time.time() - least_recent_time
elapsed, elapsed_unit = seconds2timeunit(elapsed)
print('Oldest completion was %s (%d %s ago) by %s.' % (
time.strftime('%Y-%m-%d %H:%M:%S',
time.gmtime(least_recent_time)),
elapsed, elapsed_unit, host))
if most_recent_url is not None:
host = urlparse(most_recent_url).netloc
elapsed = time.time() - most_recent_time
elapsed, elapsed_unit = seconds2timeunit(elapsed)
print('Most recent completion was %s (%d %s ago) by %s.' % (
time.strftime('%Y-%m-%d %H:%M:%S',
time.gmtime(most_recent_time)),
elapsed, elapsed_unit, host))
print("=" * 79)
def updater_check(self, hosts):
"""
Obtain and print updater statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
stats = []
recon = Scout("updater/%s" % self.server_type, self.verbose,
self.suppress_errors, self.timeout)
print("[%s] Checking updater times" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
if response['%s_updater_sweep' % self.server_type]:
stats.append(response['%s_updater_sweep' %
self.server_type])
if len(stats) > 0:
computed = self._gen_stats(stats, name='updater_last_sweep')
if computed['reported'] > 0:
self._print_stats(computed)
else:
print("[updater_last_sweep] - No hosts returned valid data.")
else:
print("[updater_last_sweep] - No hosts returned valid data.")
print("=" * 79)
def auditor_check(self, hosts):
"""
Obtain and print obj auditor statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
scan = {}
adone = '%s_auditor_pass_completed' % self.server_type
afail = '%s_audits_failed' % self.server_type
apass = '%s_audits_passed' % self.server_type
asince = '%s_audits_since' % self.server_type
recon = Scout("auditor/%s" % self.server_type, self.verbose,
self.suppress_errors, self.timeout)
print("[%s] Checking auditor stats" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
scan[url] = response
if len(scan) < 1:
print("Error: No hosts available")
return
stats = {}
stats[adone] = [scan[i][adone] for i in scan
if scan[i][adone] is not None]
stats[afail] = [scan[i][afail] for i in scan
if scan[i][afail] is not None]
stats[apass] = [scan[i][apass] for i in scan
if scan[i][apass] is not None]
stats[asince] = [scan[i][asince] for i in scan
if scan[i][asince] is not None]
for k in stats:
if len(stats[k]) < 1:
print("[%s] - No hosts returned valid data." % k)
else:
if k != asince:
computed = self._gen_stats(stats[k], k)
if computed['reported'] > 0:
self._print_stats(computed)
if len(stats[asince]) >= 1:
low = min(stats[asince])
high = max(stats[asince])
total = sum(stats[asince])
average = total / len(stats[asince])
print('[last_pass] oldest: %s, newest: %s, avg: %s' %
(self._ptime(low), self._ptime(high), self._ptime(average)))
print("=" * 79)
def nested_get_value(self, key, recon_entry):
"""
Generator that yields all values for given key in a recon cache entry.
This is for use with object auditor recon cache entries. If the
object auditor has run in parallel, the recon cache will have entries
of the form: {'object_auditor_stats_ALL': { 'disk1': {..},
'disk2': {..},
'disk3': {..},
...}}
If the object auditor hasn't run in parallel, the recon cache will have
entries of the form: {'object_auditor_stats_ALL': {...}}.
The ZBF auditor doesn't run in parallel. However, if a subset of
devices is selected for auditing, the recon cache will have an entry
of the form: {'object_auditor_stats_ZBF': {'disk1disk2..diskN': {}}}
We use this generator to find all instances of a particular key in
these multi-level dictionaries.
"""
for k, v in recon_entry.items():
if isinstance(v, dict):
for value in self.nested_get_value(key, v):
yield value
if k == key:
yield v
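# Illustration (added, not part of the original source): for a parallel
# auditor cache entry such as {'disk1': {'passes': 3}, 'disk2': {'passes': 5}},
# nested_get_value('passes', entry) yields 3 and 5, so the callers below can
# simply sum() the generator regardless of nesting depth.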
def object_auditor_check(self, hosts):
"""
Obtain and print obj auditor statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
all_scan = {}
zbf_scan = {}
atime = 'audit_time'
bprocessed = 'bytes_processed'
passes = 'passes'
errors = 'errors'
quarantined = 'quarantined'
recon = Scout("auditor/object", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking auditor stats " % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
if response['object_auditor_stats_ALL']:
all_scan[url] = response['object_auditor_stats_ALL']
if response['object_auditor_stats_ZBF']:
zbf_scan[url] = response['object_auditor_stats_ZBF']
if len(all_scan) > 0:
stats = {}
stats[atime] = [sum(self.nested_get_value(atime, all_scan[i]))
for i in all_scan]
stats[bprocessed] = [sum(self.nested_get_value(bprocessed,
all_scan[i])) for i in all_scan]
stats[passes] = [sum(self.nested_get_value(passes, all_scan[i]))
for i in all_scan]
stats[errors] = [sum(self.nested_get_value(errors, all_scan[i]))
for i in all_scan]
stats[quarantined] = [sum(self.nested_get_value(quarantined,
all_scan[i])) for i in all_scan]
for k in stats:
if None in stats[k]:
stats[k] = [x for x in stats[k] if x is not None]
if len(stats[k]) < 1:
print("[Auditor %s] - No hosts returned valid data." % k)
else:
computed = self._gen_stats(stats[k],
name='ALL_%s_last_path' % k)
if computed['reported'] > 0:
self._print_stats(computed)
else:
print("[ALL_auditor] - No hosts returned valid data.")
else:
print("[ALL_auditor] - No hosts returned valid data.")
if len(zbf_scan) > 0:
stats = {}
stats[atime] = [sum(self.nested_get_value(atime, zbf_scan[i]))
for i in zbf_scan]
stats[bprocessed] = [sum(self.nested_get_value(bprocessed,
zbf_scan[i])) for i in zbf_scan]
stats[errors] = [sum(self.nested_get_value(errors, zbf_scan[i]))
for i in zbf_scan]
stats[quarantined] = [sum(self.nested_get_value(quarantined,
zbf_scan[i])) for i in zbf_scan]
for k in stats:
if None in stats[k]:
stats[k] = [x for x in stats[k] if x is not None]
if len(stats[k]) < 1:
print("[Auditor %s] - No hosts returned valid data." % k)
else:
computed = self._gen_stats(stats[k],
name='ZBF_%s_last_path' % k)
if computed['reported'] > 0:
self._print_stats(computed)
else:
print("[ZBF_auditor] - No hosts returned valid data.")
else:
print("[ZBF_auditor] - No hosts returned valid data.")
print("=" * 79)
def load_check(self, hosts):
"""
Obtain and print load average statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
load1 = {}
load5 = {}
load15 = {}
recon = Scout("load", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking load averages" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
load1[url] = response['1m']
load5[url] = response['5m']
load15[url] = response['15m']
stats = {"1m": load1, "5m": load5, "15m": load15}
for item in stats:
if len(stats[item]) > 0:
computed = self._gen_stats(stats[item].values(),
name='%s_load_avg' % item)
self._print_stats(computed)
else:
print("[%s_load_avg] - No hosts returned valid data." % item)
print("=" * 79)
def quarantine_check(self, hosts):
"""
Obtain and print quarantine statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
objq = {}
conq = {}
acctq = {}
recon = Scout("quarantined", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking quarantine" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
objq[url] = response['objects']
conq[url] = response['containers']
acctq[url] = response['accounts']
stats = {"objects": objq, "containers": conq, "accounts": acctq}
for item in stats:
if len(stats[item]) > 0:
computed = self._gen_stats(stats[item].values(),
name='quarantined_%s' % item)
self._print_stats(computed)
else:
print("No hosts returned valid data.")
print("=" * 79)
def socket_usage(self, hosts):
"""
Obtain and print /proc/net/sockstat statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
inuse4 = {}
mem = {}
inuse6 = {}
timewait = {}
orphan = {}
recon = Scout("sockstat", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking socket usage" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
inuse4[url] = response['tcp_in_use']
mem[url] = response['tcp_mem_allocated_bytes']
inuse6[url] = response.get('tcp6_in_use', 0)
timewait[url] = response['time_wait']
orphan[url] = response['orphan']
stats = {"tcp_in_use": inuse4, "tcp_mem_allocated_bytes": mem,
"tcp6_in_use": inuse6, "time_wait": timewait,
"orphan": orphan}
for item in stats:
if len(stats[item]) > 0:
computed = self._gen_stats(stats[item].values(), item)
self._print_stats(computed)
else:
print("No hosts returned valid data.")
print("=" * 79)
def disk_usage(self, hosts, top=0, human_readable=False):
"""
Obtain and print disk usage statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
stats = {}
highs = []
lows = []
raw_total_used = []
raw_total_avail = []
percents = {}
top_percents = [(None, 0)] * top
recon = Scout("diskusage", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking disk usage now" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
hostusage = []
for entry in response:
if not isinstance(entry['mounted'], bool):
print("-> %s/%s: Error: %s" % (url, entry['device'],
entry['mounted']))
elif entry['mounted']:
used = float(entry['used']) / float(entry['size']) \
* 100.0
raw_total_used.append(entry['used'])
raw_total_avail.append(entry['avail'])
hostusage.append(round(used, 2))
for ident, oused in top_percents:
if oused < used:
top_percents.append(
(url + ' ' + entry['device'], used))
top_percents.sort(key=lambda x: -x[1])
top_percents.pop()
break
stats[url] = hostusage
for url in stats:
if len(stats[url]) > 0:
# get per host hi/los for another day
low = min(stats[url])
high = max(stats[url])
highs.append(high)
lows.append(low)
for percent in stats[url]:
percents[int(percent)] = percents.get(int(percent), 0) + 1
else:
print("-> %s: Error. No drive info available." % url)
if len(lows) > 0:
low = min(lows)
high = max(highs)
# dist graph shamelessly stolen from https://github.com/gholt/tcod
print("Distribution Graph:")
mul = 69.0 / max(percents.values())
for percent in sorted(percents):
print('% 3d%%%5d %s' % (percent, percents[percent],
'*' * int(percents[percent] * mul)))
raw_used = sum(raw_total_used)
raw_avail = sum(raw_total_avail)
raw_total = raw_used + raw_avail
avg_used = 100.0 * raw_used / raw_total
if human_readable:
raw_used = size_suffix(raw_used)
raw_avail = size_suffix(raw_avail)
raw_total = size_suffix(raw_total)
print("Disk usage: space used: %s of %s" % (raw_used, raw_total))
print("Disk usage: space free: %s of %s" % (raw_avail, raw_total))
print("Disk usage: lowest: %s%%, highest: %s%%, avg: %s%%" %
(low, high, avg_used))
else:
print("No hosts returned valid data.")
print("=" * 79)
if top_percents:
print('TOP %s' % top)
for ident, used in top_percents:
if ident:
url, device = ident.split()
host = urlparse(url).netloc.split(':')[0]
print('%.02f%% %s' % (used, '%-15s %s' % (host, device)))
def main(self):
"""
Retrieve and report cluster info from hosts running recon middleware.
"""
print("=" * 79)
usage = '''
usage: %prog <server_type> [-v] [--suppress] [-a] [-r] [-u] [-d]
[-l] [--md5] [--auditor] [--updater] [--expirer] [--sockstat]
[--human-readable]
<server_type>\taccount|container|object
Defaults to object server.
ex: %prog container -l --auditor
'''
args = optparse.OptionParser(usage)
args.add_option('--verbose', '-v', action="store_true",
help="Print verbose info")
args.add_option('--suppress', action="store_true",
help="Suppress most connection related errors")
args.add_option('--async', '-a', action="store_true",
help="Get async stats")
args.add_option('--replication', '-r', action="store_true",
help="Get replication stats")
args.add_option('--auditor', action="store_true",
help="Get auditor stats")
args.add_option('--updater', action="store_true",
help="Get updater stats")
args.add_option('--expirer', action="store_true",
help="Get expirer stats")
args.add_option('--unmounted', '-u', action="store_true",
help="Check cluster for unmounted devices")
args.add_option('--diskusage', '-d', action="store_true",
help="Get disk usage stats")
args.add_option('--human-readable', action="store_true",
help="Use human readable suffix for disk usage stats")
args.add_option('--loadstats', '-l', action="store_true",
help="Get cluster load average stats")
args.add_option('--quarantined', '-q', action="store_true",
help="Get cluster quarantine stats")
args.add_option('--md5', action="store_true",
help="Get md5sum of servers ring and compare to "
"local copy")
args.add_option('--sockstat', action="store_true",
help="Get cluster socket usage stats")
args.add_option('--top', type='int', metavar='COUNT', default=0,
help='Also show the top COUNT entries in rank order.')
args.add_option('--all', action="store_true",
help="Perform all checks. Equal to -arudlq --md5 "
"--sockstat")
args.add_option('--region', type="int",
help="Only query servers in specified region")
args.add_option('--zone', '-z', type="int",
help="Only query servers in specified zone")
args.add_option('--timeout', '-t', type="int", metavar="SECONDS",
help="Time to wait for a response from a server",
default=5)
args.add_option('--swiftdir', default="/etc/swift",
help="Default = /etc/swift")
options, arguments = args.parse_args()
if len(sys.argv) <= 1 or len(arguments) > 1:
args.print_help()
sys.exit(0)
if arguments:
if arguments[0] in self.check_types:
self.server_type = arguments[0]
else:
print("Invalid Server Type")
args.print_help()
sys.exit(1)
else:
self.server_type = 'object'
swift_dir = options.swiftdir
self.verbose = options.verbose
self.suppress_errors = options.suppress
self.timeout = options.timeout
hosts = self.get_devices(options.region, options.zone,
swift_dir, self.server_type)
print("--> Starting reconnaissance on %s hosts" % len(hosts))
print("=" * 79)
if options.all:
if self.server_type == 'object':
self.async_check(hosts)
self.object_replication_check(hosts)
self.object_auditor_check(hosts)
self.updater_check(hosts)
self.expirer_check(hosts)
elif self.server_type == 'container':
self.replication_check(hosts)
self.auditor_check(hosts)
self.updater_check(hosts)
elif self.server_type == 'account':
self.replication_check(hosts)
self.auditor_check(hosts)
self.umount_check(hosts)
self.load_check(hosts)
self.disk_usage(hosts, options.top, options.human_readable)
self.get_ringmd5(hosts, swift_dir)
self.quarantine_check(hosts)
self.socket_usage(hosts)
else:
if options.async:
if self.server_type == 'object':
self.async_check(hosts)
else:
print("Error: Can't check asyncs on non object servers.")
if options.unmounted:
self.umount_check(hosts)
if options.replication:
if self.server_type == 'object':
self.object_replication_check(hosts)
else:
self.replication_check(hosts)
if options.auditor:
if self.server_type == 'object':
self.object_auditor_check(hosts)
else:
self.auditor_check(hosts)
if options.updater:
if self.server_type == 'account':
print("Error: Can't check updaters on account servers.")
else:
self.updater_check(hosts)
if options.expirer:
if self.server_type == 'object':
self.expirer_check(hosts)
else:
print("Error: Can't check expired on non object servers.")
if options.loadstats:
self.load_check(hosts)
if options.diskusage:
self.disk_usage(hosts, options.top, options.human_readable)
if options.md5:
self.get_ringmd5(hosts, swift_dir)
self.get_swiftconfmd5(hosts)
if options.quarantined:
self.quarantine_check(hosts)
if options.sockstat:
self.socket_usage(hosts)
def main():
try:
reconnoiter = SwiftRecon()
reconnoiter.main()
except KeyboardInterrupt:
print('\n')
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1beta1", manifest={"TensorboardRun",},
)
class TensorboardRun(proto.Message):
r"""TensorboardRun maps to a specific execution of a training job
with a given set of hyperparameter values, model definition,
dataset, etc
Attributes:
name (str):
Output only. Name of the TensorboardRun. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
display_name (str):
Required. User provided name of this
TensorboardRun. This value must be unique among
all TensorboardRuns belonging to the same parent
TensorboardExperiment.
description (str):
Description of this TensorboardRun.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this
TensorboardRun was created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this
TensorboardRun was last updated.
labels (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardRun.LabelsEntry]):
etag (str):
            Used to perform consistent read-modify-write
            updates. If not set, a blind "overwrite" update
            happens.
"""
name = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
description = proto.Field(proto.STRING, number=3,)
create_time = proto.Field(proto.MESSAGE, number=6, message=timestamp_pb2.Timestamp,)
update_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,)
labels = proto.MapField(proto.STRING, proto.STRING, number=8,)
etag = proto.Field(proto.STRING, number=9,)
__all__ = tuple(sorted(__protobuf__.manifest))
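# A minimal usage sketch (added for illustration; not part of the generated
# module). It assumes the standard proto-plus Message API (keyword
# construction, map-field assignment, Message.to_dict); every field value
# below is made up.
if __name__ == "__main__":
    example_run = TensorboardRun(
        display_name="example-run",
        description="Illustrative TensorboardRun instance",
    )
    example_run.labels["team"] = "research"  # labels is a string -> string map field
    print(TensorboardRun.to_dict(example_run))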
|
from __future__ import print_function
import FWCore.ParameterSet.Config as cms
import sys
process = cms.Process("CASTORDQM")
unitTest=False
if 'unitTest=True' in sys.argv:
unitTest=True
#=================================
# Event Source
#================================
if unitTest:
process.load("DQM.Integration.config.unittestinputsource_cfi")
else:
# for live online DQM in P5
process.load("DQM.Integration.config.inputsource_cfi")
# for testing in lxplus
#process.load("DQM.Integration.config.fileinputsource_cfi")
#================================
# DQM Environment
#================================
#process.DQMStore.referenceFileName = 'castor_reference.root'
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load("DQM.Integration.config.environment_cfi")
process.dqmEnv.subSystemFolder = "Castor"
process.dqmSaver.tag = "Castor"
process.load("FWCore.MessageLogger.MessageLogger_cfi")
#process.source = cms.Source("PoolSource",
# fileNames = cms.untracked.vstring(
#'file:/eos/user/p/popov/rundata/Castor2018/525D2460-6A90-E811-RAWrun320317.root'),
# )
#============================================
# Castor Conditions: from Global Conditions Tag
#============================================
process.load("DQM.Integration.config.FrontierCondition_GT_cfi")
##
#from Configuration.AlCa.GlobalTag import GlobalTag as gtCustomise
#process.GlobalTag = gtCustomise(process.GlobalTag, 'auto:run2_data', '')
#-----------------------------
# Castor DQM Source + SimpleReconstructor
#-----------------------------
###process.load("RecoLocalCalo.CastorReco.CastorSimpleReconstructor_cfi")
process.castorreco = cms.EDProducer("CastorSimpleReconstructor",
correctionPhaseNS = cms.double(0.0),
digiLabel = cms.InputTag("castorDigis"),
samplesToAdd = cms.int32(10),
Subdetector = cms.string('CASTOR'),
firstSample = cms.int32(0),
correctForPhaseContainment = cms.bool(False),
correctForTimeslew = cms.bool(False),
tsFromDB = cms.bool(False), #True
setSaturationFlag = cms.bool(True),
maxADCvalue = cms.int32(127),
doSaturationCorr = cms.bool(False) #True
)
###process.castorreco.tsFromDB = cms.untracked.bool(False)
process.load('RecoLocalCalo.Castor.Castor_cff') #castor tower and jet reconstruction
from EventFilter.CastorRawToDigi.CastorRawToDigi_cff import *
process.castorDigis = castorDigis.clone()
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
process.castorMonitor = DQMEDAnalyzer("CastorMonitorModule",
### GLOBAL VARIABLES
debug = cms.untracked.int32(0), #(=0 - no messages)
# Turn on/off timing diagnostic
showTiming = cms.untracked.bool(False),
# Define Labels
l1tStage2uGtSource = cms.InputTag("gtStage2Digis"),
tagTriggerResults = cms.InputTag('TriggerResults','','HLT'),
HltPaths = cms.vstring("HLT_ZeroBias","HLT_Random"),
digiLabel = cms.InputTag("castorDigis"),
rawLabel = cms.InputTag("rawDataCollector"),
unpackerReportLabel = cms.InputTag("castorDigis"),
CastorRecHitLabel = cms.InputTag("castorreco"),
CastorTowerLabel = cms.InputTag("CastorTowerReco"),
CastorBasicJetsLabel = cms.InputTag("ak7CastorJets"),
CastorJetIDLabel = cms.InputTag("ak7CastorJetID"),
DataIntMonitor= cms.untracked.bool(True),
TowerJetMonitor= cms.untracked.bool(True),
DigiMonitor = cms.untracked.bool(True),
RecHitMonitor = cms.untracked.bool(True),
# LEDMonitor = cms.untracked.bool(True),
# LEDPerChannel = cms.untracked.bool(True),
FirstSignalBin = cms.untracked.int32(0),
LastSignalBin = cms.untracked.int32(9)
)
#-----------------------------
# Scheduling
#-----------------------------
process.options = cms.untracked.PSet(
Rethrow = cms.untracked.vstring('ProductNotFound',
'TooManyProducts',
'TooFewProducts')
)
# castorDigis -> CastorRawToDigi_cfi
# castorreco -> CastorSimpleReconstructor_cfi
# castorMonitor -> CastorMonitorModule_cfi
process.p = cms.Path(process.castorDigis*process.castorreco*process.castorMonitor*process.dqmEnv*process.dqmSaver)
#process.p = cms.Path(process.castorDigis*process.castorMonitor*process.dqmEnv*process.dqmSaver)
#process.p = cms.Path(process.castorMonitor*process.dqmEnv*process.dqmSaver)
process.castorDigis.InputLabel = cms.InputTag("rawDataCollector")
process.castorMonitor.rawLabel = cms.InputTag("rawDataCollector")
#--------------------------------------------------
# Heavy Ion Specific Fed Raw Data Collection Label
#--------------------------------------------------
print("Running with run type = ", process.runType.getRunTypeName())
if (process.runType.getRunType() == process.runType.hi_run):
process.castorDigis.InputLabel = cms.InputTag("rawDataRepacker")
process.castorMonitor.rawLabel = cms.InputTag("rawDataRepacker")
### process customizations included here
from DQM.Integration.config.online_customizations_cfi import *
process = customise(process)
|
# -*- coding: utf-8 -*-
"""
@Author: tushushu
@Date: 2018-11-14 11:11:35
@Last Modified by: tushushu
@Last Modified time: 2018-11-14 11:11:35
"""
from copy import copy
from itertools import tee
from numpy import exp, ndarray
from random import randint
from statistics import median
from time import time
from typing import List
def arr2str(arr: ndarray, n_digits: int) -> str:
ret = ", ".join(map(lambda x: str(round(x, n_digits)), arr))
return "[%s]" % ret
def run_time(fn):
"""Decorator for calculating function runtime.Depending on the length of time,
seconds, milliseconds, microseconds or nanoseconds are used.
Arguments:
fn {function}
Returns:
function
"""
def inner():
start = time()
fn()
ret = time() - start
if ret < 1e-6:
unit = "ns"
ret *= 1e9
elif ret < 1e-3:
unit = "us"
ret *= 1e6
elif ret < 1:
unit = "ms"
ret *= 1e3
else:
unit = "s"
print("Total run time is %.1f %s\n" % (ret, unit))
return inner
def sigmoid(x):
"""Calculate the sigmoid value of x.
Sigmoid(x) = 1 / (1 + e^(-x))
It would cause math range error when x < -709
Arguments:
x {float}
Returns:
float -- between 0 and 1
"""
return 1 / (1 + exp(-x))
def split_list(X, idxs, feature, split, low, high):
""" Sort the list, if the element in the array is less than result index,
the element value is less than the split. Otherwise, the element value is
equal to or greater than the split.
Arguments:
X {list} -- 2d list object with int or float
idx {list} -- indexes, 1d list object with int
feature {int} -- Feature number
split {float} -- The split point value
Returns:
int -- index
"""
p = low
q = high - 1
while p <= q:
if X[idxs[p]][feature] < split:
p += 1
elif X[idxs[q]][feature] >= split:
q -= 1
else:
idxs[p], idxs[q] = idxs[q], idxs[p]
return p
def list_split(X, idxs, feature, split):
"""Another implementation of "split_list" function for performance comparison.
    Arguments:
        X {list} -- 2d list object with int or float
        idxs {list} -- indexes, 1d list object with int
        feature {int} -- Feature number
        split {float} -- The split point value
Returns:
list -- 2d list with left and right split result
"""
ret = [[], []]
while idxs:
if X[idxs[0]][feature] < split:
ret[0].append(idxs.pop(0))
else:
ret[1].append(idxs.pop(0))
return ret
def _test_split_list(iterations=10**4, max_n_samples=1000, max_n_features=10,
max_element_value=100):
"""Test correctness and runtime efficiency of both split_list functions.
_split_list takes about 2.4 times as split_list does.
Keyword Arguments:
iterations {int} -- How many times to iterate. (default: {10**4})
max_arr_len {int} -- Max random length of array (default: {1000})
max_num {int} -- Max value of array's elements (default: {100})
"""
time_1 = time_2 = 0
for _ in range(iterations):
n = randint(1, max_n_samples)
m = randint(1, max_n_features)
X = [[randint(1, max_element_value) for _ in range(m)]
for _ in range(n)]
idxs_1 = list(range(n))
idxs_2 = copy(idxs_1)
feature = randint(1, m) - 1
split = median(map(lambda i: X[i][feature], range(n)))
low = 0
high = n
start = time()
ret_1 = split_list(X, idxs_1, feature, split, low, high)
time_1 += time() - start
start = time()
ret_2 = list_split(X, idxs_2, feature, split)
time_2 += time() - start
assert all(i_1 == i_2 for i_1, i_2 in zip(
sorted(idxs_1[low:ret_1]), sorted(ret_2[0])))
assert all(i_1 == i_2 for i_1, i_2 in zip(
sorted(idxs_1[ret_1:high]), sorted(ret_2[1])))
print("Test passed!")
print("split_list runtime for %d iterations is: %.3f seconds" %
(iterations, time_1))
print("_split_list runtime for %d iterations is: %.3f seconds" %
(iterations, time_2))
def get_euclidean_distance(arr1: ndarray, arr2: ndarray) -> float:
""""Calculate the Euclidean distance of two vectors.
Arguments:
arr1 {ndarray}
arr2 {ndarray}
Returns:
float
"""
return ((arr1 - arr2) ** 2).sum() ** 0.5
def get_eu_dist(arr1: List, arr2: List) -> float:
"""Calculate the Euclidean distance of two vectors.
Arguments:
arr1 {list} -- 1d list object with int or float
arr2 {list} -- 1d list object with int or float
Returns:
float -- Euclidean distance
"""
return sum((x1 - x2) ** 2 for x1, x2 in zip(arr1, arr2)) ** 0.5
def get_cosine_distance(arr1, arr2):
"""Calculate the cosine distance of two vectors.
Arguments:
arr1 {list} -- 1d list object with int or float
arr2 {list} -- 1d list object with int or float
Returns:
float -- cosine distance
"""
numerator = sum(x1 * x2 for x1, x2 in zip(arr1, arr2))
denominator = (sum(x1 ** 2 for x1 in arr1) *
sum(x2 ** 2 for x2 in arr2)) ** 0.5
return numerator / denominator
def pairwise(iterable):
"""s -> (s0,s1), (s1,s2), (s2, s3), ...
Arguments:
iterable {iterable}
Returns:
zip
"""
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def arg_max_2d(dic):
return max(((k, *max(dic_inner.items(), key=lambda x: x[1]))
for k, dic_inner in dic.items()), key=lambda x: x[2])[:2]
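# A small usage sketch (added for illustration; not part of the original
# module): time a toy function with the run_time decorator and exercise a
# couple of the helpers above. The sample values are arbitrary.
if __name__ == "__main__":
    @run_time
    def _demo():
        pairs = list(pairwise(range(5)))                       # [(0, 1), (1, 2), (2, 3), (3, 4)]
        print(pairs)
        print(round(get_cosine_distance([1, 0], [1, 1]), 4))   # ~0.7071 (cosine similarity as implemented above)
        print(round(sigmoid(0.0), 4))                          # 0.5
    _demo()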
|
from django.db import models
DAY_CHOICES = [
('vkl', 'Koko viikonloppu'),
('la', 'Vain lauantai'),
('su', 'Vain sunnuntai'),
]
class Artist(models.Model):
site = models.ForeignKey('sites.Site', on_delete=models.CASCADE)
day = models.CharField(max_length=max(len(i) for (i, j) in DAY_CHOICES), default=DAY_CHOICES[0][0], choices=DAY_CHOICES)
table_number = models.IntegerField(blank=True, null=True)
name = models.CharField(max_length=255)
homepage_url = models.CharField(max_length=255, blank=True, default='')
description = models.TextField(blank=True, default='')
image_file = models.ImageField(upload_to='artists', blank=True)
def __str__(self):
return self.name
class Meta:
verbose_name = 'taiteilija'
verbose_name_plural = 'taiteilijat'
ordering = ('site', 'table_number', 'name')
|
#!/usr/bin/env python
from fake_rpi import printf
from fake_rpi import toggle_print
# Replace libraries by fake ones
import sys
import fake_rpi
sys.modules['RPi'] = fake_rpi.RPi
sys.modules['smbus'] = fake_rpi.smbus
# Then keep the transparent import everywhere in the application and dependencies
import RPi.GPIO as GPIO
import smbus
toggle_print(True) # turn on/off printing
pwm = GPIO.PWM()
pwm.start(5)
i2c = smbus.SMBus(1)
i2c.write_byte_data(1, 2, 3)
i2c.read_byte_data(1, 2)
i2c.close()
class MyBus(smbus.SMBus):
"""
Here I want to over ride how this behaves for testing
"""
@printf
def __init__(self):
pass
@printf
def read_byte_data(self, a, b):
ret = 0x71 if a == 0x68 else 0x48
return ret
i2c = MyBus()
i2c.read_byte_data(1, 2)
i2c.read_i2c_block_data(1, 2, 3)
|
import sys
from pipenv.patched.notpip._internal.cli.main import main
from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional, List
def _wrapper(args=None):
# type: (Optional[List[str]]) -> int
"""Central wrapper for all old entrypoints.
Historically pip has had several entrypoints defined. Because of issues
arising from PATH, sys.path, multiple Pythons, their interactions, and most
of them having a pip installed, users suffer every time an entrypoint gets
moved.
To alleviate this pain, and provide a mechanism for warning users and
directing them to an appropriate place for help, we now define all of
our old entrypoints as wrappers for the current one.
"""
sys.stderr.write(
"WARNING: pip is being invoked by an old script wrapper. This will "
"fail in a future version of pip.\n"
"Please see https://github.com/pypa/pip/issues/5599 for advice on "
"fixing the underlying issue.\n"
"To avoid this problem you can invoke Python with '-m pip' instead of "
"running pip directly.\n"
)
return main(args)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import requests
import re
import json
rs = requests.get('https://go.mail.ru/search?q=cats')
print(rs)
data = re.search('go.dataJson = (.+);', rs.text)
if not data:
print('Not data!')
quit()
data = data.group(1)
rs_data = json.loads(data)
print(rs_data)
for result in rs_data['serp']['results']:
if 'url' not in result:
continue
print(result['url'])
# http://mau.ru/
# https://ru.wikipedia.org/wiki/%D0%9A%D0%BE%D1%88%D0%BA%D0%B8_(%D0%BC%D1%8E%D0%B7%D0%B8%D0%BA%D0%BB)
# http://cats-crasharena.ru/
# http://vk.com/vk.cats
# https://trashbox.ru/link/zeptolab-cats-android
# https://wooordhunt.ru/word/cats
# https://play.google.com/store/apps/details?hl=ru&id=com.zeptolab.cats.google
# http://www.youtube.com/channel/UC4uhxfDlhySxeEQldGsn4TQ
# http://anolink.ru/category/igry/battle-cats/
# https://www.babla.ru/%D0%B0%D0%BD%D0%B3%D0%BB%D0%B8%D0%B9%D1%81%D0%BA%D0%B8%D0%B9-%D1%80%D1%83%D1%81%D1%81%D0%BA%D0%B8%D0%B9/cats
|
import os
import sys
import torch
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'ops'))
import cpp_wrappers.cpp_subsampling.grid_subsampling as cpp_subsampling
def grid_subsampling(points, features=None, labels=None, sampleDl=0.1, verbose=0):
"""
    CPP wrapper for a grid subsampling (method = barycenter for points and features)
:param points: (N, 3) matrix of input points
:param features: optional (N, d) matrix of features (floating number)
:param labels: optional (N,) matrix of integer labels
:param sampleDl: parameter defining the size of grid voxels
:param verbose: 1 to display
    :return: subsampled points, with features and/or labels depending on the input
"""
if (features is None) and (labels is None):
return cpp_subsampling.compute(points, sampleDl=sampleDl, verbose=verbose)
elif (labels is None):
return cpp_subsampling.compute(points, features=features, sampleDl=sampleDl, verbose=verbose)
elif (features is None):
return cpp_subsampling.compute(points, classes=labels, sampleDl=sampleDl, verbose=verbose)
else:
return cpp_subsampling.compute(points, features=features, classes=labels, sampleDl=sampleDl, verbose=verbose)
def angle_axis(angle, axis):
    # type: (float, np.ndarray) -> torch.Tensor
    r"""Returns a 3x3 rotation matrix that performs a rotation around axis by angle
Parameters
----------
angle : float
Angle to rotate by
axis: np.ndarray
Axis to rotate about
Returns
-------
torch.Tensor
3x3 rotation matrix
"""
u = axis / np.linalg.norm(axis)
cosval, sinval = np.cos(angle), np.sin(angle)
# yapf: disable
cross_prod_mat = np.array([[0.0, -u[2], u[1]],
[u[2], 0.0, -u[0]],
[-u[1], u[0], 0.0]])
R = torch.from_numpy(
cosval * np.eye(3)
+ sinval * cross_prod_mat
+ (1.0 - cosval) * np.outer(u, u)
)
# yapf: enable
return R.float()
class PointcloudScale(object):
def __init__(self, scale_low=0.8, scale_high=1.25):
self.scale_low, self.scale_high = scale_low, scale_high
def __call__(self, points):
scaler = np.random.uniform(self.scale_low, self.scale_high, size=[3])
scaler = torch.from_numpy(scaler).float()
points[:, 0:3] *= scaler
return points
class PointcloudRotate(object):
def __init__(self, axis=np.array([0.0, 1.0, 0.0])):
self.axis = axis
def __call__(self, points):
rotation_angle = np.random.uniform() * 2 * np.pi
rotation_matrix = angle_axis(rotation_angle, self.axis)
normals = points.size(1) > 3
if not normals:
return torch.matmul(points, rotation_matrix.t())
else:
pc_xyz = points[:, 0:3]
pc_normals = points[:, 3:]
points[:, 0:3] = torch.matmul(pc_xyz, rotation_matrix.t())
points[:, 3:] = torch.matmul(pc_normals, rotation_matrix.t())
return points
class PointcloudRotatePerturbation(object):
def __init__(self, angle_sigma=0.06, angle_clip=0.18):
self.angle_sigma, self.angle_clip = angle_sigma, angle_clip
def _get_angles(self):
angles = np.clip(
self.angle_sigma * np.random.randn(3), -self.angle_clip, self.angle_clip
)
return angles
def __call__(self, points):
angles_ = self._get_angles()
Rx = angle_axis(angles_[0], np.array([1.0, 0.0, 0.0]))
Ry = angle_axis(angles_[1], np.array([0.0, 1.0, 0.0]))
Rz = angle_axis(angles_[2], np.array([0.0, 0.0, 1.0]))
rotation_matrix = torch.matmul(torch.matmul(Rz, Ry), Rx)
normals = points.size(1) > 3
if not normals:
return torch.matmul(points, rotation_matrix.t())
else:
pc_xyz = points[:, 0:3]
pc_normals = points[:, 3:]
points[:, 0:3] = torch.matmul(pc_xyz, rotation_matrix.t())
points[:, 3:] = torch.matmul(pc_normals, rotation_matrix.t())
return points
class PointcloudRandomRotate(object):
def __init__(self, x_range=np.pi, y_range=np.pi, z_range=np.pi):
self.x_range = x_range
self.y_range = y_range
self.z_range = z_range
def _get_angles(self):
x_angle = np.random.uniform(-self.x_range, self.x_range)
y_angle = np.random.uniform(-self.y_range, self.y_range)
z_angle = np.random.uniform(-self.z_range, self.z_range)
return np.array([x_angle, y_angle, z_angle])
def __call__(self, points):
angles_ = self._get_angles()
Rx = angle_axis(angles_[0], np.array([1.0, 0.0, 0.0]))
Ry = angle_axis(angles_[1], np.array([0.0, 1.0, 0.0]))
Rz = angle_axis(angles_[2], np.array([0.0, 0.0, 1.0]))
rotation_matrix = torch.matmul(torch.matmul(Rz, Ry), Rx)
normals = points.size(1) > 3
if not normals:
return torch.matmul(points, rotation_matrix.t())
else:
pc_xyz = points[:, 0:3]
pc_normals = points[:, 3:]
points[:, 0:3] = torch.matmul(pc_xyz, rotation_matrix.t())
points[:, 3:] = torch.matmul(pc_normals, rotation_matrix.t())
return points
class PointcloudJitter(object):
def __init__(self, std=0.01, clip=0.05):
self.std, self.clip = std, clip
def __call__(self, points):
jittered_data = (
points.new(points.size(0), 3)
.normal_(mean=0.0, std=self.std)
.clamp_(-self.clip, self.clip)
)
points[:, 0:3] += jittered_data
return points
class PointcloudTranslate(object):
def __init__(self, translate_range=0.1):
self.translate_range = translate_range
def __call__(self, points):
translation = np.random.uniform(-self.translate_range, self.translate_range, size=[3])
translation = torch.from_numpy(translation)
points[:, 0:3] += translation
return points
class PointcloudToTensor(object):
def __call__(self, points):
return torch.from_numpy(points).float()
class PointcloudRandomInputDropout(object):
def __init__(self, max_dropout_ratio=0.875):
assert max_dropout_ratio >= 0 and max_dropout_ratio < 1
self.max_dropout_ratio = max_dropout_ratio
def __call__(self, points):
pc = points.numpy()
dropout_ratio = np.random.random() * self.max_dropout_ratio # 0~0.875
drop_idx = np.where(np.random.random((pc.shape[0])) <= dropout_ratio)[0]
if len(drop_idx) > 0:
pc[drop_idx] = pc[0] # set to the first point
return torch.from_numpy(pc).float()
class PointcloudScaleAndTranslate(object):
def __init__(self, scale_low=2. / 3., scale_high=3. / 2., translate_range=0.2):
self.scale_low = scale_low
self.scale_high = scale_high
self.translate_range = translate_range
def __call__(self, pc):
xyz1 = np.random.uniform(low=self.scale_low, high=self.scale_high, size=[3])
xyz2 = np.random.uniform(low=-self.translate_range, high=self.translate_range, size=[3])
pc[:, 0:3] = torch.mul(pc[:, 0:3], torch.from_numpy(xyz1).float()) + torch.from_numpy(
xyz2).float()
return pc
class PointcloudScaleAndJitter(object):
def __init__(self, scale_low=2. / 3., scale_high=3. / 2., std=0.01, clip=0.05, augment_symmetries=[0, 0, 0]):
self.scale_low = scale_low
self.scale_high = scale_high
self.std = std
self.clip = clip
self.augment_symmetries = augment_symmetries
def __call__(self, pc):
xyz1 = np.random.uniform(low=self.scale_low, high=self.scale_high, size=[3])
symmetries = np.round(np.random.uniform(low=0, high=1, size=[3])) * 2 - 1
symmetries = symmetries * np.array(self.augment_symmetries) + (1 - np.array(self.augment_symmetries))
xyz1 *= symmetries
xyz2 = np.clip(np.random.normal(scale=self.std, size=[pc.shape[0], 3]), a_min=-self.clip, a_max=self.clip)
pc[:, 0:3] = torch.mul(pc[:, 0:3], torch.from_numpy(xyz1).float()) + torch.from_numpy(
xyz2).float()
return pc
class BatchPointcloudScaleAndTranslate(object):
def __init__(self, scale_low=2. / 3., scale_high=3. / 2., translate_range=0.2):
self.scale_low = scale_low
self.scale_high = scale_high
self.translate_range = translate_range
def __call__(self, pc):
bsize = pc.size()[0]
for i in range(bsize):
xyz1 = np.random.uniform(low=self.scale_low, high=self.scale_high, size=[3])
xyz2 = np.random.uniform(low=-self.translate_range, high=self.translate_range, size=[3])
pc[i, :, 0:3] = torch.mul(pc[i, :, 0:3], torch.from_numpy(xyz1).float().to(pc.device)) + torch.from_numpy(
xyz2).float().to(pc.device)
return pc
class BatchPointcloudScaleAndJitter(object):
def __init__(self, scale_low=2. / 3., scale_high=3. / 2., std=0.01, clip=0.05, augment_symmetries=[0, 0, 0]):
self.scale_low = scale_low
self.scale_high = scale_high
self.std, self.clip = std, clip
self.augment_symmetries = augment_symmetries
def __call__(self, pc):
bsize = pc.size()[0]
npoint = pc.size()[1]
for i in range(bsize):
xyz1 = np.random.uniform(low=self.scale_low, high=self.scale_high, size=[3])
symmetries = np.round(np.random.uniform(low=0, high=1, size=[3])) * 2 - 1
symmetries = symmetries * np.array(self.augment_symmetries) + (1 - np.array(self.augment_symmetries))
xyz1 *= symmetries
xyz2 = np.clip(np.random.normal(scale=self.std, size=[npoint, 3]), a_max=self.clip, a_min=-self.clip)
pc[i, :, 0:3] = torch.mul(pc[i, :, 0:3], torch.from_numpy(xyz1).float().to(pc.device)) + torch.from_numpy(
xyz2).float().to(pc.device)
return pc
class BatchPointcloudRandomRotate(object):
def __init__(self, x_range=np.pi, y_range=np.pi, z_range=np.pi):
self.x_range = x_range
self.y_range = y_range
self.z_range = z_range
def _get_angles(self):
x_angle = np.random.uniform(-self.x_range, self.x_range)
y_angle = np.random.uniform(-self.y_range, self.y_range)
z_angle = np.random.uniform(-self.z_range, self.z_range)
return np.array([x_angle, y_angle, z_angle])
def __call__(self, pc):
bsize = pc.size()[0]
normals = pc.size()[2] > 3
for i in range(bsize):
angles_ = self._get_angles()
Rx = angle_axis(angles_[0], np.array([1.0, 0.0, 0.0]))
Ry = angle_axis(angles_[1], np.array([0.0, 1.0, 0.0]))
Rz = angle_axis(angles_[2], np.array([0.0, 0.0, 1.0]))
rotation_matrix = torch.matmul(torch.matmul(Rz, Ry), Rx).to(pc.device)
if not normals:
pc[i, :, 0:3] = torch.matmul(pc[i, :, 0:3], rotation_matrix.t())
else:
pc[i, :, 0:3] = torch.matmul(pc[i, :, 0:3], rotation_matrix.t())
pc[i, :, 3:] = torch.matmul(pc[i, :, 3:], rotation_matrix.t())
return pc
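# A minimal usage sketch (added for illustration; not part of the original
# module): apply a few of the transforms above to a random point cloud. The
# shapes and transform choices are illustrative only.
if __name__ == "__main__":
    raw_points = np.random.rand(1024, 3).astype(np.float32)   # N x 3 xyz points
    transforms = [PointcloudToTensor(), PointcloudScale(), PointcloudJitter()]
    pts = raw_points
    for t in transforms:
        pts = t(pts)
    print(pts.shape, pts.dtype)                                # torch.Size([1024, 3]) torch.float32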
|
#!/usr/bin/env python3
import sys
sys.path.insert(0,'../')
from aoc_input import *
from itertools import permutations
if len(sys.argv) != 2:
print('Usage:', sys.argv[0], '<input.txt>')
sys.exit(1)
def flatten(s):
ret = []
level = 0
for c in s:
if c == '[':
level += 1
elif c == ']':
level -= 1
elif c == ',':
continue
else:
# single digit
ret.append((int(c), level))
return ret
# To explode a pair, the pair's left value is added to the first regular number
# to the left of the exploding pair (if any), and the pair's right value is added
# to the first regular number to the right of the exploding pair (if any).
def explode(n):
length = len(n)
for i in range(length):
lval, depth = n[i]
if depth > 4:
rval = n[i + 1][0]
if i > 0:
prevval, prevdepth = n[i - 1]
n[i - 1] = (prevval + lval, prevdepth)
if i + 2 < length:
nval, ndepth = n[i + 2]
n[i + 2] = (nval + rval, ndepth)
n[i : i + 2] = [(0, depth - 1)]
# Exploded
return True
# Did not explode
return False
# To split a regular number, replace it with a pair;
# the left element of the pair should be the regular number divided by two and rounded down,
# while the right element of the pair should be the regular number divided by two and rounded up.
# For example, 10 becomes [5,5], 11 becomes [5,6], 12 becomes [6,6], and so on.
def split(n):
for i in range(len(n)):
value, depth = n[i]
if value >= 10:
# Split
floor = value // 2
if value & 1:
# round up
ceil = floor + 1
else:
ceil = floor
n[i : i + 1] = (floor, depth + 1), (ceil, depth + 1)
return True
return False
def add(l1, l2):
#print('add', l1, 'and', l2)
l = []
for val, level in l1 + l2:
l.append((val, level + 1))
while True:
if not explode(l):
if not split(l):
return l
# The magnitude of a pair is 3 times the magnitude of its left element
# plus 2 times the magnitude of its right element.
# The magnitude of a regular number is just that number.
def mag(l):
while True:
for i in range(len(l) - 1):
v1, d1 = l[i]
v2, d2 = l[i + 1]
if d1 == d2:
# depths equal
l[i : i + 2] = [(3 * v1 + 2 * v2, d2 - 1)]
break
else:
break
return l[0][0]
def magnitude(l):
newl = l[0]
for e in l[1:]:
# add does the exploding and splitting
newl = add(newl, e)
return mag(newl)
a = input_as_lines(sys.argv[1])
fl = []
for line in a:
fl.append(flatten(line))
print('part 1:', magnitude(fl))
mags = []
for p1, p2 in permutations(fl, 2):
mags.append(magnitude([p1] + [p2]))
print('part 2:', max(mags))
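# Quick self-check (added for illustration; not part of the original solution):
# the worked example from the puzzle text, [[1,2],[[3,4],5]], has magnitude
# 3*(3*1 + 2*2) + 2*(3*(3*3 + 2*4) + 2*5) = 143.
assert mag(flatten('[[1,2],[[3,4],5]]')) == 143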
|
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import io
import json
import math
import os
import tarfile
import zipfile
import flask
import werkzeug.exceptions
from . import images as model_images
from . import ModelJob
from digits.pretrained_model.job import PretrainedModelJob
from digits import frameworks, extensions
from digits.utils import auth
from digits.utils.routing import request_wants_json
from digits.webapp import scheduler
blueprint = flask.Blueprint(__name__, __name__)
@blueprint.route('/<job_id>.json', methods=['GET'])
@blueprint.route('/<job_id>', methods=['GET'])
def show(job_id):
"""
Show a ModelJob
Returns JSON when requested:
{id, name, directory, status, snapshots: [epoch,epoch,...]}
"""
job = scheduler.get_job(job_id)
if job is None:
raise werkzeug.exceptions.NotFound('Job not found')
related_jobs = scheduler.get_related_jobs(job)
if request_wants_json():
return flask.jsonify(job.json_dict(True))
else:
if isinstance(job, model_images.ImageClassificationModelJob):
return model_images.classification.views.show(job, related_jobs=related_jobs)
elif isinstance(job, model_images.GenericImageModelJob):
return model_images.generic.views.show(job, related_jobs=related_jobs)
else:
raise werkzeug.exceptions.BadRequest(
'Invalid job type')
@blueprint.route('/customize', methods=['POST'])
def customize():
"""
Returns a customized file for the ModelJob based on completed form fields
"""
network = flask.request.args['network']
framework = flask.request.args.get('framework')
if not network:
raise werkzeug.exceptions.BadRequest('network not provided')
fw = frameworks.get_framework_by_id(framework)
# can we find it in standard networks?
network_desc = fw.get_standard_network_desc(network)
if network_desc:
return json.dumps({'network': network_desc})
# not found in standard networks, looking for matching job
job = scheduler.get_job(network)
if job is None:
raise werkzeug.exceptions.NotFound('Job not found')
snapshot = None
epoch = float(flask.request.form.get('snapshot_epoch', 0))
if epoch == 0:
pass
elif epoch == -1:
snapshot = job.train_task().pretrained_model
else:
for filename, e in job.train_task().snapshots:
if e == epoch:
snapshot = job.path(filename)
break
if isinstance(job, PretrainedModelJob):
model_def = open(job.get_model_def_path(), 'r')
network = model_def.read()
snapshot = job.get_weights_path()
python_layer = job.get_python_layer_path()
else:
network = job.train_task().get_network_desc()
python_layer = None
return json.dumps({
'network': network,
'snapshot': snapshot,
'python_layer': python_layer
})
@blueprint.route('/view-config/<extension_id>', methods=['GET'])
def view_config(extension_id):
"""
Returns a rendering of a view extension configuration template
"""
extension = extensions.view.get_extension(extension_id)
if extension is None:
raise ValueError("Unknown extension '%s'" % extension_id)
config_form = extension.get_config_form()
template, context = extension.get_config_template(config_form)
return flask.render_template_string(template, **context)
@blueprint.route('/visualize-network', methods=['POST'])
def visualize_network():
"""
Returns a visualization of the custom network as a string of PNG data
"""
framework = flask.request.args.get('framework')
if not framework:
raise werkzeug.exceptions.BadRequest('framework not provided')
fw = frameworks.get_framework_by_id(framework)
ret = fw.get_network_visualization(flask.request.form['custom_network'])
return ret
@blueprint.route('/visualize-lr', methods=['POST'])
def visualize_lr():
"""
Returns a JSON object of data used to create the learning rate graph
"""
policy = flask.request.form['lr_policy']
# There may be multiple lrs if the learning_rate is swept
lrs = map(float, flask.request.form['learning_rate'].split(','))
if policy == 'fixed':
pass
elif policy == 'step':
step = float(flask.request.form['lr_step_size'])
gamma = float(flask.request.form['lr_step_gamma'])
elif policy == 'multistep':
steps = [float(s) for s in flask.request.form['lr_multistep_values'].split(',')]
current_step = 0
gamma = float(flask.request.form['lr_multistep_gamma'])
elif policy == 'exp':
gamma = float(flask.request.form['lr_exp_gamma'])
elif policy == 'inv':
gamma = float(flask.request.form['lr_inv_gamma'])
power = float(flask.request.form['lr_inv_power'])
elif policy == 'poly':
power = float(flask.request.form['lr_poly_power'])
elif policy == 'sigmoid':
step = float(flask.request.form['lr_sigmoid_step'])
gamma = float(flask.request.form['lr_sigmoid_gamma'])
else:
raise werkzeug.exceptions.BadRequest('Invalid policy')
datalist = []
for j, lr in enumerate(lrs):
data = ['Learning Rate %d' % j]
for i in xrange(101):
if policy == 'fixed':
data.append(lr)
elif policy == 'step':
data.append(lr * math.pow(gamma, math.floor(float(i) / step)))
elif policy == 'multistep':
if current_step < len(steps) and i >= steps[current_step]:
current_step += 1
data.append(lr * math.pow(gamma, current_step))
elif policy == 'exp':
data.append(lr * math.pow(gamma, i))
elif policy == 'inv':
data.append(lr * math.pow(1.0 + gamma * i, -power))
elif policy == 'poly':
data.append(lr * math.pow(1.0 - float(i) / 100, power))
elif policy == 'sigmoid':
data.append(lr / (1.0 + math.exp(gamma * (i - step))))
datalist.append(data)
return json.dumps({'data': {'columns': datalist}})
@blueprint.route('/<job_id>/to_pretrained', methods=['GET', 'POST'])
@auth.requires_login
def to_pretrained(job_id):
job = scheduler.get_job(job_id)
if job is None:
raise werkzeug.exceptions.NotFound('Job not found')
epoch = -1
# GET ?epoch=n
if 'epoch' in flask.request.args:
epoch = float(flask.request.args['epoch'])
# POST ?snapshot_epoch=n (from form)
elif 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
# Write the stats of the job to json,
# and store in tempfile (for archive)
info = job.json_dict(verbose=False, epoch=epoch)
task = job.train_task()
snapshot_filename = None
snapshot_filename = task.get_snapshot(epoch)
# Set defaults:
labels_path = None
resize_mode = None
if "labels file" in info:
labels_path = os.path.join(task.dataset.dir(), info["labels file"])
if "image resize mode" in info:
resize_mode = info["image resize mode"]
job = PretrainedModelJob(
snapshot_filename,
os.path.join(job.dir(), task.model_file),
labels_path,
info["framework"],
info["image dimensions"][2],
resize_mode,
info["image dimensions"][0],
info["image dimensions"][1],
username=auth.get_username(),
name=info["name"]
)
scheduler.add_job(job)
return flask.redirect(flask.url_for('digits.views.home', tab=3)), 302
@blueprint.route('/<job_id>/download',
methods=['GET', 'POST'],
defaults={'extension': 'tar.gz'})
@blueprint.route('/<job_id>/download.<extension>',
methods=['GET', 'POST'])
def download(job_id, extension):
"""
Return a tarball of all files required to run the model
"""
job = scheduler.get_job(job_id)
if job is None:
raise werkzeug.exceptions.NotFound('Job not found')
epoch = -1
# GET ?epoch=n
if 'epoch' in flask.request.args:
epoch = float(flask.request.args['epoch'])
# POST ?snapshot_epoch=n (from form)
elif 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
# Write the stats of the job to json,
# and store in tempfile (for archive)
info = json.dumps(job.json_dict(verbose=False, epoch=epoch), sort_keys=True, indent=4, separators=(',', ': '))
info_io = io.BytesIO()
info_io.write(info)
b = io.BytesIO()
if extension in ['tar', 'tar.gz', 'tgz', 'tar.bz2']:
# tar file
mode = ''
if extension in ['tar.gz', 'tgz']:
mode = 'gz'
elif extension in ['tar.bz2']:
mode = 'bz2'
with tarfile.open(fileobj=b, mode='w:%s' % mode) as tf:
for path, name in job.download_files(epoch):
tf.add(path, arcname=name)
tf_info = tarfile.TarInfo("info.json")
tf_info.size = len(info_io.getvalue())
info_io.seek(0)
tf.addfile(tf_info, info_io)
elif extension in ['zip']:
with zipfile.ZipFile(b, 'w') as zf:
for path, name in job.download_files(epoch):
zf.write(path, arcname=name)
zf.writestr("info.json", info_io.getvalue())
else:
raise werkzeug.exceptions.BadRequest('Invalid extension')
response = flask.make_response(b.getvalue())
response.headers['Content-Disposition'] = 'attachment; filename=%s_epoch_%s.%s' % (job.id(), epoch, extension)
return response
class JobBasicInfo(object):
def __init__(self, name, ID, status, time, framework_id):
self.name = name
self.id = ID
self.status = status
self.time = time
self.framework_id = framework_id
class ColumnType(object):
def __init__(self, name, has_suffix, find_fn):
self.name = name
self.has_suffix = has_suffix
self.find_from_list = find_fn
def label(self, attr):
if self.has_suffix:
return '{} {}'.format(attr, self.name)
else:
return attr
def get_column_attrs():
job_outs = [set(j.train_task().train_outputs.keys() + j.train_task().val_outputs.keys())
for j in scheduler.jobs.values() if isinstance(j, ModelJob)]
return reduce(lambda acc, j: acc.union(j), job_outs, set())
|
'''Interview question 43: number of 1s in the integers from 1 to n
Given an integer n, count how many times the digit 1 appears in the decimal
representations of the integers 1 to n.
--------------
Example:
input: 12
output: 5 # 1, 10, 11, 12
'''
def nums_of_1(n):
if n < 0:
return 0
return __nums_of_1(str(n), 0)
def __nums_of_1(n_str, idx):
if n_str is None or idx == len(n_str) or n_str[idx] < '0' or n_str[idx] > '9':
return 0
first = int(n_str[idx])
    # units digit: idx points at the last digit
if idx == len(n_str)-1 and first > 0:
return 1
if idx == len(n_str)-1 and first == 0:
return 0
    # leading digit: number of 1s contributed by the highest remaining position
    if first > 1:
        digit_1 = pow(10, len(n_str) - 1 - idx)
    elif first == 1:
        digit_1 = int(n_str[idx+1:]) + 1
    else:
        # first == 0 contributes no 1s in this position (previously unhandled,
        # which raised UnboundLocalError for inputs such as 102)
        digit_1 = 0
    # with the leading digit fixed, count the 1s appearing in all the other positions
digit_2 = first * (len(n_str) - 1 - idx) * pow(10,len(n_str) - 2 - idx)
    # number of 1s contributed by the suffix n_str[idx+1:]
digit_3 = __nums_of_1(n_str, idx + 1)
return digit_1 + digit_2 + digit_3
if __name__ == '__main__':
print(nums_of_1(12))
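    # Brute-force cross-check (added for illustration; not in the original):
    # count the '1' digits directly and compare for a few sample values.
    for n in (12, 99, 102, 1234):
        assert nums_of_1(n) == sum(str(i).count('1') for i in range(1, n + 1))
    print("brute-force cross-check passed")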
|
#!/usr/bin/env python
"""Working with nested data hands-on exercise / coding challenge."""
"""08-15-21"""
import json
import os
# Get the absolute path for the directory where this file is located "here"
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, "interfaces.json")) as file:
# TODO: Parse the contents of the JSON file into a variable
json_data = json.loads(file.read())
# TODO: Loop through the interfaces in the JSON data and print out each
# interface's name, ip, and netmask.
for interface in json_data["ietf-interfaces:interfaces"]["interface"]:
print("{name}: {ip} {netmask}".format(
name = interface["name"],
ip = interface["ietf-ip:ipv4"]["address"][0]["ip"],
netmask = interface["ietf-ip:ipv4"]["address"][0]["netmask"]
))
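# For reference (added for illustration): the loop above assumes an
# ietf-interfaces / RESTCONF style structure like the sketch below. The actual
# interfaces.json shipped with the exercise may differ.
# {
#   "ietf-interfaces:interfaces": {
#     "interface": [
#       {
#         "name": "GigabitEthernet1",
#         "ietf-ip:ipv4": {
#           "address": [{"ip": "192.0.2.1", "netmask": "255.255.255.0"}]
#         }
#       }
#     ]
#   }
# }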
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from std_msgs/ByteMultiArray.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class ByteMultiArray(genpy.Message):
_md5sum = "70ea476cbcfd65ac2f68f3cda1e891fe"
_type = "std_msgs/ByteMultiArray"
_has_header = False #flag to mark the presence of a Header object
_full_text = """# Please look at the MultiArrayLayout message definition for
# documentation on all multiarrays.
MultiArrayLayout layout # specification of data layout
byte[] data # array of data
================================================================================
MSG: std_msgs/MultiArrayLayout
# The multiarray declares a generic multi-dimensional array of a
# particular data type. Dimensions are ordered from outer most
# to inner most.
MultiArrayDimension[] dim # Array of dimension properties
uint32 data_offset # padding elements at front of data
# Accessors should ALWAYS be written in terms of dimension stride
# and specified outer-most dimension first.
#
# multiarray(i,j,k) = data[data_offset + dim_stride[1]*i + dim_stride[2]*j + k]
#
# A standard, 3-channel 640x480 image with interleaved color channels
# would be specified as:
#
# dim[0].label = "height"
# dim[0].size = 480
# dim[0].stride = 3*640*480 = 921600 (note dim[0] stride is just size of image)
# dim[1].label = "width"
# dim[1].size = 640
# dim[1].stride = 3*640 = 1920
# dim[2].label = "channel"
# dim[2].size = 3
# dim[2].stride = 3
#
# multiarray(i,j,k) refers to the ith row, jth column, and kth channel.
================================================================================
MSG: std_msgs/MultiArrayDimension
string label # label of given dimension
uint32 size # size of given dimension (in type units)
uint32 stride # stride of given dimension"""
__slots__ = ['layout','data']
_slot_types = ['std_msgs/MultiArrayLayout','byte[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
layout,data
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(ByteMultiArray, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.layout is None:
self.layout = std_msgs.msg.MultiArrayLayout()
if self.data is None:
self.data = []
else:
self.layout = std_msgs.msg.MultiArrayLayout()
self.data = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
length = len(self.layout.dim)
buff.write(_struct_I.pack(length))
for val1 in self.layout.dim:
_x = val1.label
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_struct_2I.pack(_x.size, _x.stride))
buff.write(_struct_I.pack(self.layout.data_offset))
length = len(self.data)
buff.write(_struct_I.pack(length))
pattern = '<%sb'%length
buff.write(struct.pack(pattern, *self.data))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.layout is None:
self.layout = std_msgs.msg.MultiArrayLayout()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.layout.dim = []
for i in range(0, length):
val1 = std_msgs.msg.MultiArrayDimension()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.label = str[start:end].decode('utf-8')
else:
val1.label = str[start:end]
_x = val1
start = end
end += 8
(_x.size, _x.stride,) = _struct_2I.unpack(str[start:end])
self.layout.dim.append(val1)
start = end
end += 4
(self.layout.data_offset,) = _struct_I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sb'%length
start = end
end += struct.calcsize(pattern)
self.data = struct.unpack(pattern, str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
length = len(self.layout.dim)
buff.write(_struct_I.pack(length))
for val1 in self.layout.dim:
_x = val1.label
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_struct_2I.pack(_x.size, _x.stride))
buff.write(_struct_I.pack(self.layout.data_offset))
length = len(self.data)
buff.write(_struct_I.pack(length))
pattern = '<%sb'%length
buff.write(self.data.tostring())
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.layout is None:
self.layout = std_msgs.msg.MultiArrayLayout()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.layout.dim = []
for i in range(0, length):
val1 = std_msgs.msg.MultiArrayDimension()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.label = str[start:end].decode('utf-8')
else:
val1.label = str[start:end]
_x = val1
start = end
end += 8
(_x.size, _x.stride,) = _struct_2I.unpack(str[start:end])
self.layout.dim.append(val1)
start = end
end += 4
(self.layout.data_offset,) = _struct_I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sb'%length
start = end
end += struct.calcsize(pattern)
self.data = numpy.frombuffer(str[start:end], dtype=numpy.int8, count=length)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_2I = struct.Struct("<2I")
|
from bc.agent.random import RandomAgent
from bc.agent.bc import BCAgent
from bc.agent.regression import RegressionAgent
from bc.agent.rl import RLAgent
|
# imports for mathematical functions
import numpy as np
from numpy import nanmean, nan
import sys
from scipy.spatial import distance
import pandas as pd
def __cluster_assignment(data, cluster_centers, N, K):
""" Assign each point in the dataset to a cluster based on its distance from cluster centers
This is a helper method for the main kPOD functionality. It
executes the cluster assignment part of the algorithm.
Parameters
----------
data: {array-like, sparse matrix} of shape (N, P)
Data to predict clusters for.
cluster_centers: {array-like, sparse matrix} of shape (K,)
Central point of each of the K clusters.
N: int
The number of observations in the data.
K: int
The number of clusters to assign centers for.
Returns
-------
cluster_assignment: ndarray of shape (N,)
The cluster index that each data point was assigned to.
"""
# set empty distance array with length of num clusters
cluster_assignment = np.zeros(N)
dist = np.zeros(K)
# iterate through observations
for num in range(0,N):
# iterate through each cluster
for cluster in range(K):
# assign distance between point and cluster center
dist[cluster] = distance.euclidean(data[num], cluster_centers[cluster])
# assign point to cluster center with lowest distance
cluster_assignment[num] = np.argmin(dist)
# return the cluster assignments for this iteration
return cluster_assignment
def __move_centroids(data, cluster_centers, cluster_assignment, N, K):
""" Move each cluster centroid to the mean location of the points that are assigned to it.
This is a helper method for the main kPOD functionality. It
executes the move cluster centroids part of the algorithm.
Parameters
----------
data: {array-like, sparse matrix} of shape (N, P)
Data to predict clusters for.
cluster_centers: {array-like, sparse matrix} of shape (K,)
Central point of each of the K clusters.
cluster_assignment: {array-like, sparse matrix} of shape (N,)
Array containing the cluster index that each data point was assigned to.
N: int
The number of observations in the data.
K: int
The number of clusters to assign centers for.
Returns
-------
cluster_assignment: ndarray of shape (N,)
The cluster index that each data point was assigned to.
"""
# iterate through each cluster
for num in range(1, K+1):
# make empty array cluster points
cluster_points = list()
# iterate through each data point
for i in range(0, N):
# if the cluster is assigned to this centroid, add it to the list of cluster points
if int(cluster_assignment[i]) == (num-1):
# add data point to list of cluster points
cluster_points.append(data[i])
# convert the cluster points to an ndarray
cluster_points = np.array(cluster_points)
# set the new cluster centroid location to the main of the points it is assigned to
cluster_centers[num-1] = cluster_points.mean(axis=0)
# return moved cluster centers
return cluster_centers
def __check_convergence(cluster_centers, past_centroids, tol, num_iters):
""" Ensure that each cluster center is within the tolerance level of the last centroid.
This is a helper method for the main kPOD functionality. It
executes the check convergence part of the algorithm.
Parameters
----------
cluster_centers: {array-like, sparse matrix} of shape (K,)
Central point of each of the K clusters.
past_centroids: {array-like, sparse matrix} of shape (K,)
Array containing central points from the last kPOD iteration.
tol: float
The tolerance for each cluster center and its past centroid.
num_iters: int
Number of iterations of the algorithm.
Returns
-------
    centroids_complete: int
        The number of centroids that moved by less than tol since the previous
        iteration (False is returned on the first iteration).
"""
# if it is the first iteration, algorithm has not converged
if num_iters == 0:
return False
# set initial complete to 0
centroids_complete = 0
# check if k-means is complete
for i in range(len(cluster_centers)):
# if the distance between this centroid and the past centroid is less than tolerance
if (distance.euclidean(cluster_centers[i], past_centroids[i]) <= tol):
# add centroid to the list of complete centroids
centroids_complete += 1
    # return the number of centroids that have converged
return centroids_complete
def __fill_data(MISSING_DATA, cluster_centers, cluster_assignment):
""" Fill missing data with the average values for each data point's cluster.
This is a helper method for the main kPOD functionality. It
executes the fill data part of the algorithm.
Parameters
----------
MISSING_DATA: {array-like, sparse matrix} of shape (N,P)
Data with missing values.
cluster_centers: {array-like, sparse matrix} of shape (K,)
Central point of each of the K clusters.
cluster_assignment: {array-like, sparse matrix} of shape (N,)
Array containing the cluster index that each data point was assigned to.
Returns
-------
filled_data: {array-like, sparse matrix} of shape (N,P)
Data with all nan values filled.
"""
# save filled data as copy of missing data
filled_data = np.array(MISSING_DATA.copy())
# iterate through missing data
for i in range(len(filled_data)):
# set current cluster as cluster assignment of this data point
obs_cluster = int(cluster_assignment[i])
# reset counter to 0
j = 0
# iterate through each value
for val in filled_data[i]:
# if value is empty, replace it with cluster center value
if (np.isnan(val)):
# replace value with cluster center value (mean of its dimension)
filled_data[i][j] = cluster_centers[obs_cluster][j]
# increment counter
j+=1
# return data with all nan values filled
return filled_data
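# A minimal driver sketch (added for illustration; not part of the original
# module) showing how the helpers above could be combined into a k-POD style
# loop. The function name, the column-mean imputation used to start, and the
# random centroid initialization are assumptions made for this example; it also
# assumes N >= K and that no cluster ever ends up empty.
def _k_pod_sketch(missing_data, K, max_iters=100, tol=1e-4):
    data = np.asarray(missing_data, dtype=float)
    N = data.shape[0]
    # start by filling every missing value with its column mean
    col_means = nanmean(data, axis=0)
    filled = np.where(np.isnan(data), col_means, data)
    # pick K distinct observations as the initial cluster centers
    rng = np.random.default_rng(0)
    cluster_centers = filled[rng.choice(N, size=K, replace=False)].copy()
    cluster_assignment = np.zeros(N)
    for num_iters in range(max_iters):
        cluster_assignment = __cluster_assignment(filled, cluster_centers, N, K)
        past_centroids = cluster_centers.copy()
        cluster_centers = __move_centroids(filled, cluster_centers, cluster_assignment, N, K)
        filled = __fill_data(data, cluster_centers, cluster_assignment)
        if __check_convergence(cluster_centers, past_centroids, tol, num_iters) == K:
            break
    return cluster_assignment, cluster_centers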
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nvflare.apis.dxo import DXO, DataKind, MetaKey
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext, FLContextManager
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.app_event_type import AppEventType
from nvflare.app_common.handlers.intime_model_selection_handler import IntimeModelSelectionHandler
class MockSimpleEngine:
def __init__(self, run_num=0):
self.fl_ctx_mgr = FLContextManager(
engine=self,
identity_name="__mock_simple_engine",
run_num=run_num,
public_stickers={},
private_stickers={},
)
self.last_event = None
def new_context(self):
return self.fl_ctx_mgr.new_context()
def fire_event(self, event_type: str, fl_ctx: FLContext):
self.last_event = event_type
return True
class TestInTimeModelSelectionHandler:
@pytest.mark.parametrize("mshandler", [IntimeModelSelectionHandler])
@pytest.mark.parametrize(
"initial,received,expected",
[
(
1,
{
"client1": {"weight": 0.5, "iter_number": 1, "metric": 10},
},
True,
),
(
1,
{
"client1": {"weight": 0.5, "iter_number": 1, "metric": 1},
"client2": {"weight": 0.5, "iter_number": 1, "metric": 0.2},
},
False,
),
],
)
def test_model_selection(self, mshandler, initial, received, expected):
aggregation_weights = {k: v["weight"] for k, v in received.items()}
handler = mshandler(aggregation_weights=aggregation_weights)
handler.best_val_metric = initial
engine = MockSimpleEngine()
for k, v in received.items():
peer_ctx = FLContext()
peer_ctx.set_prop(FLContextKey.CLIENT_NAME, k, private=False)
dxo = DXO(
DataKind.WEIGHT_DIFF,
data=dict(),
meta={
MetaKey.INITIAL_METRICS: v["metric"],
MetaKey.NUM_STEPS_CURRENT_ROUND: v["iter_number"],
AppConstants.CURRENT_ROUND: 10,
},
)
peer_ctx.set_prop(FLContextKey.SHAREABLE, dxo.to_shareable(), private=False)
fl_ctx = engine.fl_ctx_mgr.new_context()
fl_ctx.set_prop(FLContextKey.PEER_CONTEXT, peer_ctx)
handler.handle_event(EventType.BEFORE_PROCESS_SUBMISSION, fl_ctx)
handler.handle_event(AppEventType.BEFORE_AGGREGATION, fl_ctx)
assert (engine.last_event == AppEventType.GLOBAL_BEST_MODEL_AVAILABLE) == expected
|
# Acknowledgement: The implementation of Anisotropic Diffusion Filtering is borrowed from Alistair Muldal's code (https://pastebin.com/sBsPX4Y7). We thank them for making it available.
import numpy as np
import warnings
def anisodiff(img,niter=1,kappa=50,gamma=0.1,step=(1.,1.),option=1,ploton=False):
"""
Anisotropic diffusion.
Usage:
imgout = anisodiff(im, niter, kappa, gamma, option)
Arguments:
img - input image
niter - number of iterations
kappa - conduction coefficient 20-100 ?
gamma - max value of .25 for stability
step - tuple, the distance between adjacent pixels in (y,x)
option - 1 Perona Malik diffusion equation No 1
2 Perona Malik diffusion equation No 2
ploton - if True, the image will be plotted on every iteration
Returns:
imgout - diffused image.
kappa controls conduction as a function of gradient. If kappa is low
small intensity gradients are able to block conduction and hence diffusion
across step edges. A large value reduces the influence of intensity
gradients on conduction.
gamma controls speed of diffusion (you usually want it at a maximum of
0.25)
step is used to scale the gradients in case the spacing between adjacent
pixels differs in the x and y axes
Diffusion equation 1 favours high contrast edges over low contrast ones.
Diffusion equation 2 favours wide regions over smaller ones.
Reference:
P. Perona and J. Malik.
    Scale-space and edge detection using anisotropic diffusion.
IEEE Transactions on Pattern Analysis and Machine Intelligence,
12(7):629-639, July 1990.
Original MATLAB code by Peter Kovesi
School of Computer Science & Software Engineering
The University of Western Australia
pk @ csse uwa edu au
<http://www.csse.uwa.edu.au>
Translated to Python and optimised by Alistair Muldal
Department of Pharmacology
University of Oxford
<alistair.muldal@pharm.ox.ac.uk>
June 2000 original version.
March 2002 corrected diffusion eqn No 2.
July 2012 translated to Python
"""
# ...you could always diffuse each color channel independently if you
# really want
if img.ndim == 3:
warnings.warn("Only grayscale images allowed, converting to 2D matrix")
img = img.mean(2)
# initialize output array
img = img.astype('float32')
imgout = img.copy()
# initialize some internal variables
deltaS = np.zeros_like(imgout)
deltaE = deltaS.copy()
NS = deltaS.copy()
EW = deltaS.copy()
gS = np.ones_like(imgout)
gE = gS.copy()
# create the plot figure, if requested
if ploton:
import pylab as pl
from time import sleep
fig = pl.figure(figsize=(20,5.5),num="Anisotropic diffusion")
ax1,ax2 = fig.add_subplot(1,2,1),fig.add_subplot(1,2,2)
ax1.imshow(img,interpolation='nearest')
ih = ax2.imshow(imgout,interpolation='nearest',animated=True)
ax1.set_title("Original image")
ax2.set_title("Iteration 0")
fig.canvas.draw()
for ii in range(niter):
# calculate the diffs
deltaS[:-1,: ] = np.diff(imgout,axis=0)
deltaE[: ,:-1] = np.diff(imgout,axis=1)
# conduction gradients (only need to compute one per dim!)
if option == 1:
gS = np.exp(-(deltaS/kappa)**2.)/step[0]
gE = np.exp(-(deltaE/kappa)**2.)/step[1]
elif option == 2:
gS = 1./(1.+(deltaS/kappa)**2.)/step[0]
gE = 1./(1.+(deltaE/kappa)**2.)/step[1]
# update matrices
E = gE*deltaE
S = gS*deltaS
# subtract a copy that has been shifted 'North/West' by one
        # pixel. don't ask questions. just do it. trust me.
NS[:] = S
EW[:] = E
NS[1:,:] -= S[:-1,:]
EW[:,1:] -= E[:,:-1]
# update the image
imgout += gamma*(NS+EW)
if ploton:
iterstring = "Iteration %i" %(ii+1)
ih.set_data(imgout)
ax2.set_title(iterstring)
fig.canvas.draw()
# sleep(0.01)
return imgout
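# Example usage (a minimal sketch added for illustration; not part of the
# original code). It denoises a synthetic step image; the parameter values
# below are arbitrary, not recommendations.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    clean = np.zeros((64, 64), dtype='float32')
    clean[:, 32:] = 100.0                                   # vertical step edge
    noisy = clean + rng.normal(scale=10.0, size=clean.shape).astype('float32')
    smoothed = anisodiff(noisy, niter=20, kappa=30, gamma=0.2, option=1)
    # noise in the flat left half should drop while the edge stays sharp
    print("flat-region std before: %.2f  after: %.2f"
          % (noisy[:, :30].std(), smoothed[:, :30].std()))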
|
import re
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Optional, Any
# ===================================
# Schema for:
# * 'show cts sxp connections brief'
# ===================================
class ShowCtsSxpConnectionsBriefSchema(MetaParser):
"""Schema for show cts sxp connections brief."""
schema = {
"sxp_connections": {
"total_sxp_connections": int,
"status": {
"sxp_status": str,
"highest_version": int,
"default_pw": str,
Optional("key_chain"): str,
Optional("key_chain_name"): str,
"source_ip": str,
"conn_retry": int,
"reconcile_secs": int,
"retry_timer": str,
"peer_sequence_traverse_limit_for_export": str,
"peer_sequence_traverse_limit_for_import":str
},
Optional("sxp_peers"): {
str: {
"source_ip": str,
"conn_status": str,
"duration": str
}
}
}
}
# ===================================
# Parser for:
# * 'show cts sxp connections brief'
# * 'show cts sxp connections vrf {vrf} brief'
# ===================================
class ShowCtsSxpConnectionsBrief(ShowCtsSxpConnectionsBriefSchema):
"""Parser for show cts sxp connections brief"""
"""Parser for show cts sxp connections vrf {vrf} brief"""
cli_command = ['show cts sxp connections brief', 'show cts sxp connections vrf {vrf} brief']
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command[0])
else:
out = output
sxp_dict = {}
# There are no SXP Connections.
# SXP : Enabled
# Highest Version Supported: 4
# Default Password : Set
# Default Key-Chain: Not Set
# Default Key-Chain Name: Not Applicable
# Default Source IP: 192.168.2.24
# Connection retry open period: 120 secs
# Reconcile period: 120 secs
# Retry open timer is not running
# Peer-Sequence traverse limit for export: Not Set
# Peer-Sequence traverse limit for import: Not Set
#
# ----------------------------------------------------------------------------------------------------------------------------------
# Peer_IP Source_IP Conn Status Duration
# ----------------------------------------------------------------------------------------------------------------------------------
# 10.100.123.1 192.168.2.24 On 44:19:54:52 (dd:hr:mm:sec)
# 10.100.123.2 192.168.2.24 On 44:19:54:52 (dd:hr:mm:sec)
# 10.100.123.3 192.168.2.24 On 44:19:54:52 (dd:hr:mm:sec)
# 10.100.123.4 192.168.2.24 On 44:19:54:52 (dd:hr:mm:sec)
# 10.100.123.5 192.168.2.24 On 44:18:58:47 (dd:hr:mm:sec)
# 10.100.123.6 192.168.2.24 On 20:12:53:40 (dd:hr:mm:sec)
# 10.100.123.7 192.168.2.24 On 44:18:58:47 (dd:hr:mm:sec)
# 10.100.123.8 192.168.2.24 On 20:12:40:41 (dd:hr:mm:sec)
# 10.100.123.9 192.168.2.24 On 44:18:58:47 (dd:hr:mm:sec)
# 10.100.123.10 192.168.2.24 On 44:18:58:47 (dd:hr:mm:sec)
# 10.100.123.11 192.168.2.24 On 44:22:21:10 (dd:hr:mm:sec)
# 10.100.123.12 192.168.2.24 On 44:18:58:47 (dd:hr:mm:sec)
# 10.100.123.13 192.168.2.24 On 45:08:24:37 (dd:hr:mm:sec)
# 10.100.123.14 192.168.2.24 On 45:08:24:37 (dd:hr:mm:sec)
# 10.100.123.15 192.168.2.24 On 36:11:31:08 (dd:hr:mm:sec)
# 10.100.123.16 192.168.2.24 On 36:12:13:50 (dd:hr:mm:sec)
#
# Total num of SXP Connections = 16
# SXP : Enabled
p1 = re.compile(r"\s(?P<sxp_status>(Disabled|Enabled))")
# Highest Version Supported: 4
p2 = re.compile(r"\s+(?P<highest_version>\d+)")
# Default Password : Set
p3 = re.compile(r"\s+(?P<default_pw>(Not\s+Set|Set))")
# Default Key-Chain: Not Set
p4 = re.compile(r"\s+(?P<key_chain>(Not\s+Set|Set))")
# Default Key-Chain Name: Not Applicable
p5 = re.compile(r"\s+(?P<key_chain_name>(Not\s+Applicable|\S+))")
# Default Source IP: 192.168.2.24
p6 = re.compile(r"\s+(?P<source_ip>(Not\s+Set|\d+\.\d+\.\d+\.\d+))")
# Connection retry open period: 120 secs
p7 = re.compile(r"\s+(?P<conn_retry>\d+)")
# Reconcile period: 120 secs
p8 = re.compile(r"\s+(?P<reconcile_secs>\d+)")
# Peer-Sequence traverse limit for export: Not Set
p9 = re.compile(r"\s+(?P<peer_sequence_traverse_limit_for_export>(Not\s+Set|\S+))")
# Peer-Sequence traverse limit for import: Not Set
p10 = re.compile(r"\s+(?P<peer_sequence_traverse_limit_for_import>(Not\s+Set|\S+))")
# Retry open timer is not running
p11 = re.compile(r"Retry\s+open\s+timer\s+is\s+(?P<retry_timer>(not\s+running|running))")
# 10.100.123.12 192.168.2.24 On 44:18:58:47 (dd:hr:mm:sec)
p12 = re.compile(
r"(?P<peer_ip>\d+\.\d+\.\d+\.\d+)\s+(?P<source_ip>\d+\.\d+\.\d+\.\d+)\s+(?P<conn_status>\S+)\s+(?P<duration>\d+:\d+:\d+:\d+)")
# Total num of SXP Connections = 16
p13 = re.compile(r"^Total\s+num\s+of\s+SXP\s+Connections\s+=\s+(?P<total_sxp_connections>\d+)")
# This regex map is used to split a captured line on ':' as the delimiter;
# if the line starts with one of these strings, the mapped regex pattern is used.
regex_map = {
"SXP": p1,
"Highest Version Supported": p2,
"Default Password": p3,
"Default Key-Chain": p4,
"Default Key-Chain Name": p5,
"Default Source IP": p6,
"Connection retry open period": p7,
"Reconcile period": p8,
"Peer-Sequence traverse limit for export": p9,
"Peer-Sequence traverse limit for import": p10,
"Retry open timer is not running": p11,
}
# Remove lines with these leading strings
remove_lines = ('---', 'Peer_IP')
# Remove unwanted lines from raw text
def filter_lines(raw_output, remove_lines):
# Remove empty lines
clean_lines = list(filter(None, raw_output.splitlines()))
rendered_lines = []
for clean_line in clean_lines:
clean_line_strip = clean_line.strip()
# Skip unwanted lines that start with a prefix in "remove_lines"
if not clean_line_strip.startswith(remove_lines):
rendered_lines.append(clean_line)
return rendered_lines
out = filter_lines(raw_output=out, remove_lines=remove_lines)
regex = None
for line in out:
line_strip = line.strip()
# Lines containing ': ' are split on the colon and matched against the
# regex map to assign a key/value pair based on the leading keyword.
if ": " in line:
try:
data_type, value = line_strip.split(':', 1)
regex = regex_map.get(data_type.strip())
except ValueError:
continue
# 'Retry open' is a one-off match; the line doesn't contain a colon.
elif "Retry open" in line:
# Retry open timer is not running
match = p11.match(line_strip)
if match:
groups = match.groupdict()
retry_timer = groups['retry_timer']
if not sxp_dict.get('sxp_connections'):
sxp_dict.update({"sxp_connections": {}})
if not sxp_dict['sxp_connections'].get('status'):
sxp_dict['sxp_connections'].update({"status": {}})
sxp_dict["sxp_connections"]['status'].update({'retry_timer': retry_timer})
continue
elif "Total num of SXP Connections" in line:
# Total num of SXP Connections = 16
match = p13.match(line_strip)
if match:
groups = match.groupdict()
total_sxp_connections = int(groups['total_sxp_connections'])
sxp_dict["sxp_connections"]['total_sxp_connections'] = total_sxp_connections
continue
# All other lines in the output should match p12, which captures peer_ip, source_ip, conn_status, and duration.
else:
# 10.100.123.12 192.168.2.24 On 44:18:58:47 (dd:hr:mm:sec)
match = p12.match(line_strip)
if match:
groups = match.groupdict()
peer_ip = groups['peer_ip']
source_ip = groups['source_ip']
conn_status = groups['conn_status']
duration = groups['duration']
if not sxp_dict.get('sxp_connections'):
sxp_dict.update({"sxp_connections": {}})
if not sxp_dict['sxp_connections'].get('sxp_peers'):
sxp_dict['sxp_connections'].update({"sxp_peers": {}})
sxp_dict['sxp_connections']['sxp_peers'].update({
peer_ip: {
'source_ip': source_ip,
'conn_status': conn_status,
'duration': duration
}})
continue
# After all captures are completed, if a regex match exists, assign a key/value to the root dict key.
if regex:
match = regex.match(value)
if match:
groups = match.groupdict()
for k, v in groups.items():
if v is None:
continue
if v.isdigit():
v = int(v)
if not sxp_dict.get('sxp_connections'):
sxp_dict.update({"sxp_connections": {}})
if not sxp_dict['sxp_connections'].get('status'):
sxp_dict['sxp_connections'].update({"status": {}})
sxp_dict['sxp_connections']['status'].update({k: v})
if sxp_dict:
return sxp_dict
else:
return {}
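# Usage sketch for the parser above, kept as a comment so it does not run on
# import. The testbed file name and device alias are placeholders, not part
# of this module; with a connected pyATS device, Genie selects this class
# automatically through device.parse():
#
#     from genie.testbed import load
#
#     testbed = load('testbed.yaml')
#     device = testbed.devices['switch1']
#     device.connect()
#     parsed = device.parse('show cts sxp connections brief')
#     print(parsed['sxp_connections']['total_sxp_connections'])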
# ==================
# Schema for:
# * 'show cts pacs'
# ==================
class ShowCtsPacsSchema(MetaParser):
"""Schema for show cts pacs."""
schema = {
"aid": str,
"pac_info": {
"aid": str,
"pac_type": str,
"i_id": str,
"a_id_info": str,
"credential_lifetime": str,
},
"pac_opaque": str,
"refresh_timer": str,
}
# ==================
# Parser for:
# * 'show cts pacs'
# ==================
class ShowCtsPacs(ShowCtsPacsSchema):
"""Parser for show cts pacs"""
cli_command = 'show cts pacs'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# AID: 1100E046659D4275B644BF946EFA49CD
# PAC-Info:
# PAC-type = Cisco Trustsec
# AID: 1100E046659D4275B644BF946EFA49CD
# I-ID: gw1
# A-ID-Info: Identity Services Engine
# Credential Lifetime: 19:56:32 PDT Sun Sep 06 2020
# PAC-Opaque: 000200B80003000100040010207FCE2A590A44BA0DE959740A348AF00006009C00030100F57E4D71BDE3BD2850B2B63C92E18122000000135EDA996F00093A805A004010F4EDAF81FB6900D03013E907ED81BFB83EE273B8E563BE48DC16B2E9164B1AA6711281937B734E8C449280FCEAF4BE668545B5A55BE20C6346C42AFFCA87FFDDA0AC6A480F9AEE147541EE51FB67CDE0580FD8A746978C78C2CB9E7855BB1667469896AB18902424344AC094B3162EF09488CDB0D6A95139
# Refresh timer is set for 6w3d
cts_pacs_dict = {}
# AID: 1100E046659D4275B644BF946EFA49CD
aid_capture = re.compile(r"^AID:\s+(?P<aid>\S+)")
# PAC-type = Cisco Trustsec
pac_type_capture = re.compile(r"^PAC-type\s=\s(?P<pac_type>.*$)")
# I-ID: gw1
iid_capture = re.compile(r"^I-ID:\s+(?P<iid>\S+)")
# A-ID-Info: Identity Services Engine
aid_info_capture = re.compile(r"^A-ID-Info:\s+(?P<aid_info>.*$)")
# Credential Lifetime: 19:56:32 PDT Sun Sep 06 2020
credential_lifetime_capture = re.compile(
r"^Credential\s+Lifetime:\s+(?P<time>\d+:\d+:\d+)\s+(?P<time_zone>\S+)\s+(?P<day>\S+)\s+(?P<month>\S+)\s+(?P<date>\d+)\s+(?P<year>\d+)")
# PAC - Opaque: 000200B80003000100040010207FCE2A590A44BA0DE959740A348AF00006009C00030100F57E4D71BDE3BD2850B2B63C92E18122000000135EDA996F00093A805A004010F4EDAF81FB6900D03013E907ED81BFB83EE273B8E563BE48DC16B2E9164B1AA6711281937B734E8C449280FCEAF4BE668545B5A55BE20C6346C42AFFCA87FFDDA0AC6A480F9AEE147541EE51FB67CDE0580FD8A746978C78C2CB9E7855BB1667469896AB18902424344AC094B3162EF09488CDB0D6A95139
pac_opaque_capture = re.compile(r"^PAC-Opaque:\s+(?P<pac_opaque>.*$)")
# Refresh timer is set for 6w3d
refresh_timer_capture = re.compile(r"^Refresh\s+timer\s+is\s+set\s+for\s+(?P<refresh_timer>\S+)")
remove_lines = ('PAC-Info:',)
# Remove unwanted lines from raw text
def filter_lines(raw_output, remove_lines):
# Remove empty lines
clean_lines = list(filter(None, raw_output.splitlines()))
rendered_lines = []
for clean_line in clean_lines:
clean_line_strip = clean_line.strip()
# print(clean_line)
# Skip unwanted lines that start with a prefix in "remove_lines"
if not clean_line_strip.startswith(remove_lines):
rendered_lines.append(clean_line_strip)
return rendered_lines
out = filter_lines(raw_output=out, remove_lines=remove_lines)
for line in out:
# AID: 1100E046659D4275B644BF946EFA49CD
aid_match = aid_capture.match(line)
if aid_match:
groups = aid_match.groupdict()
aid = groups['aid']
if not cts_pacs_dict.get('aid', {}):
cts_pacs_dict['aid'] = aid
if not cts_pacs_dict.get('pac_info', {}):
cts_pacs_dict['pac_info'] = {}
cts_pacs_dict['pac_info']['aid'] = aid
continue
# PAC-type = Cisco Trustsec
pac_type_match = pac_type_capture.match(line)
if pac_type_match:
groups = pac_type_match.groupdict()
pac_type = groups['pac_type']
if not cts_pacs_dict.get('pac_info', {}):
cts_pacs_dict['pac_info'] = {}
cts_pacs_dict['pac_info']['pac_type'] = pac_type
continue
# I-ID: gw1
iid_match = iid_capture.match(line)
if iid_match:
groups = iid_match.groupdict()
iid = groups['iid']
cts_pacs_dict['pac_info']['i_id'] = iid
continue
# A-ID-Info: Identity Services Engine
aid_info_match = aid_info_capture.match(line)
if aid_info_match:
groups = aid_info_match.groupdict()
aid_info = groups['aid_info']
cts_pacs_dict['pac_info']['a_id_info'] = aid_info
continue
# Credential Lifetime: 19:56:32 PDT Sun Sep 06 2020
credential_lifetime_match = credential_lifetime_capture.match(line)
if credential_lifetime_match:
groups = credential_lifetime_match.groupdict()
time = groups['time']
time_zone = groups['time_zone']
day = groups['day']
month = groups['month']
date = groups['date']
year = groups['year']
full_date = f"{day}, {month}/{date}/{year}"
cts_pacs_dict['pac_info']['credential_lifetime'] = full_date
continue
# PAC - Opaque: 000200B80003000100040010207FCE2A590A44BA0DE959740A348AF00006009C00030100F57E4D71BDE3BD2850B2B63C92E18122000000135EDA996F00093A805A004010F4EDAF81FB6900D03013E907ED81BFB83EE273B8E563BE48DC16B2E9164B1AA6711281937B734E8C449280FCEAF4BE668545B5A55BE20C6346C42AFFCA87FFDDA0AC6A480F9AEE147541EE51FB67CDE0580FD8A746978C78C2CB9E7855BB1667469896AB18902424344AC094B3162EF09488CDB0D6A95139
pac_opaque_match = pac_opaque_capture.match(line)
if pac_opaque_match:
groups = pac_opaque_match.groupdict()
pac_opaque = groups['pac_opaque']
cts_pacs_dict['pac_opaque'] = pac_opaque
continue
# Refresh timer is set for 6w3d
refresh_timer_match = refresh_timer_capture.match(line)
if refresh_timer_match:
groups = refresh_timer_match.groupdict()
refresh_timer = groups['refresh_timer']
cts_pacs_dict['refresh_timer'] = refresh_timer
continue
return cts_pacs_dict
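# For the sample 'show cts pacs' output shown in the comments above, the
# returned dictionary has roughly this shape (the opaque value is truncated
# here for readability):
#
#     {
#         'aid': '1100E046659D4275B644BF946EFA49CD',
#         'pac_info': {
#             'aid': '1100E046659D4275B644BF946EFA49CD',
#             'pac_type': 'Cisco Trustsec',
#             'i_id': 'gw1',
#             'a_id_info': 'Identity Services Engine',
#             'credential_lifetime': 'Sun, Sep/06/2020',
#         },
#         'pac_opaque': '000200B80003000100040010...',
#         'refresh_timer': '6w3d',
#     }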
# =================================
# Schema for:
# * 'show cts role-based counters'
# =================================
class ShowCtsRoleBasedCountersSchema(MetaParser):
"""Schema for show cts role-based counters."""
schema = {
"cts_rb_count": {
int: {
"src_group": str,
"dst_group": str,
"sw_denied_count": int,
"hw_denied_count": int,
"sw_permit_count": int,
"hw_permit_count": int,
"sw_monitor_count": int,
"hw_monitor_count": int
}
}
}
# =================================
# Parser for:
# * 'show cts role-based counters'
# =================================
class ShowCtsRoleBasedCounters(ShowCtsRoleBasedCountersSchema):
"""Parser for show cts role-based counters"""
cli_command = 'show cts role-based counters'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
cts_rb_count_dict = {}
# Role-based IPv4 counters
# From To SW-Denied HW-Denied SW-Permitt HW-Permitt SW-Monitor HW-Monitor
# * * 0 0 2 30802626587 0 0
# 2 0 0 4794060 0 0 0 0
# 7 0 0 0 0 0 0 0
# 99 0 0 0 0 0 0 0
rb_counters_capture = re.compile(r"^(?P<src_group>(\d+|\*))\s+(?P<dst_group>(\d+|\*))\s+"
r"(?P<sw_denied_count>\d+)\s+(?P<hw_denied_count>\d+)\s+"
r"(?P<sw_permit_count>\d+)\s+(?P<hw_permit_count>\d+)\s+"
r"(?P<sw_monitor_count>\d+)\s+(?P<hw_monitor_count>\d+)")
remove_lines = ('Role-based IPv4 counters', 'From')
# Remove unwanted lines from raw text
def filter_lines(raw_output, remove_lines):
# Remove empty lines
clean_lines = list(filter(None, raw_output.splitlines()))
rendered_lines = []
for clean_line in clean_lines:
clean_line_strip = clean_line.strip()
# print(clean_line)
# Skip unwanted lines that start with a prefix in "remove_lines"
if not clean_line_strip.startswith(remove_lines):
rendered_lines.append(clean_line_strip)
return rendered_lines
out = filter_lines(raw_output=out, remove_lines=remove_lines)
rb_count_index = 1
rb_count_data = {}
for line in out:
# * * 0 0 2 30802626587 0 0
if rb_counters_capture.match(line):
rb_counters_match = rb_counters_capture.match(line)
groups = rb_counters_match.groupdict()
if not cts_rb_count_dict.get('cts_rb_count', {}):
cts_rb_count_dict['cts_rb_count'] = {}
if not cts_rb_count_dict['cts_rb_count'].get(rb_count_index, {}):
cts_rb_count_dict['cts_rb_count'][rb_count_index] = {}
for k, v in groups.items():
if v.isdigit() and k not in ['src_group', 'dst_group']:
v = int(v)
rb_count_data.update({k: v})
cts_rb_count_dict['cts_rb_count'][rb_count_index].update(rb_count_data)
rb_count_index = rb_count_index + 1
continue
return cts_rb_count_dict
# =============
# Schema for:
# * 'show cts'
# =============
class ShowCtsSchema(MetaParser):
"""Schema for show cts."""
schema = {
Optional("dot1x_feature"): str,
"cts_device_identity": str,
Optional("cts_sgt_caching"): str,
Optional("cts_caching_support"): str,
Optional("cts_ingress_sgt_caching"): str,
Optional("cts_sg_epg_translation"): str,
Optional("interfaces_in_dot1x_mode"): int,
"interfaces_in_manual_mode": int,
Optional("interfaces_in_l3_trustsec_mode"): int,
"interfaces_in_ifc_states": {
"init": int,
"authenticating": int,
"authorizing": int,
"sap_negotiating": int,
"open": int,
"held": int,
"disconnecting": int,
"invalid": int
},
"cts_events_statistics": {
"authentication_success": int,
"authentication_reject": int,
"authentication_failure": int,
"authentication_logoff": int,
"authentication_no_resp": int,
"authorization_success": int,
"authorization_failure": int,
"sap_success": int,
"sap_failure": int,
"port_auth_fail": int
},
Optional("installed_list"): {
Optional("name"): str,
Optional("count"): int,
Optional("server_ip"): {
Optional(Any()) : {
Optional("port"): int,
Optional("a_id"): str,
Optional("status"): str,
Optional("auto_test"): str,
Optional("keywrap_enable"): str,
Optional("idle_time_mins"): int,
Optional("deadtime_secs"): int
}
}
},
Optional("pac_summary") : {
Optional("pac_info"): {
Optional("pac_valid_until"): str,
}
},
Optional("environment_data_summary"): {
Optional("data_last_recieved"): str,
Optional("data_valid_until"): {
Optional("value"): str,
Optional("value_format"): str
}
},
Optional("sxp_connections_summary"): {
Optional("status"): str,
Optional("highest_supported_version"): int,
Optional("default_password"): str,
Optional("default_key_chain"): str,
Optional("default_key_chain_name"): str,
Optional("default_source_ip"): str,
Optional("retry_open_period_secs"): int,
Optional("reconcile_period_secs"): int,
Optional("retry_open_timer"): str,
Optional("peer_sequence_limit_export"): str,
Optional("peer_sequence_limit_import"): str,
Optional("peer_ip"): {
Optional(Any()): {
Optional("source_ip"): str,
Optional("conn_status"): str,
Optional("duration"): {
Optional("value"): str,
Optional("value_format"): str
}
}
},
Optional("total_connections"): int
},
Optional("ip_sgt_bindings"): {
Optional("ipv4"): {
Optional("total_sxp_bindings"): int,
Optional("total_active_bindings"): int
},
Optional("ipv6"): {
Optional("total_sxp_bindings"): int,
Optional("total_active_bindings"): int
},
Optional("cts_role_based_enforcement"): str,
Optional("cts_role_based_vlan_enforcement"): str
},
Optional("trusted_untrusted_links"): {
Optional("number_trusted_links"): int,
Optional("number_untrusted_links"): int
}
}
# =============
# Parser for:
# * 'show cts'
# =============
class ShowCts(ShowCtsSchema):
"""Parser for show cts"""
cli_command = 'show cts'
def cli(self, output=None):
if output is None:
output = self.device.execute(self.cli_command)
else:
output=output
# CTS device identity: "SJC-ab-gw1"
# CTS global sgt-caching: Disabled
# CTS Ingress sgt-caching: Disabled
# CTS sg-epg translation status: Disabled
#
# Number of CTS interfaces in MANUAL mode: 0
#
# Number of CTS interfaces in corresponding IFC state
# INIT state: 0
# AUTHENTICATING state: 0
# AUTHORIZING state: 0
# SAP_NEGOTIATING state: 0
# OPEN state: 0
# HELD state: 0
# DISCONNECTING state: 0
# INVALID state: 0
#
# CTS events statistics:
# authentication success: 0
# authentication reject : 0
# authentication failure: 0
# authentication logoff : 0
# authentication no resp: 0
# authorization success : 0
# authorization failure : 0
# sap success : 0
# sap failure : 0
# port auth failure : 0
#
# Installed list: CTSServerList1-0089, 7 server(s):
# *Server: 10.100.123.1, port 1812, A-ID A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A
# Status = ALIVE
# auto-test = FALSE, keywrap-enable = FALSE, idle-time = 60 mins, deadtime = 20 secs
# *Server: 10.100.123.2, port 1812, A-ID A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A
# Status = ALIVE
# auto-test = FALSE, keywrap-enable = FALSE, idle-time = 60 mins, deadtime = 20 secs
# *Server: 10.100.123.3, port 1812, A-ID A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A
# Status = ALIVE
# auto-test = FALSE, keywrap-enable = FALSE, idle-time = 60 mins, deadtime = 20 secs
# *Server: 10.100.123.4, port 1812, A-ID A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A
# Status = ALIVE
# auto-test = FALSE, keywrap-enable = FALSE, idle-time = 60 mins, deadtime = 20 secs
# *Server: 10.100.123.5, port 1812, A-ID A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A
# Status = ALIVE
# auto-test = FALSE, keywrap-enable = FALSE, idle-time = 60 mins, deadtime = 20 secs
# *Server: 10.100.123.6, port 1812, A-ID A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A
# Status = ALIVE
# auto-test = FALSE, keywrap-enable = FALSE, idle-time = 60 mins, deadtime = 20 secs
# *Server: 10.100.123.7, port 1812, A-ID A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A
# Status = ALIVE
# auto-test = FALSE, keywrap-enable = FALSE, idle-time = 60 mins, deadtime = 20 secs
# ===================
# CTS PAC Summary
# =====================
# PAC-Info:
# PAC Valid Until: 19:56:32 PDT Sep 6 2020
#
#
# ============================
# CTS Environment-Data Summary
# ============================
#
# Environment Data Last Received: 20:04:41 PDT Mon Jul 13 2020
#
# Environment Data Valid Until: 0:09:35:43 (dd:hr:mm:sec)
#
# ===================================
# SXP Connections Summary
# ===================================
# SXP : Enabled
# Highest Version Supported: 4
# Default Password : Set
# Default Key-Chain: Not Set
# Default Key-Chain Name: Not Applicable
# Default Source IP: 192.168.2.24
# Connection retry open period: 120 secs
# Reconcile period: 120 secs
# Retry open timer is not running
# Peer-Sequence traverse limit for export: Not Set
# Peer-Sequence traverse limit for import: Not Set
#
# ----------------------------------------------------------------------------------------------------------------------------------
# Peer_IP Source_IP Conn Status Duration
# ----------------------------------------------------------------------------------------------------------------------------------
# 10.100.123.1 192.168.2.24 On 44:19:54:57 (dd:hr:mm:sec)
# 10.100.123.2 192.168.2.24 On 44:19:54:57 (dd:hr:mm:sec)
# 10.100.123.3 192.168.2.24 On 44:19:54:57 (dd:hr:mm:sec)
# 10.100.123.4 192.168.2.24 On 44:19:54:57 (dd:hr:mm:sec)
# 10.100.123.5 192.168.2.24 On 44:18:58:51 (dd:hr:mm:sec)
# 10.100.123.6 192.168.2.24 On 20:12:53:44 (dd:hr:mm:sec)
# 10.100.123.7 192.168.2.24 On 44:18:58:51 (dd:hr:mm:sec)
# 10.100.123.8 192.168.2.24 On 20:12:40:45 (dd:hr:mm:sec)
# 10.100.123.9 192.168.2.24 On 44:18:58:51 (dd:hr:mm:sec)
# 10.100.123.10 192.168.2.24 On 44:18:58:51 (dd:hr:mm:sec)
# 10.100.123.11 192.168.2.24 On 44:22:21:14 (dd:hr:mm:sec)
# 10.100.123.12 192.168.2.24 On 44:18:58:51 (dd:hr:mm:sec)
# 10.100.123.13 192.168.2.24 On 45:08:24:42 (dd:hr:mm:sec)
# 10.100.123.14 192.168.2.24 On 45:08:24:42 (dd:hr:mm:sec)
# 10.100.123.15 192.168.2.24 On 36:11:31:13 (dd:hr:mm:sec)
# 10.100.123.16 192.168.2.24 On 36:12:13:54 (dd:hr:mm:sec)
#
# Total num of SXP Connections = 16
# ===================
#
# ======================================
# Summary of IPv4 & IPv6 IP-SGT bindings
# ======================================
#
#
# -IPv4-
#
# IP-SGT Active Bindings Summary
# ============================================
# Total number of SXP bindings = 3284
# Total number of active bindings = 3284
#
#
# -IPv6-
#
# IP-SGT Active Bindings Summary
# ============================================
# Total number of SXP bindings = 111
# Total number of active bindings = 111
#
#
# CTS Role Based Enforcement: Enabled
# CTS Role Based VLAN Enforcement:Enabled
#
#
# =================================
# Trusted/Un-Trusted Links
# ==================================
# Number of Trusted interfaces = 0
# Number of Un-Trusted interfaces = 0
# Global Dot1x feature: Disabled
p_dot1x = re.compile(r"^Global\s+Dot1x\s+feature:\s+(?P<dot1x>Enabled|Disabled)$")
# CTS device identity: "AAA2220Q2DP"
p_cts_device = re.compile(r"CTS\s+device\s+identity:\s+\"(?P<cts_identity>\S+)\"$")
# CTS caching support: disabled
p_cts_cache = re.compile(r"^CTS\s+caching\s+support:\s+(?P<cts_cache>enabled|disabled)$")
# CTS sgt-caching global: Disabled
p_cts_sgt = re.compile(r"^CTS\s+sgt-caching\s+global:\s+(?P<cts_sgt>Enabled|Disabled)$")
# CTS global sgt-caching: Disabled
p_cts_2_sgt = re.compile(r"^CTS\s+global\s+sgt-caching:\s+(?P<cts_2_sgt>Enabled|Disabled)$")
# CTS Ingress sgt-caching: Disabled
p_cts_ingress = re.compile(r"^CTS\s+Ingress\s+sgt-caching:\s+(?P<cts_ingress>Enabled|Disabled)")
# CTS sg-epg translation status: Disabled
p_cts_translation = re.compile(r"^CTS\s+sg-epg\s+translation\s+status:\s+(?P<cts_trans>Enabled|Disabled)")
# Number of CTS interfaces in MANUAL mode: 0
p_man_mode = re.compile(r"^Number\s+of\s+CTS\s+interfaces\s+in\s+MANUAL\s+mode:\s+(?P<man_mode>\d+)$")
# Number of CTS interfaces in DOT1X mode: 0, MANUAL mode: 0
p_dot1x_man_mode = re.compile(
r"^Number\s+of\s+CTS\s+interfaces\s+in\s+DOT1X\s+mode:\s+(?P<dot1x_mode>\d+),\s+MANUAL\s+mode:\s+(?P<man_mode>\d+)$")
# Number of CTS interfaces in LAYER3 TrustSec mode: 0
p_l3_mode = re.compile(r"^Number\s+of\s+CTS\s+interfaces\s+in\s+LAYER3\s+TrustSec\s+mode:\s+(?P<l3_mode>\d+)$")
# Number of CTS interfaces in corresponding IFC state
p_ifc_state = re.compile(r"^Number\s+of\s+CTS\s+interfaces\s+in\s+corresponding\s+IFC\s+state$")
# INIT state: 0
p_init = re.compile(r"^INIT\s+state:\s+(?P<init>\d+)$")
# AUTHENTICATING state: 0
p_authenticating = re.compile(r"^AUTHENTICATING\s+state:\s+(?P<auth>\d+)$")
# AUTHORIZING state: 0
p_authorizing = re.compile(r"^AUTHORIZING\s+state:\s+(?P<authorizing>\d+)$")
# SAP_NEGOTIATING state: 0
p_sap = re.compile(r"^SAP_NEGOTIATING\s+state:\s+(?P<sap>\d+)$")
# OPEN state: 0
p_open = re.compile(r"^OPEN\s+state:\s+(?P<open>\d+)$")
# HELD state: 0
p_held = re.compile(r"^HELD\s+state:\s+(?P<held>\d+)$")
# DISCONNECTING state: 0
p_disconnect = re.compile(r"^DISCONNECTING\s+state:\s+(?P<disconnect>\d+)$")
# INVALID state: 0
p_invalid = re.compile(r"^INVALID\s+state:\s+(?P<invalid>\d+)$")
# CTS events statistics:
p_cts_event = re.compile(r"^CTS\s+events\s+statistics:$")
# authentication success: 0
p_stat_authentication = re.compile(r"^authentication\s+success:\s+(?P<cts_authentication>\d+)$")
# authentication reject: 0
p_stat_reject = re.compile(r"^authentication\s+reject\s+:\s+(?P<cts_reject>\d+)$")
# authentication failure: 0
p_stat_failure = re.compile(r"^authentication\s+failure:\s+(?P<cts_failure>\d+)$")
# authentication logoff: 0
p_stat_logoff = re.compile(r"^authentication\s+logoff\s+:\s+(?P<cts_logoff>\d+)$")
# authentication no resp: 0
p_stat_noresp = re.compile(r"^authentication\s+no\s+resp:\s+(?P<cts_noresp>\d+)$")
# authorization success: 0
p_stat_authorization = re.compile(r"^authorization\s+success\s+:\s+(?P<cts_authorization>\d+)$")
# authorization failure: 0
p_stat_authorization_fail = re.compile(r"^authorization\s+failure\s+:\s+(?P<cts_authorization_fail>\d+)$")
# sap success: 0
p_stat_sap = re.compile(r"^sap\s+success\s+:\s+(?P<cts_sap>\d+)$")
# sap failure: 0
p_stat_sap_failure = re.compile(r"^sap\s+failure\s+:\s+(?P<cts_sap_failure>\d+)$")
# port auth failure: 0
p_port_fail = re.compile(r"^port\s+auth\s+failure\s+:\s+(?P<port_fail>\d+)$")
# Installed list: CTSServerList1-0085, 1 server(s):
p_installed_list = re.compile(r"^Installed\s+list:\s+(?P<serv_list_name>\S+),\s+(?P<serv_count>\d+)\s+server\(s\):$")
# *Server: 10.100.123.1, port 1812, A-ID A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A
p_server_ast = re.compile(r"^\*Server:\s+(?P<serv_ip>[^,]+),\s+port\s+(?P<serv_port>\d+),\s+A-ID\s+(?P<serv_id>\S+)$")
# Server: 10.100.123.1, port 1812, A-ID A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A
p_server = re.compile(r"^Server:\s+(?P<serv_ip>[^,]+),\s+port\s+(?P<serv_port>\d+),\s+A-ID\s+(?P<serv_id>\S+)$")
# Status = ALIVE
p_server_status = re.compile(r"^Status\s+=\s+(?P<serv_status>\S+)")
# auto-test = FALSE, keywrap-enable = FALSE, idle-time = 60 mins, deadtime = 20 secs
p_server_attributes = re.compile(
r"^auto-test\s+=\s+(?P<test>[^,]+),\s+keywrap-enable\s+=\s+(?P<keywrap>[^,]+),\s+idle-time\s+=\s+(?P<idle>\d+)\s+mins,\s+deadtime\s+=\s+(?P<dead>\d+)\s+secs$")
# ===================
p_equal_header = re.compile(r"^=+")
# CTS PAC Summary
p_header_1 = re.compile(r"^CTS\s+PAC\s+Summary$")
# PAC-Info:
p_pac_header = re.compile(r"^PAC-Info:$")
# PAC Valid Until: 09:24:04 UTC Oct 10 2020
p_pac_valid = re.compile(r"^PAC\s+Valid\s+Until:\s+(?P<valid>.*)")
# CTS Environment-Data Summary
p_environment_header = re.compile(r"^CTS\s+Environment-Data\s+Summary$")
# Environment Data Last Received: 09:26:17 UTC Tue Jul 14 2020
p_env_last = re.compile(r"^Environment\s+Data\s+Last\s+Received:\s+(?P<last>.*)$")
# Environment Data Valid Until: 0:16:13:37 (dd:hr:mm:sec)
p_env_valid = re.compile(r"^Environment\s+Data\s+Valid\s+Until:\s+(?P<value>\S+)\s+\((?P<format>[^)]+)\)$")
# SXP Connections Summary
p_sxp_summary = re.compile(r"^SXP\s+Connections\s+Summary$")
# SXP : Enabled
p_sxp_enabled = re.compile(r"SXP\s+:\s+(?P<status>Enabled|Disabled)$")
# Highest Version Supported: 4
p_sxp_version = re.compile(r"^Highest\s+Version\s+Supported:\s+(?P<version>\d+)")
# Default Password : Set
p_sxp_password = re.compile(r"^Default\s+Password\s+:\s+(?P<pass>.*)$")
# Default Key-Chain: Not Set
p_sxp_key_chain = re.compile(r"^Default\s+Key-Chain:\s+(?P<key>.*)$")
# Default Key-Chain Name: Not Applicable
p_sxp_key_chain_name = re.compile(r"^Default\s+Key-Chain\s+Name:\s+(?P<name>.*)$")
# Default Source IP: Not Set
p_sxp_source = re.compile(r"Default\s+Source\s+IP:\s+(?P<ip>.*)$")
# Connection retry open period: 120 secs
p_sxp_retry = re.compile(r"^Connection\s+retry\s+open\s+period:\s+(?P<time>\d+)\s+secs$")
# Reconcile period: 120 secs
p_reconcile = re.compile(r"^Reconcile\s+period:\s+(?P<period>\d+)\s+secs$")
# Retry open timer is not running
p_open_timer = re.compile(r"^Retry\s+open\s+timer\s+is\s+not\s+running$")
# Peer-Sequence traverse limit for export: Not Set
p_limit_export = re.compile(r"^Peer-Sequence\s+traverse\s+limit\s+for\s+export:\s+(?P<export>.*)$")
# Peer-Sequence traverse limit for import: Not Set
p_limit_import = re.compile(r"^Peer-Sequence\s+traverse\s+limit\s+for\s+import:\s+(?P<import>.*)$")
# There are no SXP Connections.
p_sxp_no_conn = re.compile(r"^There\s+are\s+no\s+SXP\s+Connections.$")
# ----------------------------------------------------------------------------------------------------------------------------------
p_hyphen_header = re.compile(r"^-+$")
# Peer_IP Source_IP Conn Status Duration
p_sxp_conn_header = re.compile(r"^Peer_IP\s+Source_IP\s+\s+Conn\s+Status\s+Duration$")
# 10.100.123.1 192.168.2.24 On 44:19:54:57 (dd:hr:mm:sec)
p_sxp_conn = re.compile(r"^(?P<peer_ip>\S+)\s+(?P<source_ip>\S+)\s+(?P<conn_status>\S+)\s+(?P<dur_val>\S+)\s+\((?P<dur_format>[^)]+)\)$")
# Total num of SXP Connections = 16
p_sxp_total = re.compile(r"^Total\s+num\s+of\s+SXP\s+Connections\s+=\s+(?P<total>\d+)$")
# Summary of IPv4 & IPv6 IP-SGT bindings
p_sum_ip_sgt = re.compile(r"^Summary\s+of\s+IPv4\s+&\s+IPv6\s+IP-SGT\s+bindings$")
# -IPv4-
p_ipv4_header = re.compile(r"^-IPv4-$")
# IP-SGT Active Bindings Summary
p_ip_sgt_active_header = re.compile(r"^IP-SGT\s+Active\s+Bindings\s+Summary$")
# Total number of SXP bindings = 3284
p_total_sxp = re.compile(r"^Total\s+number\s+of\s+SXP\s+bindings\s+=\s+(?P<total>\d+)$")
# Total number of active bindings = 3284
p_total_active_sxp = re.compile(r"^Total\s+number\s+of\s+active\s+bindings\s+=\s+(?P<active>\d+)$")
# -IPv6-
p_ipv6_header = re.compile(r"^-IPv6-$")
# CTS Role Based Enforcement: Enabled
p_role_based = re.compile(r"^CTS\s+Role\s+Based\s+Enforcement:\s+(?P<value>Enabled|Disabled)$")
# CTS Role Based VLAN Enforcement:Enabled
p_role_based_vlan = re.compile(r"^CTS\s+Role\s+Based\s+VLAN\s+Enforcement:(?P<value>Enabled|Disabled)$")
# Trusted/Un-Trusted Links
p_links_header = re.compile(r"^Trusted\/Un-Trusted\s+Links$")
# Number of Trusted interfaces = 0
p_trusted_links = re.compile(r"^Number\s+of\s+Trusted\s+interfaces\s+=\s+(?P<count>\d+)$")
# Number of Un-Trusted interfaces = 0
p_untrusted_links = re.compile(r"^Number\s+of\s+Un-Trusted\s+interfaces\s+=\s+(?P<count>\d+)$")
def update_bindings(active_bindings, cts_dict):
if len(active_bindings) == 2:
cts_dict["ip_sgt_bindings"]["ipv4"].update({ "total_sxp_bindings": int(active_bindings[0]) })
cts_dict["ip_sgt_bindings"]["ipv6"].update({ "total_sxp_bindings": int(active_bindings[1]) })
else:
# Update IPv4 binding count
cts_dict["ip_sgt_bindings"]["ipv4"].update({ "total_sxp_bindings": int(active_bindings[0]) })
cts_dict["ip_sgt_bindings"]["ipv4"].update({ "total_active_bindings": int(active_bindings[1]) })
# Update IPv6 binding count
cts_dict["ip_sgt_bindings"]["ipv6"].update({ "total_sxp_bindings": int(active_bindings[2]) })
cts_dict["ip_sgt_bindings"]["ipv6"].update({ "total_active_bindings": int(active_bindings[3]) })
return cts_dict
cts_dict = {}
active_bindings = []
for line in output.splitlines():
line = line.strip()
# Global Dot1x feature: Disabled
if p_dot1x.match(line):
match = p_dot1x.match(line)
cts_dict.update({ "dot1x_feature": match.group("dot1x") })
continue
# CTS device identity: "AAA2220Q2DP"
elif p_cts_device.match(line):
match = p_cts_device.match(line)
cts_dict.update({ "cts_device_identity": match.group("cts_identity")})
continue
# CTS caching support: disabled
elif p_cts_cache.match(line):
match = p_cts_cache.match(line)
cts_dict.update({ "cts_caching_support": match.group("cts_cache")})
continue
# CTS sgt-caching global: Disabled
elif p_cts_sgt.match(line):
match = p_cts_sgt.match(line)
cts_dict.update({ "cts_sgt_caching": match.group("cts_sgt")})
continue
# CTS global sgt-caching: Disabled
elif p_cts_2_sgt.match(line):
match = p_cts_2_sgt.match(line)
cts_dict.update({ "cts_sgt_caching": match.group("cts_2_sgt")})
continue
# CTS Ingress sgt-caching: Disabled
elif p_cts_ingress.match(line):
match = p_cts_ingress.match(line)
cts_dict.update({ "cts_ingress_sgt_caching": match.group("cts_ingress") })
# CTS sg-epg translation status: Disabled
elif p_cts_translation.match(line):
match = p_cts_translation.match(line)
cts_dict.update({ "cts_sg_epg_translation": match.group("cts_trans") })
# Number of CTS interfaces in DOT1X mode: 0, MANUAL mode: 0
elif p_dot1x_man_mode.match(line):
match = p_dot1x_man_mode.match(line)
cts_dict.update({ "interfaces_in_dot1x_mode": int(match.group("dot1x_mode")), "interfaces_in_manual_mode": int(match.group("man_mode")) })
continue
# Number of CTS interfaces in MANUAL mode: 0
elif p_man_mode.match(line):
match = p_man_mode.match(line)
cts_dict.update({ "interfaces_in_manual_mode": int(match.group("man_mode")) })
continue
# Number of CTS interfaces in LAYER3 TrustSec mode: 0
elif p_l3_mode.match(line):
match = p_l3_mode.match(line)
cts_dict.update({ "interfaces_in_l3_trustsec_mode": match.group("l3_mode")})
continue
# Number of CTS interfaces in corresponding IFC state
elif p_ifc_state.match(line):
if not cts_dict.get("interfaces_in_ifc_states"):
cts_dict.update({ "interfaces_in_ifc_states": {} })
continue
# INIT state: 0
elif p_init.match(line):
match = p_init.match(line)
cts_dict["interfaces_in_ifc_states"].update({ "init" : int(match.group("init")) })
continue
# AUTHENTICATING state: 0
elif p_authenticating.match(line):
match = p_authenticating.match(line)
cts_dict["interfaces_in_ifc_states"].update({ "authenticating" : int(match.group("auth")) })
continue
# AUTHORIZING state: 0
elif p_authorizing.match(line):
match = p_authorizing.match(line)
cts_dict["interfaces_in_ifc_states"].update({ "authorizing" : int(match.group("authorizing")) })
continue
# SAP_NEGOTIATING state: 0
elif p_sap.match(line):
match = p_sap.match(line)
cts_dict["interfaces_in_ifc_states"].update({ "sap_negotiating" : int(match.group("sap")) })
continue
# OPEN state: 0
elif p_open.match(line):
match = p_open.match(line)
cts_dict["interfaces_in_ifc_states"].update({ "open" : int(match.group("open")) })
continue
# HELD state: 0
elif p_held.match(line):
match = p_held.match(line)
cts_dict["interfaces_in_ifc_states"].update({ "held" : int(match.group("held")) })
continue
# DISCONNECTING state: 0
elif p_disconnect.match(line):
match = p_disconnect.match(line)
cts_dict["interfaces_in_ifc_states"].update({ "disconnecting" : int(match.group("disconnect")) })
continue
# INVALID state: 0
elif p_invalid.match(line):
match = p_invalid.match(line)
cts_dict["interfaces_in_ifc_states"].update({ "invalid" : int(match.group("invalid")) })
continue
# CTS events statistics
elif p_cts_event.match(line):
if not cts_dict.get("cts_events_statistics"):
cts_dict.update({ "cts_events_statistics": {} })
continue
# authentication success: 0
elif p_stat_authentication.match(line):
match = p_stat_authentication.match(line)
cts_dict["cts_events_statistics"].update({ "authentication_success": int(match.group("cts_authentication")) })
continue
# authentication reject: 0
elif p_stat_reject.match(line):
match = p_stat_reject.match(line)
cts_dict["cts_events_statistics"].update({ "authentication_reject": int(match.group("cts_reject")) })
continue
# authentication failure: 0
elif p_stat_failure.match(line):
match = p_stat_failure.match(line)
cts_dict["cts_events_statistics"].update({ "authentication_failure": int(match.group("cts_failure")) })
continue
# authentication logoff: 0
elif p_stat_logoff.match(line):
match = p_stat_logoff.match(line)
cts_dict["cts_events_statistics"].update({ "authentication_logoff": int(match.group("cts_logoff")) })
continue
# authentication no resp: 0
elif p_stat_noresp.match(line):
match = p_stat_noresp.match(line)
cts_dict["cts_events_statistics"].update({ "authentication_no_resp": int(match.group("cts_noresp")) })
continue
# authorization success: 0
elif p_stat_authorization.match(line):
match = p_stat_authorization.match(line)
cts_dict["cts_events_statistics"].update({ "authorization_success": int(match.group("cts_authorization")) })
continue
# authorization failure: 0
elif p_stat_authorization_fail.match(line):
match = p_stat_authorization_fail.match(line)
cts_dict["cts_events_statistics"].update({ "authorization_failure": int(match.group("cts_authorization_fail")) })
continue
# sap success: 0
elif p_stat_sap.match(line):
match = p_stat_sap.match(line)
cts_dict["cts_events_statistics"].update({ "sap_success": int(match.group("cts_sap")) })
continue
# sap failure: 0
elif p_stat_sap_failure.match(line):
match = p_stat_sap_failure.match(line)
cts_dict["cts_events_statistics"].update({ "sap_failure": int(match.group("cts_sap_failure")) })
continue
elif p_port_fail.match(line):
match = p_port_fail.match(line)
cts_dict["cts_events_statistics"].update({ "port_auth_fail": int(match.group("port_fail")) })
continue
# Installed list: CTSServerList1-0089, 7 server(s):
elif p_installed_list.match(line):
match = p_installed_list.match(line)
group = match.groupdict()
if not cts_dict.get("installed_list"):
cts_dict.update({ "installed_list" : {} })
cts_dict["installed_list"].update({ "name" : group["serv_list_name"], "count": int(group["serv_count"]) })
continue
# *Server: 10.100.123.1, port 1812, A-ID A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A
elif p_server_ast.match(line):
match = p_server_ast.match(line)
group = match.groupdict()
serv_ip = group["serv_ip"]
if not cts_dict["installed_list"].get("server_ip"):
cts_dict["installed_list"].update({ "server_ip": {} })
cts_dict["installed_list"]["server_ip"].update({ serv_ip: {} })
cts_dict["installed_list"]["server_ip"][serv_ip].update({ "port": int(group["serv_port"]), "a_id": group["serv_id"]})
continue
# Server: 10.100.123.1, port 1812, A-ID A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A
elif p_server.match(line):
match = p_server.match(line)
group = match.groupdict()
serv_ip = group["serv_ip"]
if not cts_dict["installed_list"].get("server_ip"):
cts_dict["installed_list"].update({ "server_ip": {} })
cts_dict["installed_list"]["server_ip"].update({ serv_ip: {} })
cts_dict["installed_list"]["server_ip"][serv_ip].update({ "port": int(group["serv_port"]), "a_id": group["serv_id"]})
continue
# Status = ALIVE
elif p_server_status.match(line):
match = p_server_status.match(line)
cts_dict["installed_list"]["server_ip"][serv_ip].update({ "status": match.group("serv_status") })
continue
# auto-test = FALSE, keywrap-enable = FALSE, idle-time = 60 mins, deadtime = 20 secs
elif p_server_attributes.match(line):
match = p_server_attributes.match(line)
group = match.groupdict()
cts_dict["installed_list"]["server_ip"][serv_ip].update({ "auto_test": group["test"], "keywrap_enable": group["keywrap"],
"idle_time_mins": int(group["idle"]), "deadtime_secs": int(group["dead"]) })
continue
# Any number of '=' ex: "======" or "================================"
elif p_equal_header.match(line):
continue
# CTS PAC Summary
elif p_header_1.match(line):
continue
# PAC-info
elif p_pac_header.match(line):
continue
# PAC Valid Until: 09:24:04 UTC Oct 10 2020
elif p_pac_valid.match(line):
match = p_pac_valid.match(line)
cts_dict.update({ "pac_summary": { "pac_info": {} }})
cts_dict["pac_summary"]["pac_info"].update({ "pac_valid_until": match.group("valid")})
continue
# CTS Environment-Data Summary
elif p_environment_header.match(line):
if not cts_dict.get("environemtn_data_summary"):
cts_dict.update({ "environment_data_summary": {} })
continue
# Environment Data Last Received: 09:26:17 UTC Tue Jul 14 2020
elif p_env_last.match(line):
match = p_env_last.match(line)
cts_dict["environment_data_summary"].update({ "data_last_recieved": match.group("last") })
continue
# Environment Data Valid Until: 0:16:13:37 (dd:hr:mm:sec)
elif p_env_valid.match(line):
match = p_env_valid.match(line)
group = match.groupdict()
cts_dict["environment_data_summary"].update({ "data_valid_until": {} })
cts_dict["environment_data_summary"]["data_valid_until"].update({ "value" : group["value"], "value_format": group["format"] })
continue
# SXP Connections Summary
elif p_sxp_summary.match(line):
if not cts_dict.get("sxp_connections_summary"):
cts_dict.update({ "sxp_connections_summary": {} })
continue
# SXP : Enabled
elif p_sxp_enabled.match(line):
match = p_sxp_enabled.match(line)
cts_dict["sxp_connections_summary"].update({ "status": match.group("status") })
continue
# Highest Version Supported: 4
elif p_sxp_version.match(line):
match = p_sxp_version.match(line)
cts_dict["sxp_connections_summary"].update({ "highest_supported_version": int(match.group("version")) })
continue
# Default Password : Set
elif p_sxp_password.match(line):
match = p_sxp_password.match(line)
cts_dict["sxp_connections_summary"].update({ "default_password": match.group("pass") })
continue
# Default Key-Chain: Not Set
elif p_sxp_key_chain.match(line):
match = p_sxp_key_chain.match(line)
cts_dict["sxp_connections_summary"].update({ "default_key_chain": match.group("key") })
continue
# Default Key-Chain Name: Not Applicable
elif p_sxp_key_chain_name.match(line):
match = p_sxp_key_chain_name.match(line)
cts_dict["sxp_connections_summary"].update({ "default_key_chain_name": match.group("name") })
continue
# Default Source IP: Not Set
elif p_sxp_source.match(line):
match = p_sxp_source.match(line)
cts_dict["sxp_connections_summary"].update({ "default_source_ip": match.group("ip") })
continue
# Connection retry open period: 120 secs
elif p_sxp_retry.match(line):
match = p_sxp_retry.match(line)
cts_dict["sxp_connections_summary"].update({ "retry_open_period_secs": int(match.group("time")) })
continue
# Reconcile period: 120 secs
elif p_reconcile.match(line):
match = p_reconcile.match(line)
cts_dict["sxp_connections_summary"].update({ "reconcile_period_secs": int(match.group("period")) })
continue
# Retry open timer is not running
elif p_open_timer.match(line):
match = p_open_timer.match(line)
cts_dict["sxp_connections_summary"].update({ "retry_open_timer": "disabled" })
continue
# Peer-Sequence traverse limit for export: Not Set
elif p_limit_export.match(line):
match = p_limit_export.match(line)
cts_dict["sxp_connections_summary"].update({ "peer_sequence_limit_export": match.group("export") })
continue
# Peer-Sequence traverse limit for import: Not Set
elif p_limit_import.match(line):
match = p_limit_import.match(line)
cts_dict["sxp_connections_summary"].update({ "peer_sequence_limit_import": match.group("import") })
continue
# There are no SXP Connections.
elif p_sxp_no_conn.match(line):
continue
# ----------------------------------------------------------------------------------------------------------------------------------
elif p_hyphen_header.match(line):
continue
# Peer_IP Source_IP Conn Status Duration
elif p_sxp_conn_header.match(line):
continue
# Total num of SXP Connections = 16
elif p_sxp_total.match(line):
match = p_sxp_total.match(line)
cts_dict["sxp_connections_summary"].update({ "total_connections": int(match.group("total")) })
continue
# 10.100.123.1 192.168.2.24 On 44:19:54:57 (dd:hr:mm:sec)
elif p_sxp_conn.match(line):
match = p_sxp_conn.match(line)
group = match.groupdict()
peer_ip = group["peer_ip"]
if not cts_dict["sxp_connections_summary"].get("peer_ip"):
cts_dict["sxp_connections_summary"].update({ "peer_ip": {} })
cts_dict["sxp_connections_summary"]["peer_ip"].update({ peer_ip: {} })
cts_dict["sxp_connections_summary"]["peer_ip"][peer_ip].update({ "source_ip": group["source_ip"], "conn_status": group["conn_status"],
"duration": {} })
cts_dict["sxp_connections_summary"]["peer_ip"][peer_ip]["duration"].update({ "value": group["dur_val"], "value_format": group["dur_format"] })
continue
# Summary of IPv4 & IPv6 IP-SGT bindings
elif p_sum_ip_sgt.match(line):
if not cts_dict.get("ip_sgt_bindings"):
cts_dict.update({ "ip_sgt_bindings": {} })
continue
# -IPv4-
elif p_ipv4_header.match(line):
if not cts_dict["ip_sgt_bindings"].get("ipv4"):
cts_dict["ip_sgt_bindings"].update({ "ipv4": {} })
continue
# IP-SGT Active Bindings Summary
elif p_ip_sgt_active_header.match(line):
continue
# Total number of SXP bindings = 3284
elif p_total_sxp.match(line):
match = p_total_sxp.match(line)
active_bindings.append(match.group("total"))
# cts_dict["ip_sgt_bindings"]["ipv4"].update({ "total_sxp_bindings": match.group("total") })
continue
# Total number of active bindings = 3284
elif p_total_active_sxp.match(line):
match = p_total_active_sxp.match(line)
active_bindings.append(match.group("active"))
# cts_dict["ip_sgt_bindings"]["ipv4"].update({ "total_active_bindings": match.group("active") })
continue
# -IPv6-
elif p_ipv6_header.match(line):
if not cts_dict["ip_sgt_bindings"].get("ipv6"):
cts_dict["ip_sgt_bindings"].update({ "ipv6": {} })
continue
# CTS Role Based Enforcement: Enabled
elif p_role_based.match(line):
match = p_role_based.match(line)
cts_dict["ip_sgt_bindings"].update({ "cts_role_based_enforcement": match.group("value") })
continue
# CTS Role Based VLAN Enforcement: Enabled
elif p_role_based_vlan.match(line):
match = p_role_based_vlan.match(line)
cts_dict["ip_sgt_bindings"].update({ "cts_role_based_vlan_enforcement": match.group("value") })
continue
# Trusted/Un-Trusted Links
elif p_links_header.match(line):
if not cts_dict.get("trusted_untrusted_links"):
cts_dict.update({ "trusted_untrusted_links" : {} })
continue
# Number of Trusted interfaces = 0
elif p_trusted_links.match(line):
match = p_trusted_links.match(line)
cts_dict["trusted_untrusted_links"].update({ "number_trusted_links": int(match.group("count")) })
# Number of Un-Trusted interfaces = 0
elif p_untrusted_links.match(line):
match = p_untrusted_links.match(line)
cts_dict["trusted_untrusted_links"].update({ "number_untrusted_links": int(match.group("count")) })
continue
if not active_bindings:
return cts_dict
else:
cts_dict = update_bindings(active_bindings, cts_dict)
return cts_dict
# ==============================
# Schema for:
# * 'show cts environment-data'
# ==============================
class ShowCtsEnvironmentDataSchema(MetaParser):
"""Schema for show cts environment-data."""
schema = {
"cts_env": {
"current_state": str,
"last_status": str,
Optional("sgt_tags"): str,
Optional("tag_status"): str,
Optional("server_list_name"): str,
Optional("server_count"): int,
Optional("servers"): {
Optional(int): {
Optional("server_ip"): str,
Optional("port"): int,
Optional("aid"): str,
Optional("server_status"): str,
Optional("auto_test"): str,
Optional("keywrap_enable"): str,
Optional("idle_time_mins"): int,
Optional("dead_time_secs"): int
}
},
Optional("security_groups"): {
Optional(int): {
Optional("sec_group"): str,
Optional("sec_group_name"): str
}
},
Optional("env_data_lifetime_secs"): int,
Optional("last_update"): {
Optional("date"): str,
Optional("time"): str,
Optional("time_zone"): str
},
Optional("expiration"): str,
Optional("refresh"): str,
"state_machine_status": str,
Optional("retry_timer_status"): str,
Optional("cache_data_status"): str
}
}
# ==============================
# Parser for:
# * 'show cts environment-data'
# ==============================
class ShowCtsEnvironmentData(ShowCtsEnvironmentDataSchema):
"""Parser for show cts environment-data"""
cli_command = 'show cts environment-data'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
cts_env_dict = {}
# CTS Environment Data
# ====================
# Current state = COMPLETE
# Last status = Successful
# Local Device SGT:
# SGT tag = 0-16:Unknown
# Server List Info:
# Installed list: CTSServerList1-0089, 4 server(s):
# *Server: 10.1.100.4, port 1812, A-ID A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A
# Status = ALIVE
# auto-test = FALSE, keywrap-enable = FALSE, idle-time = 60 mins, deadtime = 20 secs
# *Server: 10.1.100.5, port 1812, A-ID A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A
# Status = ALIVE
# auto-test = FALSE, keywrap-enable = FALSE, idle-time = 60 mins, deadtime = 20 secs
# *Server: 10.1.100.6, port 1812, A-ID A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A
# Status = ALIVE
# auto-test = FALSE, keywrap-enable = FALSE, idle-time = 60 mins, deadtime = 20 secs
# *Server: 10.1.100.6, port 1812, A-ID A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A
# Status = ALIVE
# auto-test = FALSE, keywrap-enable = FALSE, idle-time = 60 mins, deadtime = 20 secs
# Security Group Name Table:
# 0-15:Unit0
# 2-12:Unit1
# 3-10:Unit2
# 4-11:Device11
# 3215-08:K2
# 9999-06:Q1
# 68-10:North
# 5016-00:Quarantine
# 8000-00:TEST_8000
# Environment Data Lifetime = 86400 secs
# Last update time = 20:04:42 PDT Tue Jul 21 2020
# Env-data expires in 0:00:46:51 (dd:hr:mm:sec)
# Env-data refreshes in 0:00:46:51 (dd:hr:mm:sec)
# Cache data applied = NONE
# State Machine is running
# Current state = COMPLETE
current_state_capture = re.compile(r"^Current\s+state\s+=\s+(?P<state>.*$)")
# Last status = Successful
last_status_capture = re.compile(r"^Last\s+status\s+=\s+(?P<last_status>.*$)")
# SGT tag = 0-16:Unknown
tags_capture = re.compile(r"^SGT\s+tag\s+=\s+(?P<sgt_tags>\d+-\d+):(?P<tag_status>\w+)")
# Installed list: CTSServerList1-0089, 4 server(s):
server_list_capture = re.compile(
r"^Installed\s+list:\s+(?P<server_list_name>\S+),\s+(?P<server_count>\d+)\s+server\(s\):", re.MULTILINE)
# *Server: 10.1.100.4, port 1812, A-ID A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A
servers_capture = re.compile(
r"^(\*|)Server:\s+(?P<server_ip>\d+\.\d+\.\d+\.\d+),\s+port\s+(?P<port>\d+),\s+A-ID\s+(?P<aid>\S+)")
# Status = ALIVE
server_status_capture = re.compile(r"^Status\s+=\s+(?P<server_status>\S+)")
# auto-test = FALSE, keywrap-enable = FALSE, idle-time = 60 mins, deadtime = 20 secs
keywrap_capture = re.compile(
r"^auto-test\s+=\s+(?P<auto_test>(TRUE|FALSE)),\s+keywrap-enable\s+=\s+(?P<keywrap_enable>(TRUE|FALSE)),\s+idle-time\s+=\s+(?P<idle_time_mins>\d+)\s+mins,\s+deadtime\s+=\s+(?P<dead_time_secs>\d+)\s+secs")
# 0-15:Unit0
sec_group_capture = re.compile(r"^(?P<sec_group>\S+):(?P<sec_group_name>\S+)")
# Environment Data Lifetime = 86400 secs
env_data_capture = re.compile(r"^Environment\s+Data\s+Lifetime\s+=\s+(?P<env_data_lifetime_secs>\d+)\s+secs")
# Last update time = 20:04:42 PDT Tue Jul 21 2020
last_update_capture = re.compile(
r"^Last\s+update\s+time\s+=\s+(?P<time>\d+:\d+:\d+)\s+(?P<time_zone>\w+)\s+(?P<day>\S+)\s+(?P<month>\S+)\s+(?P<date>\d+)\s+(?P<year>\d+)")
# Env-data expires in 0:00:46:51 (dd:hr:mm:sec)
expiration_capture = re.compile(r"^Env-data\s+expires\s+in\s+(?P<expiration>\d+:\d+:\d+:\d+)\s+\S+")
# Env-data refreshes in 0:00:46:51 (dd:hr:mm:sec)
refresh_capture = re.compile(r"^Env-data\s+refreshes\s+in\s+(?P<refresh>\d+:\d+:\d+:\d+)\s+\S+")
# Cache data applied = NONE
cache_data_capture = re.compile(r"^Cache\s+data\s+applied\s+=\s+(?P<cache_data_status>\S+)")
# State Machine is running
state_machine_capture = re.compile(r"^State\s+Machine\s+is\s+(?P<state_machine_status>\S+)")
# Retry_timer (60 secs) is not running
retry_capture = re.compile(
r"^Retry_timer\s+\((?P<retry_timer_secs>\d+)\s+secs\)\s+is\s+(?P<retry_timer_status>.*$)")
remove_lines = (
'CTS Environment Data', '=========', 'Local Device SGT:', 'Server List Info:', 'Security Group Name Table:')
# Remove unwanted lines from raw text
def filter_lines(raw_output, remove_lines):
# Remove empty lines
clean_lines = list(filter(None, raw_output.splitlines()))
rendered_lines = []
for clean_line in clean_lines:
clean_line_strip = clean_line.strip()
# print(clean_line)
# Skip unwanted lines that start with a prefix in "remove_lines"
if not clean_line_strip.startswith(remove_lines):
rendered_lines.append(clean_line_strip)
return rendered_lines
out = filter_lines(raw_output=out, remove_lines=remove_lines)
server_data = {}
security_groups = {}
keywrap_index = 1
sec_group_index = 1
for line in out:
# Current state = COMPLETE
current_state_match = current_state_capture.match(line)
if current_state_match:
groups = current_state_match.groupdict()
current_state = groups['state']
if not cts_env_dict.get('cts_env', {}):
cts_env_dict['cts_env'] = {}
cts_env_dict['cts_env']['current_state'] = current_state
continue
# Last status = Successful
last_status_match = last_status_capture.match(line)
if last_status_match:
groups = last_status_match.groupdict()
last_status = groups['last_status']
cts_env_dict['cts_env']['last_status'] = last_status
continue
# SGT tag = 0-16:Unknown
tags_match = tags_capture.match(line)
if tags_match:
groups = tags_match.groupdict()
sgt_tags = groups['sgt_tags']
tag_status = groups['tag_status']
cts_env_dict['cts_env']['sgt_tags'] = sgt_tags
cts_env_dict['cts_env']['tag_status'] = tag_status
continue
# Installed list: CTSServerList1-0089, 4 server(s):
server_list_match = server_list_capture.match(line)
if server_list_match:
groups = server_list_match.groupdict()
server_list_name = groups['server_list_name']
server_count = int(groups['server_count'])
cts_env_dict['cts_env']['server_list_name'] = server_list_name
cts_env_dict['cts_env']['server_count'] = server_count
continue
# *Server: 10.1.100.4, port 1812, A-ID A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A1A
servers_match = servers_capture.match(line)
if servers_match:
groups = servers_match.groupdict()
server_ip = groups['server_ip']
port = int(groups['port'])
aid = groups['aid']
server_data = {'server_ip': server_ip, 'port': port, 'aid': aid}
continue
# Status = ALIVE
server_status_match = server_status_capture.match(line)
if server_status_match:
groups = server_status_match.groupdict()
server_status = groups['server_status']
server_data.update({'server_status': server_status})
if not cts_env_dict['cts_env'].get('servers', {}):
cts_env_dict['cts_env']['servers'] = {}
continue
# auto-test = FALSE, keywrap-enable = FALSE, idle-time = 60 mins, deadtime = 20 secs
keywrap_match = keywrap_capture.match(line)
if keywrap_match:
groups = keywrap_match.groupdict()
auto_test = groups['auto_test']
keywrap_enable = groups['keywrap_enable']
idle_time_mins = int(groups['idle_time_mins'])
dead_time_secs = int(groups['dead_time_secs'])
server_data.update(
{'auto_test': auto_test, 'keywrap_enable': keywrap_enable, 'idle_time_mins': idle_time_mins,
'dead_time_secs': dead_time_secs})
if not cts_env_dict['cts_env'].get('servers', {}):
cts_env_dict['cts_env']['servers'] = {}
if not cts_env_dict['cts_env']['servers'].get(keywrap_index, {}):
cts_env_dict['cts_env']['servers'][keywrap_index] = server_data
keywrap_index = keywrap_index + 1
continue
# 0-15:Unit0
sec_group_match = sec_group_capture.match(line)
if sec_group_match:
groups = sec_group_match.groupdict()
sec_group = groups['sec_group']
sec_group_name = groups['sec_group_name']
sec_groups_data = {'sec_group': sec_group, 'sec_group_name': sec_group_name}
if not cts_env_dict['cts_env'].get('security_groups', {}):
cts_env_dict['cts_env']['security_groups'] = {}
if not cts_env_dict['cts_env']['security_groups'].get(sec_group_index, {}):
cts_env_dict['cts_env']['security_groups'][sec_group_index] = sec_groups_data
sec_group_index = sec_group_index + 1
continue
# Environment Data Lifetime = 86400 secs
env_data_match = env_data_capture.match(line)
if env_data_match:
groups = env_data_match.groupdict()
if groups.get('env_empty', {}):
env_data = groups['env_empty']
cts_env_dict['cts_env']['env_data'] = env_data
else:
env_data_lifetime_secs = groups['env_data_lifetime_secs']
cts_env_dict['cts_env']['env_data_lifetime_secs'] = int(env_data_lifetime_secs)
continue
# Last update time = 20:04:42 PDT Tue Jul 21 2020
last_update_match = last_update_capture.match(line)
if last_update_match:
groups = last_update_match.groupdict()
time = groups['time']
time_zone = groups['time_zone']
day = groups['day']
month = groups['month']
date = groups['date']
year = groups['year']
full_date = f"{day}, {month}/{date}/{year}"
cts_env_dict['cts_env'].update(
{'last_update': {'date': full_date, 'time': time, 'time_zone': time_zone}})
continue
# Env-data expires in 0:00:46:51 (dd:hr:mm:sec)
expiration_match = expiration_capture.match(line)
if expiration_match:
groups = expiration_match.groupdict()
expiration = groups['expiration']
cts_env_dict['cts_env']['expiration'] = expiration
continue
# Env-data refreshes in 0:00:46:51 (dd:hr:mm:sec)
refresh_match = refresh_capture.match(line)
if refresh_match:
groups = refresh_match.groupdict()
refresh = groups['refresh']
cts_env_dict['cts_env']['refresh'] = refresh
continue
# Cache data applied = NONE
cache_data_match = cache_data_capture.match(line)
if cache_data_match:
groups = cache_data_match.groupdict()
cache_data_status = groups['cache_data_status']
cts_env_dict['cts_env']['cache_data_status'] = cache_data_status
continue
# State Machine is running
state_machine_match = state_machine_capture.match(line)
if state_machine_match:
groups = state_machine_match.groupdict()
state_machine_status = groups['state_machine_status']
cts_env_dict['cts_env']['state_machine_status'] = state_machine_status
continue
# Retry_timer (60 secs) is not running
retry_match = retry_capture.match(line)
if retry_match:
groups = retry_match.groupdict()
retry_timer_secs = int(groups['retry_timer_secs'])
retry_timer_status = groups['retry_timer_status']
cts_env_dict['cts_env']['retry_timer_secs'] = retry_timer_secs
cts_env_dict['cts_env']['retry_timer_status'] = retry_timer_status
continue
return cts_env_dict
# ======================
# Schema for:
# * 'show cts rbacl'
# ======================
class ShowCtsRbaclSchema(MetaParser):
"""Schema for show cts rbacl."""
schema = {
"cts_rbacl": {
"ip_ver_support": str,
"name": {
str: {
"ip_protocol_version": str,
"refcnt": int,
"flag": str,
"stale": bool,
"aces": {
Optional(int): {
Optional("action"): str,
Optional("protocol"): str,
Optional("direction"): str,
Optional("port"): int
}
}
}
}
}
}
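# Illustrative dict shape that satisfies the schema above (a sketch with
# made-up values, shown only to document the expected structure):
#
#     {
#         "cts_rbacl": {
#             "ip_ver_support": "IPv4 & IPv6",
#             "name": {
#                 "TCP_51005-01": {
#                     "ip_protocol_version": "IPV4",
#                     "refcnt": 2,
#                     "flag": "0x41000000",
#                     "stale": False,
#                     "aces": {
#                         1: {"action": "permit", "protocol": "tcp",
#                             "direction": "dst", "port": 51005}
#                     }
#                 }
#             }
#         }
#     }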
# ======================
# Parser for:
# * 'show cts rbacl'
# ======================
class ShowCtsRbacl(ShowCtsRbaclSchema):
"""Parser for show cts rbacl"""
cli_command = 'show cts rbacl'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
cts_rbacl_dict = {}
# CTS RBACL Policy
# ================
# RBACL IP Version Supported: IPv4 & IPv6
# name = TCP_51005-01
# IP protocol version = IPV4
# refcnt = 2
# flag = 0x41000000
# stale = FALSE
# RBACL ACEs:
# permit tcp dst eq 51005
#
# name = TCP_51060-02
# IP protocol version = IPV4
# refcnt = 4
# flag = 0x41000000
# stale = FALSE
# RBACL ACEs:
# permit tcp dst eq 51060
#
# name = TCP_51144-01
# IP protocol version = IPV4
# refcnt = 10
# flag = 0x41000000
# stale = FALSE
# RBACL ACEs:
# permit tcp dst eq 51144
#
# name = TCP_51009-01
# IP protocol version = IPV4
# refcnt = 2
# flag = 0x41000000
# stale = FALSE
# RBACL ACEs:
# permit tcp dst eq 51009
# RBACL IP Version Supported: IPv4 & IPv6
ip_ver_capture = re.compile(r"^RBACL\s+IP\s+Version\s+Supported:\s(?P<ip_ver_support>.*$)")
# name = TCP_13131-01
# IP protocol version = IPV4
# refcnt = 2
# flag = 0x41000000
# stale = FALSE
rbacl_capture = re.compile(r"^(?P<rbacl_key>.*)(?==)=\s+(?P<rbacl_value>.*$)")
# permit tcp dst eq 13131
rbacl_ace_capture = re.compile(
r"^(?P<action>(permit|deny))\s+(?P<protocol>\S+)(\s+(?P<direction>dst|src)\s+((?P<port_condition>)\S+)\s+(?P<port>\d+)|)")
remove_lines = ('CTS RBACL Policy', '================', 'RBACL ACEs:')
# Remove unwanted lines from raw text
def filter_lines(raw_output, remove_lines):
# Remove empty lines
clean_lines = list(filter(None, raw_output.splitlines()))
rendered_lines = []
for clean_line in clean_lines:
clean_line_strip = clean_line.strip()
if not clean_line_strip.startswith(remove_lines):
rendered_lines.append(clean_line_strip)
return rendered_lines
out = filter_lines(raw_output=out, remove_lines=remove_lines)
rbacl_name = ''
rbacl_ace_index = 1
for line in out:
# RBACL IP Version Supported: IPv4 & IPv6
ip_ver_match = ip_ver_capture.match(line)
if ip_ver_match:
groups = ip_ver_match.groupdict()
ip_ver_support = groups['ip_ver_support']
if not cts_rbacl_dict.get('cts_rbacl', {}):
cts_rbacl_dict['cts_rbacl'] = {}
cts_rbacl_dict['cts_rbacl']['name'] = {}
cts_rbacl_dict['cts_rbacl']['ip_ver_support'] = ip_ver_support
continue
# name = TCP_13131-01
# IP protocol version = IPV4
# refcnt = 2
# flag = 0x41000000
# stale = FALSE
elif rbacl_capture.match(line):
groups = rbacl_capture.match(line).groupdict()
rbacl_key = groups['rbacl_key'].strip().lower().replace(' ', '_')
rbacl_value = groups['rbacl_value']
if rbacl_value.isdigit():
rbacl_value = int(rbacl_value)
if rbacl_value == "TRUE" or rbacl_value == "FALSE":
if rbacl_value == "TRUE":
rbacl_value = True
else:
rbacl_value = False
if not cts_rbacl_dict.get('cts_rbacl', {}):
cts_rbacl_dict['cts_rbacl'] = {}
if rbacl_key == 'name':
rbacl_name = rbacl_value
cts_rbacl_dict['cts_rbacl']['name'][rbacl_name] = {}
rbacl_ace_index = 1
else:
cts_rbacl_dict['cts_rbacl']['name'][rbacl_name].update({rbacl_key: rbacl_value})
continue
# permit tcp dst eq 13131
elif rbacl_ace_capture.match(line):
groups = rbacl_ace_capture.match(line).groupdict()
ace_group_dict = {}
cts_rbacl_dict['cts_rbacl']['name'][rbacl_name].setdefault('aces', {})
if groups['action']:
ace_group_dict.update({'action': groups['action']})
if groups['protocol']:
ace_group_dict.update({'protocol': groups['protocol']})
if groups['direction']:
ace_group_dict.update({'direction': groups['direction']})
if groups['port_condition']:
ace_group_dict.update({'port_condition': groups['port_condition']})
if groups['port']:
ace_group_dict.update({'port': int(groups['port'])})
if not cts_rbacl_dict['cts_rbacl']['name'][rbacl_name]['aces'].get(rbacl_ace_index, {}):
cts_rbacl_dict['cts_rbacl']['name'][rbacl_name]['aces'][rbacl_ace_index] = ace_group_dict
rbacl_ace_index = rbacl_ace_index + 1
continue
return cts_rbacl_dict
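# Illustrative usage (a sketch; `device` is assumed to be a connected
# pyATS/Genie device object):
#
#     parsed = device.parse('show cts rbacl')
#     parsed['cts_rbacl']['ip_ver_support']                  # e.g. 'IPv4 & IPv6'
#     parsed['cts_rbacl']['name']['TCP_51005-01']['refcnt']  # e.g. 2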
# ====================================
# Schema for:
# * 'show cts role-based permissions'
# ====================================
class ShowCtsRoleBasedPermissionsSchema(MetaParser):
"""Schema for show cts role-based permissions."""
schema = {
"indexes": {
int: {
Optional("policy_name"): str,
"action_policy": str,
"action_policy_group": str,
Optional("src_grp_id"): int,
Optional("src_grp_name"): str,
Optional("unknown_group"): str,
Optional("dst_group_id"): int,
Optional("dst_group_name"): str,
Optional("policy_groups"): list
},
"monitor_dynamic": bool,
"monitor_configured": bool
}
}
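# Illustrative dict shape that satisfies the schema above (a sketch with
# made-up values):
#
#     {
#         "indexes": {
#             1: {
#                 "src_grp_id": 42,
#                 "src_grp_name": "Untrusted",
#                 "unknown_group": "Unknown",
#                 "action_policy": "Deny",
#                 "action_policy_group": "IP-00",
#                 "policy_groups": ["ACCESS-01"],
#             },
#             "monitor_dynamic": False,
#             "monitor_configured": False,
#         }
#     }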
# ====================================
# Parser for:
# * 'show cts role-based permissions'
# ====================================
class ShowCtsRoleBasedPermissions(ShowCtsRoleBasedPermissionsSchema):
"""Parser for show cts role-based permissions"""
cli_command = 'show cts role-based permissions'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
cts_rb_permissions_dict = {}
# IPv4 Role-based permissions default:
rb_default_capture = re.compile(r"^IPv4\s+Role-based\s+permissions\s+(?P<default_group>default)")
# IPv4 Role-based permissions from group 42:Untrusted to group Unknown:
rb_permissions_capture = re.compile(
r"^IPv4\s+Role-based\s+permissions\s+from\s+group\s+(?P<src_grp_id>\d+):(?P<src_grp_name>\S+)\s+to\s+group\s((?P<unknown_group>Unknown)|(?P<dst_group_id>\d+):(?P<dst_group_name>\S+)):")
# Deny IP-00
policy_action_capture = re.compile(r"^(?P<action_policy>(Permit|Deny))\s+(?P<action_policy_group>\S+)")
# ACCESS-01
policy_group_capture = re.compile(r"^(?P<policy_group>\w+-\d+)")
# RBACL Monitor All for Dynamic Policies : FALSE
monitor_dynamic_capture = re.compile(
r"^RBACL\s+Monitor\s+All\s+for\s+Dynamic\s+Policies\s+:\s+(?P<monitor_dynamic>(TRUE|FALSE))")
#RBACL Monitor All for Configured Policies : FALSE
monitor_configured_capture = re.compile(
r"^RBACL\s+Monitor\s+All\s+for\s+Configured\s+Policies\s+:\s+(?P<monitor_configured>(TRUE|FALSE))")
# Remove unwanted lines from raw text
def filter_lines(raw_output):
# Remove empty lines
clean_lines = list(filter(None, raw_output.splitlines()))
rendered_lines = []
for clean_line in clean_lines:
clean_line_strip = clean_line.strip()
# print(clean_line)
rendered_lines.append(clean_line_strip)
return rendered_lines
out = filter_lines(raw_output=out)
# Index value for each policy which will increment as it matches a new policy
policy_index = 1
# Index value for each policy group which will increment as it matches a new policy group
policy_group_index = 1
# Used to populate data for each policy and the policy index will be used as the key.
policy_data = {}
for line in out:
# IPv4 Role-based permissions default:
if rb_default_capture.match(line):
policy_group_index = 1
rb_default_match = rb_default_capture.match(line)
groups = rb_default_match.groupdict()
default_group = groups['default_group']
policy_data = {'policy_name': default_group}
if not cts_rb_permissions_dict.get('indexes', {}):
cts_rb_permissions_dict['indexes'] = {}
continue
# IPv4 Role-based permissions from group 42:Untrusted to group Unknown:
elif rb_permissions_capture.match(line):
policy_group_index = 1
rb_permissions_match = rb_permissions_capture.match(line)
groups = rb_permissions_match.groupdict()
policy_data = {}
if not cts_rb_permissions_dict.get('indexes', {}):
cts_rb_permissions_dict['indexes'] = {}
for k, v in groups.items():
if v:
if v.isdigit():
v = int(v)
policy_data.update({k: v})
continue
# ACCESS-01
elif policy_group_capture.match(line):
policy_group_match = policy_group_capture.match(line)
groups = policy_group_match.groupdict()
policy_group = groups['policy_group']
if not policy_data.get('policy_groups', []):
policy_data['policy_groups'] = []
policy_data['policy_groups'].append(policy_group)
continue
# Deny IP-00
elif policy_action_capture.match(line):
policy_action_match = policy_action_capture.match(line)
groups = policy_action_match.groupdict()
action_policy = groups['action_policy']
action_policy_group = groups['action_policy_group']
for k, v in groups.items():
policy_data.update({k: v})
cts_rb_permissions_dict['indexes'][policy_index] = policy_data
policy_index = policy_index + 1
continue
# RBACL Monitor All for Dynamic Policies : FALSE
elif monitor_dynamic_capture.match(line):
monitor_dynamic_match = monitor_dynamic_capture.match(line)
groups = monitor_dynamic_match.groupdict()
monitor_dynamic = groups['monitor_dynamic']
if monitor_dynamic == 'FALSE':
monitor_dynamic = False
else:
monitor_dynamic = True
cts_rb_permissions_dict['indexes']['monitor_dynamic'] = monitor_dynamic
continue
# RBACL Monitor All for Configured Policies : FALSE
elif monitor_configured_capture.match(line):
monitor_configured_match = monitor_configured_capture.match(line)
groups = monitor_configured_match.groupdict()
monitor_configured = groups['monitor_configured']
if monitor_configured == 'FALSE':
monitor_configured = False
else:
monitor_configured = True
cts_rb_permissions_dict['indexes']['monitor_configured'] = monitor_configured
continue
return cts_rb_permissions_dict
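# Illustrative usage (a sketch; `device` is assumed to be a connected
# pyATS/Genie device object):
#
#     parsed = device.parse('show cts role-based permissions')
#     parsed['indexes'][1].get('policy_groups')   # e.g. ['ACCESS-01']
#     parsed['indexes']['monitor_dynamic']        # e.g. False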
|
"""
Driver of graph construction, optimization, and linking.
"""
import copy
import copyreg
import logging
import os
import pickle
import time
import warnings
from itertools import chain
from typing import List
import numpy as np
import aesara
import aesara.compile.profiling
from aesara.compile.compilelock import lock_ctx
from aesara.compile.io import In, SymbolicInput, SymbolicOutput
from aesara.compile.ops import deep_copy_op, view_op
from aesara.configdefaults import config
from aesara.graph.basic import (
Constant,
Variable,
ancestors,
clone_get_equiv,
graph_inputs,
vars_between,
)
from aesara.graph.destroyhandler import DestroyHandler
from aesara.graph.features import PreserveVariableAttributes
from aesara.graph.fg import FunctionGraph, InconsistencyError
from aesara.graph.op import HasInnerGraph
from aesara.graph.opt_utils import is_same_graph
from aesara.graph.utils import get_variable_trace_string
from aesara.link.basic import Container
from aesara.link.utils import raise_with_op
_logger = logging.getLogger("aesara.compile.function.types")
__docformat__ = "restructuredtext en"
class UnusedInputError(Exception):
"""
A symbolic input passed to function is not needed.
"""
def alias_root(v):
"""
Return the variable to which v is aliased by view_maps and destroy_maps.
"""
if v.owner is None:
return v
vmap = v.owner.op.view_map
dmap = v.owner.op.destroy_map
outpos = v.owner.outputs.index(v)
v_views = vmap.get(outpos, []) + dmap.get(outpos, [])
if len(v_views) > 1:
raise NotImplementedError(
f"{v} is a view/destroyed version of more then one inputs. "
"Currently, we only support the case where an output is a view or "
"a destroyed version of one input."
)
elif v_views:
return alias_root(v.owner.inputs[v_views[0]])
else:
return v
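# Illustrative sketch (hypothetical Op): an Op that declares
#     view_map = {0: [1]}
# states that its output 0 is a view of its input 1. alias_root() follows
# such view_map/destroy_map entries recursively, so for
#     out = some_view_op(a, b)    # with the view_map above
# alias_root(out) resolves to whatever variable `b` is itself a view or
# destroyed version of (ultimately `b` itself if it is not).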
def view_tree_set(fgraph, v, treeset):
"""
Add to `treeset` all variables that are views of v, given that v is
not a view.
"""
treeset.add(v)
for cl, v_input_pos_to_cl in fgraph.clients[v]:
if cl == "output":
continue
vmap = cl.op.view_map
dmap = cl.op.destroy_map
for opos, iposlist in chain(vmap.items(), dmap.items()):
if v_input_pos_to_cl in iposlist:
if cl.outputs[opos] not in treeset:
view_tree_set(fgraph, cl.outputs[opos], treeset)
def infer_reuse_pattern(fgraph, outputs_to_disown):
"""
Given an fgraph and a list of variables, returns the list or set
of all variables which may share the same underlying data storage
as any of the specified variables. Used internally by function,
FunctionMaker.
This list (or set) is also referred to as no_recycling sometimes,
especially by linker code.
"""
rval = set()
for o in outputs_to_disown:
view_tree_set(fgraph, alias_root(o), rval)
# remove from rval all of the inputs, constants, values.
rval = {r for r in rval if r.owner is not None}
return rval
def fgraph_updated_vars(fgraph, expanded_inputs):
"""
Reconstruct the full "updates" dictionary, mapping from FunctionGraph input
variables to the fgraph outputs that will replace their values.
Returns
-------
dict variable -> variable
"""
updated_vars = {}
potential_values = list(fgraph.outputs) # copy the list
if len(expanded_inputs) != len(fgraph.inputs):
raise ValueError("expanded_inputs must match len(fgraph.inputs)")
for e_input, ivar in reversed(list(zip(expanded_inputs, fgraph.inputs))):
if e_input.update is not None:
updated_vars[ivar] = potential_values.pop()
return updated_vars
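# Illustrative sketch: for expanded_inputs = [In(x), In(s, update=s + 1)]
# and an fgraph whose outputs are [y, s + 1] (updates are appended after the
# requested outputs), this returns {s: s + 1}, i.e. the fgraph input `s`
# will take the value of the fgraph output `s + 1` after each call.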
class Supervisor:
"""
Listener for FunctionGraph events which makes sure that no
operation overwrites the contents of protected Variables. The
outputs of the FunctionGraph are protected by default.
"""
def __init__(self, protected):
self.protected = list(protected)
def validate(self, fgraph):
if config.cycle_detection == "fast" and hasattr(fgraph, "has_destroyers"):
if fgraph.has_destroyers(self.protected):
raise InconsistencyError("Trying to destroy protected variables.")
return True
if not hasattr(fgraph, "destroyers"):
return True
for r in self.protected + list(fgraph.outputs):
if fgraph.destroyers(r):
raise InconsistencyError(f"Trying to destroy a protected variable: {r}")
def std_fgraph(input_specs, output_specs, accept_inplace=False):
"""
Makes an FunctionGraph corresponding to the input specs and the output
specs. Any SymbolicInput in the input_specs, if its update field
is not None, will add an output to the FunctionGraph corresponding to that
update. The return value is the FunctionGraph as well as a list of
SymbolicOutput instances corresponding to the updates.
If accept_inplace is False, the graph will be checked for inplace
operations and an exception will be raised if it has any. If
accept_inplace is True, a DestroyHandler will be added to the FunctionGraph
if there are any inplace operations.
The returned FunctionGraph is a clone of the graph between the provided
inputs and outputs.
"""
orig_inputs = [spec.variable for spec in input_specs]
# Extract the updates and the mapping between update outputs and
# the updated inputs.
updates = []
update_mapping = {}
out_idx = len(output_specs)
for inp_idx in range(len(input_specs)):
if input_specs[inp_idx].update:
updates.append(input_specs[inp_idx].update)
update_mapping[out_idx] = inp_idx
out_idx += 1
orig_outputs = [spec.variable for spec in output_specs] + updates
fgraph = FunctionGraph(orig_inputs, orig_outputs, update_mapping=update_mapping)
for node in fgraph.apply_nodes:
if node.op.destroy_map:
if not accept_inplace:
raise TypeError(f"Graph must not contain inplace operations: {node}")
else:
fgraph.attach_feature(DestroyHandler())
break
# We need to protect all immutable inputs from inplace operations.
fgraph.attach_feature(
Supervisor(
input
for spec, input in zip(input_specs, fgraph.inputs)
if not (
spec.mutable
or (hasattr(fgraph, "destroyers") and fgraph.has_destroyers([input]))
)
)
)
# If named nodes are replaced, keep the name
for feature in std_fgraph.features:
fgraph.attach_feature(feature())
return fgraph, list(map(SymbolicOutput, updates))
std_fgraph.features = [PreserveVariableAttributes]
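# Illustrative sketch of the update handling (assuming tensor variables
# `xv` and `sv` of compatible types):
#
#     fg, extra_outs = std_fgraph(
#         [In(xv), In(sv, update=sv + 1)],
#         [SymbolicOutput(xv * sv)],
#     )
#     # fg has two outputs: the requested product and the update expression
#     # sv + 1; `extra_outs` wraps that update output.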
class AliasedMemoryError(Exception):
"""
Memory is aliased that should not be.
"""
###
# Function
###
# unique id object used as a placeholder for duplicate entries
DUPLICATE = ["DUPLICATE"]
class Function:
"""
Type of the functions returned by aesara.function or
aesara.FunctionMaker.create.
`Function` is the callable object that does computation. It has the storage
of inputs and outputs, performs the packing and unpacking of inputs and
return values. It implements the square-bracket indexing so that you can
look up the value of a symbolic node.
Functions are copyable via {{{fn.copy()}}} and {{{copy.copy(fn)}}}.
When a function is copied, this instance is duplicated. Contrast with
self.maker (instance of `FunctionMaker`) that is shared between copies.
The meaning of copying a function is that the containers and their current
values will all be duplicated. This requires that mutable inputs be
copied, whereas immutable inputs may be shared between copies.
A Function instance is hashable, on the basis of its memory
address (its id).
A Function instance is only equal to itself.
A Function instance may be serialized using the `pickle` or
`cPickle` modules. This will save all default inputs, the graph,
and WRITEME to the pickle file.
A Function instance has a ``trust_input`` field that defaults to
False. When True, the extra input checks that produce better error
messages are skipped. In some cases the Python implementation will
still return correct results if you pass a Python or NumPy scalar
instead of a NumPy tensor, whereas the C implementation should raise
an error if you pass an object of the wrong type.
Attributes
----------
finder
inv_finder
"""
pickle_aliased_memory_strategy = "warn"
"""
How to deal with pickling finding aliased storage.
Meaningful settings are: 'ignore', 'warn', 'raise'.
If the value is 'warn', then a message will be printed to stderr
if aliased storage is detected during pickle.dump.
If the value is 'raise', then an AliasedMemoryError will be raised
if aliased storage is detected during pickle.dump.
"""
input_storage = None
"""
List of Container instances.
"""
output_storage = None
"""
List of Container instances.
"""
indices = None
"""
List of (SymbolicInput, indices, [SymbolicInput,...]),
one tuple for each input.
The first tuple element is the SymbolicInput object for the corresponding
function input.
The second and third tuple elements are used only by Kits, which
are deprecated.
"""
defaults = None
"""
List of 3-tuples, one 3-tuple for each input.
Tuple element 0: Bool: Is this input required at each function call?
Tuple element 1: Bool: Should this input's value be reverted after
each call?
Tuple element 2: Any: The value associated with this input.
"""
unpack_single = None
"""
Bool: for output lists of length 1, should the 0'th element be
returned directly?
"""
return_none = None
"""
Bool: whether the function should return None or not.
"""
maker = None
"""
FunctionMaker instance.
"""
fn = None
"""
A function that evaluates the graph. Typically a linker's make_thunk method
created this function.
"""
finder = None
"""
Dictionary mapping several kinds of things to containers.
We set an entry in finder for:
- the index of the input
- the variable instance the input is based on
- the name of the input
All entries map to the container or to DUPLICATE if an ambiguity
is detected.
"""
inv_finder = None
"""
Dict. Reverse lookup of `finder`.
It maps container -> SymbolicInput
"""
def __init__(
self,
fn,
input_storage,
output_storage,
indices,
outputs,
defaults,
unpack_single,
return_none,
output_keys,
maker,
name=None,
):
self.fn = fn
self.input_storage = input_storage
self.output_storage = output_storage
self.indices = indices
self.outputs = outputs
self.defaults = defaults
self.unpack_single = unpack_single
self.return_none = return_none
self.maker = maker
self.profile = None # reassigned in FunctionMaker.create
self.trust_input = False # If True, we don't check the input parameter
self.name = name
self.nodes_with_inner_function = []
self.output_keys = output_keys
# See if we have any mutable / borrow inputs
# TODO: this only needs to be set if there is more than 1 input
self._check_for_aliased_inputs = False
for i in maker.inputs:
# If the input is a shared variable, the memory region is
# under Aesara control and so we don't need to check if it
# is aliased as we never do that.
if (
isinstance(i, In)
and not i.shared
and (getattr(i, "borrow", False) or getattr(i, "mutable", False))
):
self._check_for_aliased_inputs = True
break
# We will be popping stuff off this `containers` object. It is a copy.
containers = list(self.input_storage)
finder = {}
inv_finder = {}
def distribute(indices, cs, value):
input.distribute(value, indices, cs)
for c in cs:
c.provided += 1
# Store the list of names of named inputs.
named_inputs = []
# Count the number of un-named inputs.
n_unnamed_inputs = 0
# Initialize the storage
# this loop works by modifying the elements (as variable c) of
# self.input_storage inplace.
for i, ((input, indices, sinputs), (required, refeed, value)) in enumerate(
zip(self.indices, defaults)
):
if indices is None:
# containers is being used as a stack. Here we pop off
# the next one.
c = containers[0]
c.strict = getattr(input, "strict", False)
c.allow_downcast = getattr(input, "allow_downcast", None)
if value is not None:
# Always initialize the storage.
if isinstance(value, Container):
# There is no point in obtaining the current value
# stored in the container, since the container is
# shared.
# For safety, we make sure 'refeed' is False, since
# there is no need to refeed the default value.
assert not refeed
else:
c.value = value
c.required = required
c.implicit = input.implicit
# this is a count of how many times the input has been
# provided (reinitialized to 0 on __call__)
c.provided = 0
finder[i] = c
finder[input.variable] = c
if input.name not in finder:
finder[input.name] = c
else:
finder[input.name] = DUPLICATE
if input.name is None:
n_unnamed_inputs += 1
else:
named_inputs.append(input.name)
inv_finder[c] = input
containers[:1] = []
self.finder = finder
self.inv_finder = inv_finder
# this class is important in overriding the square-bracket notation:
# fn.value[x]
# self reference is available via the closure on the class
class ValueAttribute:
def __getitem__(self, item):
try:
s = finder[item]
except KeyError:
raise TypeError(f"Unknown input or state: {item}")
if s is DUPLICATE:
raise TypeError(
f"Ambiguous name: {item} - please check the "
"names of the inputs of your function "
"for duplicates."
)
if isinstance(s, Container):
return s.value
else:
raise NotImplementedError
def __setitem__(self, item, value):
try:
s = finder[item]
except KeyError:
# Print informative error message.
msg = get_info_on_inputs(named_inputs, n_unnamed_inputs)
raise TypeError(f"Unknown input or state: {item}. {msg}")
if s is DUPLICATE:
raise TypeError(
f"Ambiguous name: {item} - please check the "
"names of the inputs of your function "
"for duplicates."
)
if isinstance(s, Container):
s.value = value
s.provided += 1
else:
s(value)
def __contains__(self, item):
return finder.__contains__(item)
# this class is important in overriding the square-bracket notation:
# fn.container[x]
# self reference is available via the closure on the class
class ContainerAttribute:
def __getitem__(self, item):
return finder[item]
def __contains__(self, item):
return finder.__contains__(item)
# You cannot set the container
self._value = ValueAttribute()
self._container = ContainerAttribute()
# Compute self.n_returned_outputs.
# This is used only when fn.need_update_inputs is False
# because we're using one of the VM objects and it is
# putting updates back into the input containers all by itself.
assert len(self.maker.expanded_inputs) == len(self.input_storage)
self.n_returned_outputs = len(self.output_storage)
for input in self.maker.expanded_inputs:
if input.update is not None:
self.n_returned_outputs -= 1
for node in self.maker.fgraph.apply_nodes:
if isinstance(node.op, HasInnerGraph):
self.nodes_with_inner_function.append(node.op)
def __contains__(self, item):
return self.value.__contains__(item)
def __getitem__(self, item):
return self.value[item]
def __setitem__(self, item, value):
self.value[item] = value
def __copy__(self):
"""
Copy a function. The copied function has intermediate storage and
output storage that are separate from the original function's.
"""
return self.copy()
def copy(
self,
share_memory=False,
swap=None,
delete_updates=False,
name=None,
profile=None,
):
"""
Copy this function. The copied function will have a maker and fgraph
that are separate from the original function's. The user can choose
whether to also separate storage via the share_memory argument.
Parameters
----------
share_memory : boolean
When True, the two functions share intermediate storage (all storage
except the input and output storage). Otherwise the two functions only
share partial storage and the same maker. If two functions share memory
and allow_gc=False, this increases execution speed and saves memory.
swap : dict
Dictionary mapping old SharedVariables to new SharedVariables.
Default is None.
NOTE: The shared variable swap is only done in the newly returned
function, not in the user graph.
delete_updates : boolean
If True, the copied function will not have updates.
name : string
If provided, will be the name of the new Function.
Otherwise, it will be the old name + " copy".
profile :
As per the aesara.function profile parameter.
Returns
-------
aesara.Function
The copied aesara.Function.
"""
# helper function
def checkSV(sv_ori, sv_rpl):
"""
Assert that two SharedVariables follow some restrictions:
1. same type
2. same shape or dim?
"""
SharedVariable = aesara.tensor.sharedvar.SharedVariable
assert isinstance(sv_ori, SharedVariable), (
"Key of swap should be SharedVariable, given:",
sv_ori,
" type",
type(sv_ori),
)
assert isinstance(sv_rpl, SharedVariable), (
"Value of swap should be SharedVariable, given:",
sv_rpl,
"type",
type(sv_ori),
)
assert sv_ori.type == sv_rpl.type, (
"Type of given SharedVariable conflicts with original one",
"Type of given SharedVariable:",
sv_rpl.type,
"Type of original SharedVariable:",
sv_ori.type,
)
maker = self.maker
# Copy Ins and their storage.
# so that they have different storage as their value
ins = [copy.copy(input) for input in maker.inputs]
# Delete update output in fgraph and updates In instances if needed
if delete_updates:
# The first len(maker.outputs) variables are original variables.
# The rest are the updates.
out_vars = maker.fgraph.outputs[: len(maker.outputs)]
else:
out_vars = maker.fgraph.outputs
# Init new fgraph using copied variables and get memo
# memo: a dict that map old variables to new variables
memo = clone_get_equiv(maker.fgraph.inputs, out_vars)
fg_cpy = FunctionGraph(
[memo[i] for i in maker.fgraph.inputs],
[memo[o] for o in out_vars],
clone=False,
)
# Re initialize Outs and swap update and variable in Ins
# By doing this, we can pass FunctionMaker._check_unused_inputs()
outs = list(map(SymbolicOutput, fg_cpy.outputs[: len(maker.outputs)]))
for out_ori, out_cpy in zip(maker.outputs, outs):
out_cpy.borrow = out_ori.borrow
# swap SharedVariable
if swap is not None:
exist_svs = [i.variable for i in maker.inputs]
# Check if the given SharedVariables exist
for sv in swap.keys():
if sv not in exist_svs:
raise ValueError(f"SharedVariable: {sv.name} not found")
# Swap SharedVariable in fgraph and In instances
for index, (i, in_v) in enumerate(zip(ins, fg_cpy.inputs)):
# Variables in maker.inputs are defined by user, therefore we
# use them to make comparison and do the mapping.
# Otherwise we don't touch them.
var = maker.inputs[index].variable
if var in swap:
swap_sv = swap[var]
checkSV(i.variable, swap_sv)
# swap variable and value of In instances
i.variable = swap_sv
i.value = swap_sv.container
# In the fgraph we use the cloned SharedVariable
swap_sv = swap_sv.clone()
# Swap SharedVariable in fgraph
# if inputs was replaced, change self.inputs
fg_cpy.inputs[index] = swap_sv
fg_cpy.replace(in_v, swap_sv, reason="Swap SV")
# Delete update if needed
update_i = len(outs)
for i, in_var in zip(ins, fg_cpy.inputs):
i.variable = in_var
if not delete_updates and i.update is not None:
i.update = fg_cpy.outputs[update_i]
update_i += 1
else:
i.update = None
# Construct new storage_map that map new variable to old storage,
# so that the ensuing function shares storage with the original one
storage_map = self.fn.storage_map
new_storage_map = {}
# TODO: We could share the output storage, but we must make sure
# two different function calls won't override each other's values. This
# is already done elsewhere, so to reuse it the user would need to
# use Out(var, borrow=True) and maybe the mutable=True flag too.
# But to be safe for now, as it isn't documented and we aren't sure
# it is well tested, we don't share that part of the storage_map.
if share_memory:
i_o_vars = maker.fgraph.inputs + maker.fgraph.outputs
for key in storage_map.keys():
if key not in i_o_vars:
new_storage_map[memo[key]] = storage_map[key]
if not name and self.name:
name = self.name + " copy"
input_storage = [i.value for i in ins]
# reinitialize new maker and create new function
if profile is None:
profile = config.profile or config.print_global_stats
# profile -> True or False
if profile is True:
if name:
message = name
else:
message = str(profile.message) + " copy"
profile = aesara.compile.profiling.ProfileStats(message=message)
# profile -> object
elif type(profile) == str:
profile = aesara.compile.profiling.ProfileStats(message=profile)
f_cpy = maker.__class__(
inputs=ins,
outputs=outs,
fgraph=fg_cpy,
mode=maker.mode,
profile=profile,
# When removing updates containing variables
# not used in the output function, copy
# generates an unused implicit input.
# We ignore the resulting errors,
# but could change it to 'warn' if this might
# cause problems.
on_unused_input="ignore",
function_builder=maker.function_builder,
# As this is an optimized graph, it
# can contain inplace. DebugMode check
# that.
accept_inplace=True,
).create(input_storage, storage_map=new_storage_map)
for in_ori, in_cpy, ori, cpy in zip(
maker.inputs, f_cpy.maker.inputs, self.input_storage, f_cpy.input_storage
):
# Share the storage of immutable SharedVariable and constant inputs
swapped = swap is not None and in_ori.variable in swap
# Using the original storage if SharedVariable will not be updated
# and is not swapped
if not in_ori.mutable and not swapped:
cpy.data = ori.data
in_cpy.value = in_ori.value
# Reconstruct Function.finder, which maps Variables defined by the user
# to containers, so that Function.value and Function.data work well.
# Replace variables in the new maker.inputs by the original ones,
# so that the user can swap SharedVariables in a swapped function.
container = f_cpy.finder.pop(in_cpy.variable)
if not swapped:
f_cpy.finder[in_ori.variable] = container
in_cpy.variable = in_ori.variable
else:
f_cpy.finder[swap[in_ori.variable]] = container
in_cpy.variable = swap[in_ori.variable]
f_cpy.name = name
f_cpy.maker.fgraph.name = name
return f_cpy
def __call__(self, *args, **kwargs):
"""
Evaluates value of a function on given arguments.
Parameters
----------
args : list
List of inputs to the function. All inputs are required, even when
some of them are not necessary to calculate requested subset of
outputs.
kwargs : dict
The function inputs can be passed as keyword argument. For this, use
the name of the input or the input instance as the key.
Keyword argument ``output_subset`` is a list of either indices of the
function's outputs or the keys belonging to the `output_keys` dict
and represent outputs that are requested to be calculated. Regardless
of the presence of ``output_subset``, the updates are always calculated
and processed. To disable the updates, you should use the ``copy``
method with ``delete_updates=True``.
Returns
-------
list
List of outputs on indices/keys from ``output_subset`` or all of them,
if ``output_subset`` is not passed.
"""
def restore_defaults():
for i, (required, refeed, value) in enumerate(self.defaults):
if refeed:
if isinstance(value, Container):
value = value.storage[0]
self[i] = value
profile = self.profile
t0 = time.time()
output_subset = kwargs.pop("output_subset", None)
if output_subset is not None and self.output_keys is not None:
output_subset = [self.output_keys.index(key) for key in output_subset]
# Reinitialize each container's 'provided' counter
if self.trust_input:
i = 0
for arg in args:
s = self.input_storage[i]
s.storage[0] = arg
i += 1
else:
for c in self.input_storage:
c.provided = 0
if len(args) + len(kwargs) > len(self.input_storage):
raise TypeError("Too many parameter passed to aesara function")
# Set positional arguments
i = 0
for arg in args:
# TODO: provide a Param option for skipping the filter if we
# really want speed.
s = self.input_storage[i]
# see this email thread for a discussion about None as an input
# https://groups.google.com/group/theano-dev/browse_thread/thread/920a5e904e8a8525/4f1b311a28fc27e5
if arg is None:
s.storage[0] = arg
else:
try:
s.storage[0] = s.type.filter(
arg, strict=s.strict, allow_downcast=s.allow_downcast
)
except Exception as e:
function_name = "aesara function"
argument_name = "argument"
if self.name:
function_name += ' with name "' + self.name + '"'
if hasattr(arg, "name") and arg.name:
argument_name += ' with name "' + arg.name + '"'
where = get_variable_trace_string(self.maker.inputs[i].variable)
if len(e.args) == 1:
e.args = (
"Bad input "
+ argument_name
+ " to "
+ function_name
+ f" at index {int(i)} (0-based). {where}"
+ e.args[0],
)
else:
e.args = (
"Bad input "
+ argument_name
+ " to "
+ function_name
+ f" at index {int(i)} (0-based). {where}"
) + e.args
restore_defaults()
raise
s.provided += 1
i += 1
# Set keyword arguments
if kwargs: # for speed, skip the items for empty kwargs
for k, arg in kwargs.items():
self[k] = arg
if (
not self.trust_input
and
# The getattr is only needed for old pickle
getattr(self, "_check_for_aliased_inputs", True)
):
# Collect aliased inputs among the storage space
args_share_memory = []
for i in range(len(self.input_storage)):
i_var = self.maker.inputs[i].variable
i_val = self.input_storage[i].storage[0]
if hasattr(i_var.type, "may_share_memory"):
is_aliased = False
for j in range(len(args_share_memory)):
group_j = zip(
[
self.maker.inputs[k].variable
for k in args_share_memory[j]
],
[
self.input_storage[k].storage[0]
for k in args_share_memory[j]
],
)
if any(
[
(
var.type is i_var.type
and var.type.may_share_memory(val, i_val)
)
for (var, val) in group_j
]
):
is_aliased = True
args_share_memory[j].append(i)
break
if not is_aliased:
args_share_memory.append([i])
# Check for groups of more than one argument that share memory
for group in args_share_memory:
if len(group) > 1:
# copy all but the first
for j in group[1:]:
self.input_storage[j].storage[0] = copy.copy(
self.input_storage[j].storage[0]
)
# Check if inputs are missing, or if inputs were set more than once, or
# if we tried to provide inputs that are supposed to be implicit.
if not self.trust_input:
for c in self.input_storage:
if c.required and not c.provided:
restore_defaults()
raise TypeError(
f"Missing required input: {getattr(self.inv_finder[c], 'variable', self.inv_finder[c])}"
)
if c.provided > 1:
restore_defaults()
raise TypeError(
f"Multiple values for input: {getattr(self.inv_finder[c], 'variable', self.inv_finder[c])}"
)
if c.implicit and c.provided > 0:
restore_defaults()
raise TypeError(
f"Tried to provide value for implicit input: {getattr(self.inv_finder[c], 'variable', self.inv_finder[c])}"
)
# Do the actual work
t0_fn = time.time()
try:
outputs = (
self.fn()
if output_subset is None
else self.fn(output_subset=output_subset)
)
except Exception:
restore_defaults()
if hasattr(self.fn, "position_of_error"):
# this is a new vm-provided function or c linker
# they need this because the exception manipulation
# done by raise_with_op is not implemented in C.
thunk = None
if hasattr(self.fn, "thunks"):
thunk = self.fn.thunks[self.fn.position_of_error]
raise_with_op(
self.maker.fgraph,
node=self.fn.nodes[self.fn.position_of_error],
thunk=thunk,
storage_map=getattr(self.fn, "storage_map", None),
)
else:
# old-style linkers raise their own exceptions
raise
dt_fn = time.time() - t0_fn
self.maker.mode.fn_time += dt_fn
if profile:
profile.vm_call_time += dt_fn
# Retrieve the values that were computed
if outputs is None:
outputs = [x.data for x in self.output_storage]
assert len(outputs) == len(self.output_storage)
# Remove internal references to required inputs.
# These cannot be re-used anyway.
for c in self.input_storage:
if c.required:
c.storage[0] = None
# if we are allowing garbage collection, remove the
# output reference from the internal storage cells
if getattr(self.fn, "allow_gc", False):
assert len(self.output_storage) == len(self.maker.fgraph.outputs)
for o_container, o_variable in zip(
self.output_storage, self.maker.fgraph.outputs
):
if o_variable.owner is not None:
# this node is the variable of computation
# WARNING: This circumvents the 'readonly' attribute in x
o_container.storage[0] = None
if getattr(self.fn, "need_update_inputs", True):
# Update the inputs that have an update function
for input, storage in reversed(
list(zip(self.maker.expanded_inputs, self.input_storage))
):
if input.update is not None:
storage.data = outputs.pop()
else:
outputs = outputs[: self.n_returned_outputs]
# Put default values back in the storage
restore_defaults()
#
# NOTE: This logic needs to be replicated in
# scan.
# grep for 'PROFILE_CODE'
#
dt_call = time.time() - t0
aesara.compile.profiling.total_fct_exec_time += dt_call
self.maker.mode.call_time += dt_call
if profile:
profile.fct_callcount += 1
profile.fct_call_time += dt_call
if hasattr(self.fn, "update_profile"):
self.fn.update_profile(profile)
if profile.ignore_first_call:
profile.reset()
profile.ignore_first_call = False
if self.return_none:
return None
elif self.unpack_single and len(outputs) == 1 and output_subset is None:
return outputs[0]
else:
if self.output_keys is not None:
assert len(self.output_keys) == len(outputs)
if output_subset is None:
return dict(zip(self.output_keys, outputs))
else:
return {
self.output_keys[index]: outputs[index]
for index in output_subset
}
if output_subset is None:
return outputs
else:
return [outputs[i] for i in output_subset]
value = property(
lambda self: self._value,
None, # this property itself is not settable
doc="dictionary-like access to the values associated with Variables",
)
container = property(
lambda self: self._container,
None, # this property itself is not settable
doc=("dictionary-like access to the containers associated with " "Variables"),
)
def free(self):
"""
When allow_gc = False, clear the Variables in storage_map
"""
# 1.no allow_gc return False
# 2.has allow_gc, if allow_gc is False, return True
if not getattr(self.fn, "allow_gc", True):
for key in self.fn.storage_map:
if not isinstance(key, Constant):
self.fn.storage_map[key][0] = None
for node in self.nodes_with_inner_function:
if hasattr(node.fn, "free"):
node.fn.free()
def get_shared(self):
"""
Return the shared variables read or updated by this function.
"""
return [i.variable for i in self.maker.inputs if i.implicit]
def sync_shared(self):
if hasattr(aesara, "gpuarray") and aesara.gpuarray.pygpu_activated:
import pygpu
for i in self.maker.fgraph.update_mapping.values():
inp = self.input_storage[i]
if isinstance(inp.data, pygpu.gpuarray.GpuArray):
inp.data.sync()
# pickling/deepcopy support for Function
def _pickle_Function(f):
# copy of the input storage list
ins = list(f.input_storage)
input_storage = []
for (input, indices, inputs), (required, refeed, default) in zip(
f.indices, f.defaults
):
input_storage.append(ins[0])
del ins[0]
inputs_data = [x.data for x in f.input_storage]
# HACK to detect aliased storage.
# This is here because aliased relationships are not [currently]
# preserved across the pickle operation
if not (f.pickle_aliased_memory_strategy == "ignore"):
all_data = input_storage + inputs_data
for i, d_i in enumerate(all_data):
for j, d_j in enumerate(all_data):
if (
(i < j)
and isinstance(d_i, np.ndarray)
and isinstance(d_j, np.ndarray)
):
if np.may_share_memory(d_i, d_j):
if f.pickle_aliased_memory_strategy == "warn":
_logger.warning(
"aliased relationship between "
f"Function arguments {d_i}, {d_j} "
"will not be preserved by "
"un-pickling operation"
)
else:
raise AliasedMemoryError(d_i, d_j)
# The user can override trust_input. Our docs say so. We should
# not do that anymore and should make sure the Maker has all the
# information needed.
rval = (_constructor_Function, (f.maker, input_storage, inputs_data, f.trust_input))
return rval
def _constructor_Function(maker, input_storage, inputs_data, trust_input=False):
if not config.unpickle_function:
return None
f = maker.create(input_storage, trustme=True)
assert len(f.input_storage) == len(inputs_data)
for container, x in zip(f.input_storage, inputs_data):
assert (
(container.data is x)
or (isinstance(x, np.ndarray) and (container.data == x).all())
or (container.data == x)
)
f.trust_input = trust_input
return f
copyreg.pickle(Function, _pickle_Function)
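# Illustrative round trip (a sketch; requires config.unpickle_function to be
# True, otherwise _constructor_Function returns None):
#
#     import pickle
#     s = pickle.dumps(f)           # serialized via _pickle_Function
#     f_restored = pickle.loads(s)  # rebuilt through _constructor_Function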
###
# FunctionMaker
###
def insert_deepcopy(fgraph, wrapped_inputs, wrapped_outputs):
"""
Insert deepcopy in the fgraph to break aliasing of outputs
"""
# This loop was inserted to remove aliasing between outputs when
# they all evaluate to the same value. Originally it was OK for
# outputs to be aliased, but some of the outputs can be shared
# variables, and is not good for shared variables to be
# aliased. It might be possible to optimize this by making sure
# there is no aliasing only between shared variables.
# If some outputs are constant, we add deep copy to respect the
# memory contract
# We don't insert deep copy when the output.borrow is True for all
# concerned outputs.
assert len(wrapped_inputs) == len(fgraph.inputs)
assert len(wrapped_outputs) == len(fgraph.outputs)
reason = "insert_deepcopy"
updated_fgraph_inputs = {
fgraph_i
for i, fgraph_i in zip(wrapped_inputs, fgraph.inputs)
if getattr(i, "update", False)
}
# We can't use fgraph.inputs as it doesn't include Constant values.
all_graph_inputs = list(graph_inputs(fgraph.outputs))
has_destroyers_attr = hasattr(fgraph, "has_destroyers")
for i in range(len(fgraph.outputs)):
views_of_output_i = set()
view_tree_set(fgraph, alias_root(fgraph.outputs[i]), views_of_output_i)
copied = False
# do not allow outputs to be aliased
for j in range(i + 1, len(fgraph.outputs)):
# We could skip the deep copy if both outputs have borrow==True
# and not(wrapped_outputs[i].borrow and wrapped_outputs[j].borrow):
if fgraph.outputs[j] in views_of_output_i:
if wrapped_outputs[i].borrow and wrapped_outputs[j].borrow:
fgraph.change_input(
"output", i, view_op(fgraph.outputs[i]), reason=reason
)
else:
fgraph.change_input(
"output", i, deep_copy_op(fgraph.outputs[i]), reason=reason
)
copied = True
break
if not copied:
for input_j in all_graph_inputs:
# do not allow outputs to be aliased to an inputs (j), unless
# a) that j'th input has been 'destroyed' by
# e.g. in-place computations
# b) that j'th input is a shared variable that is also
# being updated
if input_j in updated_fgraph_inputs:
continue
if input_j in views_of_output_i and not (
has_destroyers_attr and fgraph.has_destroyers([input_j])
):
# We don't put deep_copy_op if the input and the
# output have borrow==True
if input_j in fgraph.inputs:
j = fgraph.inputs.index(input_j)
if wrapped_outputs[i].borrow and wrapped_inputs[j].borrow:
fgraph.change_input(
"output",
i,
view_op(fgraph.outputs[i]),
reason="insert_deepcopy",
)
break
else:
fgraph.change_input(
"output",
i,
deep_copy_op(fgraph.outputs[i]),
reason="insert_deepcopy",
)
break
elif wrapped_outputs[i].borrow:
fgraph.change_input(
"output",
i,
view_op(fgraph.outputs[i]),
reason="insert_deepcopy",
)
break
else:
fgraph.change_input(
"output",
i,
deep_copy_op(fgraph.outputs[i]),
reason="insert_deepcopy",
)
break
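# Illustrative effect (a sketch, assuming `x` is e.g. at.dvector("x")):
# requesting the same variable twice as an output without borrow makes
# insert_deepcopy wrap the outputs so the returned arrays do not alias
# each other:
#
#     f = aesara.function([x], [x, x])
#     a, b = f(np.ones(3))
#     a is b    # -> False (a deep_copy_op was inserted)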
NODEFAULT = ["NODEFAULT"]
class FunctionMaker:
"""
`FunctionMaker` is the class to `create` `Function` instances.
This class has the fgraph, the optimizer, and the linker. When
copying a `Function`, there is no need to duplicate the
`FunctionMaker` instance. Deepcopy still copies both, which can
variable in re-compilation.
Parameters
----------
inputs : list of SymbolicInput instances
outputs : list of SymbolicOutput instances
Outputs may also be a single Variable (not a list), in which case the
functions produced by FunctionMaker will return their output value
directly.
mode : Mode instance
Telling FunctionMaker how to optimize and link. None means to use the
`config.mode`.
accept_inplace : bool
True iff it is acceptable to have inplace operations in the graph from
the inputs to the outputs.
on_unused_input : {'raise', 'warn', 'ignore', None}
What to do if a variable in the 'inputs' list is not used in the graph.
Possible values are:
- 'raise': raise an error
- 'warn': log a warning
- 'ignore': do not do anything
- None: Use the value in the Aesara flags on_unused_input.
name : str
An optional name for this function. If used, the profile mode will
print the time spent in this function.
"""
@staticmethod
def wrap_in(input):
if isinstance(input, (SymbolicInput)):
return input
elif isinstance(input, Variable):
# r -> SymbolicInput(variable=r)
return SymbolicInput(input)
elif isinstance(input, (list, tuple)):
# (r, u) -> SymbolicInput(variable=r, update=u)
if len(input) == 2:
return SymbolicInput(input[0], update=input[1])
else:
raise TypeError(
f"Expected two elements in the list or tuple; got {input}"
)
else:
raise TypeError(
f"Unknown input type: {type(input)} ({input}), expected Variable "
"instance"
)
@staticmethod
def expand_in(sinput, rinputs):
# For SymbolicInputKits, this extracts a list of SymbolicInput
# instances and corresponding indices such that these
# SymbolicInputs are representative of some of the Variable
# instances in inputs. For SymbolicInput, this returns None
# as the list of indices and a list with just the
# SymbolicInput.
# if isinstance(sinput, SymbolicInputKit):
# return sinput.complete(rinputs)
# elif isinstance(sinput, SymbolicInput):
if isinstance(sinput, SymbolicInput):
return [None, [sinput]]
@staticmethod
def wrap_out(output):
if isinstance(output, SymbolicOutput):
return output
elif isinstance(output, Variable):
return SymbolicOutput(output)
else:
raise TypeError(f"Unknown output type: {type(output)} ({output})")
def optimize_graph_with_cache(self, optimizer, inputs, outputs):
# This function is not finished
graph_db_file = os.path.join(config.compiledir, "optimized_graphs.pkl")
# the inputs, outputs, and size of the graph to be optimized
inputs_new = [inp.variable for inp in inputs]
outputs_new = [out.variable for out in outputs]
size_new = len(self.fgraph.apply_nodes)
# Beginning of cache optimizations.
# Could be refactored in different functions.
def load_graph_db():
if os.path.isfile(graph_db_file):
print("graph_db already exists")
else:
# create graph_db
with open(graph_db_file, "wb") as f:
print(f"create new graph_db in {graph_db_file}")
# load the graph_db dictionary
try:
with open(graph_db_file, "rb") as f, config.change_flags(
unpickle_function=False
):
# Temporary hack to allow
# tests.scan.test_scan.T_Scan to
# finish. Should be changed in definitive version.
graph_db = pickle.load(f)
print("graph_db loaded and it is not empty")
except EOFError as e:
# the file has nothing in it
print(e)
print("graph_db loaded and it is empty")
graph_db = {}
return graph_db
def find_same_graph_in_db(graph_db):
# If found_graph_in_db is None, then need to optimize.
# Otherwise, return the graph found.
found_graph_in_db = None
# The sole purpose of this loop is to set 'need_optimize' by
# going through graph_db, looking for graph that has the same
# computation performed.
for graph_old, graph_optimized in graph_db.items():
inputs_old = graph_old.inputs
outputs_old = graph_old.outputs
size_old = len(graph_old.apply_nodes)
# Some heuristics to check whether the same graph has
# already been optimized before.
if len(inputs_new) != len(inputs_old):
# If the inputs are of different size,
# two graphs are for sure different
print("need to optimize, because input size is different")
continue
elif len(outputs_new) != len(outputs_old):
# If the outputs are of different size,
# two graphs are for sure different
print("need to optimize, because output size is different")
continue
elif not all(
input_new.type == input_old.type
for input_new, input_old in zip(inputs_new, inputs_old)
):
print("need to optimize, because inputs are of different " "types")
continue
elif not all(
output_new.type == output_old.type
for output_new, output_old in zip(outputs_new, outputs_old)
):
print("need to optimize, because outputs are of different " "types")
continue
elif not size_old == size_new:
print(
"need to optimize, because numbers of nodes in graph"
" are different"
)
continue
else:
flags = []
for i, (output_new, output_old) in enumerate(
zip(outputs_new, outputs_old)
):
print("loop through outputs node for both graphs")
graph_old.variables = set(
vars_between(graph_old.inputs, graph_old.outputs)
)
# using clone avoids a lot of errors that
# deep copy seemed to have.
f2 = graph_old.clone(check_integrity=False)
t1 = output_new
t2 = f2.outputs[i]
givens = dict(
zip(
graph_inputs([t1]),
graph_inputs([t2]),
)
)
temp = dict(
zip(
graph_inputs([t1]),
graph_inputs([t2]),
)
)
# hack to remove inconsistent entries in givens;
# it seems to work, but the source of the inconsistency
# could be worth investigating.
for key, value in temp.items():
if key.type != value.type:
del givens[key]
flag = is_same_graph(t1, t2, givens=givens)
flags.append(flag)
is_same = all(flags)
if is_same:
# found the match
print("found a match, no need to optimize")
found_graph_in_db = graph_optimized
break
return found_graph_in_db
with lock_ctx():
graph_db = load_graph_db()
print(f"loaded graph_db from {graph_db_file}, size={len(graph_db)}")
found_graph = find_same_graph_in_db(graph_db)
if found_graph:
self.fgraph = found_graph
optimizer_profile = None
else:
# this is a brand new graph, optimize it, save it to graph_db
print("graph not found in graph_db, optimizing the graph")
self.fgraph.variables = set(
vars_between(self.fgraph.inputs, self.fgraph.outputs)
)
# The check_integrity parameter was added to ignore
# "excess cached variables" errors. It works that way,
# but once again the error could be worth
# investigating.
before_opt = self.fgraph.clone(check_integrity=False)
optimizer_profile = optimizer(self.fgraph)
graph_db.update({before_opt: self.fgraph})
with open(graph_db_file, "wb") as f:
pickle.dump(graph_db, f, -1)
print("new graph saved into graph_db")
return optimizer_profile
def __init__(
self,
inputs,
outputs,
mode=None,
accept_inplace=False,
function_builder=Function,
profile=None,
on_unused_input=None,
fgraph=None,
output_keys=None,
name=None,
):
# Save the provided mode, not the instantiated mode.
# The instantiated mode doesn't pickle, and if we unpickle an Aesara
# function and it gets re-compiled, we want the current optimizer to be
# used, not the optimizer from when it was saved.
self.mode = mode
mode = aesara.compile.mode.get_mode(mode)
# Assert old way of working isn't used
if getattr(mode, "profile", None):
raise TypeError("profile passed via 'mode'. This isn't supported anymore")
self.profile = profile
if profile:
# This is very important:
# 1) We preload the cache here so that its timing is not
# included in the optimization of the compiled function.
# 2) Do not refresh the cache here by default. It causes
# too much execution time during testing, as we compile
# many more functions than the number of compiled C
# modules.
aesara.link.c.basic.get_module_cache().refresh()
# Handle the case where inputs and/or outputs is a single
# Variable (not in a list)
unpack_single = False
return_none = False
if outputs is None:
return_none = True
outputs = []
if not isinstance(outputs, (list, tuple)):
unpack_single = True
outputs = [outputs]
if not isinstance(inputs, (list, tuple)):
inputs = [inputs]
# Wrap them in In or Out instances if needed.
inputs = [self.wrap_in(i) for i in inputs]
outputs = [self.wrap_out(o) for o in outputs]
_inputs = list(
graph_inputs(
[o.variable for o in outputs]
+ [i.update for i in inputs if getattr(i, "update", False)]
)
)
# Check if some input variables are unused
self._check_unused_inputs(inputs, outputs, on_unused_input)
# Make a list of (SymbolicInput|SymblicInputKits, indices,
# [SymbolicInput,...]), one tuple for each input. (See
# Function.indices for more details)
indices = [[input] + self.expand_in(input, _inputs) for input in inputs]
if fgraph is None:
need_opt = True
# make the fgraph (copies the graph, creates NEW INPUT AND
# OUTPUT VARIABLES)
fgraph, additional_outputs = std_fgraph(inputs, outputs, accept_inplace)
fgraph.profile = profile
else:
# fgraph is already an optimized one
need_opt = False
updates = [spec.update for spec in inputs if spec.update]
additional_outputs = list(map(SymbolicOutput, updates))
self.fgraph = fgraph
# Fetch the optimizer and linker
optimizer, linker = mode.optimizer, copy.copy(mode.linker)
if need_opt:
# Why do we add the stack on nodes when it gets done in output variables?
try:
start_optimizer = time.time()
# In case there is an error during optimization.
optimizer_profile = None
opt_time = None
with config.change_flags(
compute_test_value=config.compute_test_value_opt,
traceback__limit=config.traceback__compile_limit,
):
# now optimize the graph
if config.cache_optimizations:
optimizer_profile = self.optimize_graph_with_cache(
optimizer, inputs, outputs
)
else:
optimizer_profile = optimizer(fgraph)
end_optimizer = time.time()
opt_time = end_optimizer - start_optimizer
_logger.debug(f"Optimizing took {opt_time:f} seconds")
# Add deep copy to respect the memory interface
insert_deepcopy(fgraph, inputs, outputs + additional_outputs)
finally:
# If the optimizer got interrupted
if opt_time is None:
end_optimizer = time.time()
opt_time = end_optimizer - start_optimizer
aesara.compile.profiling.total_graph_opt_time += opt_time
if profile:
if optimizer_profile is None and hasattr(optimizer, "pre_profile"):
optimizer_profile = optimizer.pre_profile
profile.optimizer_time += opt_time
if config.profile_optimizer:
profile.optimizer_profile = (optimizer, optimizer_profile)
# If False, it means the profile for that function was
# explicitly disabled.
elif config.profile_optimizer and profile is not False:
warnings.warn(
(
"config.profile_optimizer requires config.profile to "
" be set to True as well"
),
stacklevel=3,
)
# initialize the linker
if not hasattr(linker, "accept"):
raise ValueError(
"'linker' parameter of FunctionMaker should be "
f"a Linker with an accept method or one of {list(aesara.compile.mode.predefined_linkers.keys())}"
)
# the 'no_borrow' outputs are the ones for which we can't
# return the internal storage pointer.
assert len(fgraph.outputs) == len(outputs + additional_outputs)
no_borrow = [
output
for output, spec in zip(fgraph.outputs, outputs + additional_outputs)
if not spec.borrow
]
if no_borrow:
self.linker = linker.accept(
fgraph,
no_recycling=infer_reuse_pattern(fgraph, no_borrow),
profile=profile,
)
else:
self.linker = linker.accept(fgraph, profile=profile)
if hasattr(linker, "accept_var_updates"):
# hacky thing so VMLinker knows about updates
self.linker.accept_var_updates(fgraph_updated_vars(fgraph, inputs))
fgraph.name = name
self.indices = indices
self.inputs = inputs
self.expanded_inputs = inputs
self.outputs = outputs
self.unpack_single = unpack_single
self.return_none = return_none
self.accept_inplace = accept_inplace
self.function_builder = function_builder
self.on_unused_input = on_unused_input # Used for the pickling/copy
self.output_keys = output_keys
self.name = name
self.required = [(i.value is None) for i in self.inputs]
self.refeed = [
(
i.value is not None
and not isinstance(i.value, Container)
and i.update is None
)
for i in self.inputs
]
def _check_unused_inputs(self, inputs, outputs, on_unused_input):
if on_unused_input is None:
on_unused_input = config.on_unused_input
if on_unused_input == "ignore":
return
# There should be two categories of variables in inputs:
# - variables that have to be provided (used_inputs)
# - shared variables that will be updated
used_inputs = list(
ancestors(
(
[o.variable for o in outputs]
+ [i.update for i in inputs if getattr(i, "update", False)]
),
blockers=[i.variable for i in inputs],
)
)
msg = (
"aesara.function was asked to create a function computing "
"outputs given certain inputs, but the provided input "
"variable at index %i is not part of the computational graph "
"needed to compute the outputs: %s.\n%s"
)
warn_msg = (
"To make this warning into an error, you can pass the "
"parameter on_unused_input='raise' to aesara.function. "
"To disable it completely, use on_unused_input='ignore'."
)
err_msg = (
"To make this error into a warning, you can pass the "
"parameter on_unused_input='warn' to aesara.function. "
"To disable it completely, use on_unused_input='ignore'."
)
for i in inputs:
if (i.variable not in used_inputs) and (i.update is None):
if on_unused_input == "warn":
warnings.warn(
msg % (inputs.index(i), i.variable, warn_msg), stacklevel=6
)
elif on_unused_input == "raise":
raise UnusedInputError(msg % (inputs.index(i), i.variable, err_msg))
else:
raise ValueError(
"Invalid value for keyword on_unused_input of aesara.function: "
f"'{on_unused_input}'.\n"
"Valid values are 'raise', 'warn', and 'ignore'."
)
def create(self, input_storage=None, trustme=False, storage_map=None):
"""
Create a function.
Parameters
----------
input_storage
            A list matching the inputs list and providing default values.
            If the default for an input is None, then that input is a required input.
For an input with an update, the default acts as initialization.
trustme
Disables some exceptions, used internally.
"""
if input_storage is None:
input_storage = [None] * len(self.inputs)
# list of independent one-element lists, will be passed to the linker
input_storage_lists = []
defaults = []
# The following loop is to fill in the input_storage_lists and
# defaults lists.
assert len(self.indices) == len(input_storage)
for i, ((input, indices, subinputs), input_storage_i) in enumerate(
zip(self.indices, input_storage)
):
# Replace any default value given as a variable by its
# container. Note that this makes sense only in the
# context of shared variables, but for now we avoid
# dealing directly with them to avoid dependency on the
# shared variables work-in-progress repository.
if isinstance(input_storage_i, Variable):
input_storage_i = input_storage_i.container
if isinstance(input_storage_i, Container):
# If the default is a Container, this means we want to
# share the same storage. This is done by appending
# input_storage_i.storage to input_storage_lists.
if indices is not None:
raise TypeError(
"Cannot take a Container instance as "
"default for a SymbolicInputKit."
)
input_storage_lists.append(input_storage_i.storage)
storage = input_storage[i].storage[0]
else:
# Normal case: one new, independent storage unit
input_storage_lists.append([input_storage_i])
storage = input_storage_i
required = self.required[i]
refeed = self.refeed[i]
# sanity check-- if an input is required it should not
# need to be refed
assert not (required and refeed)
# shared variables need neither be input by the user nor refed
if input.shared:
assert not required
assert not refeed
storage = None
            # if an input is required, it never needs to be refed
if required:
storage = None
# make sure that we only store a value if we actually need it
if storage is not None:
assert refeed or not required
defaults.append((required, refeed, storage))
# Get a function instance
start_linker = time.time()
start_import_time = aesara.link.c.cmodule.import_time
with config.change_flags(traceback__limit=config.traceback__compile_limit):
_fn, _i, _o = self.linker.make_thunk(
input_storage=input_storage_lists, storage_map=storage_map
)
end_linker = time.time()
linker_time = end_linker - start_linker
aesara.compile.profiling.total_time_linker += linker_time
_logger.debug(f"Linker took {linker_time:f} seconds")
if self.profile:
self.profile.linker_time += linker_time
_fn.time_thunks = self.profile.flag_time_thunks
import_time = aesara.link.c.cmodule.import_time - start_import_time
self.profile.import_time += import_time
fn = self.function_builder(
_fn,
_i,
_o,
self.indices,
self.outputs,
defaults,
self.unpack_single,
self.return_none,
self.output_keys,
self,
name=self.name,
)
fn.profile = self.profile
return fn
def _constructor_FunctionMaker(kwargs):
    # Needed for old pickles.
    # Old pickles have at least the problem that output_keys was not saved.
if config.unpickle_function:
if config.reoptimize_unpickled_function:
del kwargs["fgraph"]
return FunctionMaker(**kwargs)
else:
return None
__checkers: List = []
def check_equal(x, y):
for checker in __checkers:
try:
return checker(x, y)
except Exception:
continue
return x == y
def register_checker(checker):
__checkers.insert(0, checker)
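# Illustrative sketch (not part of the original module): `register_checker`
# prepends a callable that either returns a comparison result or raises to
# fall through to the next checker / plain `==`. A hypothetical NumPy-aware
# checker could look like this:
#
#     import numpy as np
#
#     def _numpy_checker(x, y):
#         if isinstance(x, np.ndarray) or isinstance(y, np.ndarray):
#             return np.array_equal(x, y)
#         raise TypeError("not an ndarray pair")  # defer to other checkers
#
#     register_checker(_numpy_checker)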
def orig_function(
inputs,
outputs,
mode=None,
accept_inplace=False,
name=None,
profile=None,
on_unused_input=None,
output_keys=None,
):
"""
Return a Function that will calculate the outputs from the inputs.
Parameters
----------
inputs : list of `SymbolicInput` or `In` instances
outputs : a SymbolicOutput or a list of `SymbolicOutput` or `Out` instances
The return value of the returned function will match the format of this
argument (either the value itself or a list of one or more return
values).
mode : descriptive string or Mode instance
Default of None means to use `config.mode` (see below for descriptive
string list).
name : str
An optional name for this function. If used, the profile mode will print the
time spent in this function.
accept_inplace : bool
True iff the graph can contain inplace operations prior to the
optimization phase (default is False).
profile : None or ProfileStats instance
on_unused_input : {'raise', 'warn', 'ignore', None}
What to do if a variable in the 'inputs' list is not used in the graph.
output_keys :
If the outputs were provided to aesara.function as a list, then
output_keys is None. Otherwise, if outputs were provided as a dict,
output_keys is the sorted list of keys from the outputs.
Notes
-----
Currently, the library provides the following mode strings:
- FAST_RUN (default) (optimize without too much time)
- FAST_COMPILE (minimal optimization)
- DebugMode: verify many internal conditions that are normally assumed
(slow)
"""
# Every element of the input list will be upgraded to an `In` instance if
# necessary, using the rules implemented by the `convert_function_input`
# function.
# Similarly, every element of the output list will be upgraded to an `Out`
# instance if necessary:
t1 = time.time()
mode = aesara.compile.mode.get_mode(mode)
inputs = list(map(convert_function_input, inputs))
if outputs is not None:
if isinstance(outputs, (list, tuple)):
outputs = list(map(FunctionMaker.wrap_out, outputs))
else:
outputs = FunctionMaker.wrap_out(outputs)
defaults = [getattr(input, "value", None) for input in inputs]
if isinstance(mode, (list, tuple)): # "mode comparison" semantics
raise Exception("We do not support the passing of multiple modes")
fn = None
try:
Maker = getattr(mode, "function_maker", FunctionMaker)
m = Maker(
inputs,
outputs,
mode,
accept_inplace=accept_inplace,
profile=profile,
on_unused_input=on_unused_input,
output_keys=output_keys,
name=name,
)
with config.change_flags(compute_test_value="off"):
fn = m.create(defaults)
finally:
t2 = time.time()
if fn and profile:
profile.compile_time += t2 - t1
# TODO: append
profile.nb_nodes = len(fn.maker.fgraph.apply_nodes)
return fn
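# Minimal usage sketch (not part of the original module), assuming the usual
# Aesara tensor API: `orig_function` is the low-level entry point behind
# `aesara.function`, so a direct call mirrors that interface.
#
#     import aesara.tensor as at
#
#     x = at.dscalar("x")
#     y = at.dscalar("y")
#     f = orig_function([x, y], x + y, mode="FAST_COMPILE")
#     assert f(2.0, 3.0) == 5.0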
def convert_function_input(input):
"""
    Upgrade an input shortcut to an `In` instance.
The rules for upgrading are as follows:
- a `Variable` instance r will be upgraded like `In`(r)
- a tuple (name, r) will be `In`(r, name=name)
    - a tuple (r, val) will be `In`(r, value=val, autoname=True)
    - a tuple ((r,up), val) will be
      `In`(r, value=val, update=up, autoname=True)
    - a tuple (name, r, val) will be `In`(r, name=name, value=val)
- a tuple (name, (r,up), val) will be
`In`(r, name=name, value=val, update=up, autoname=True)
"""
if isinstance(input, SymbolicInput):
return input
elif isinstance(input, Constant):
raise TypeError(f"A Constant instance is not a legal function input: {input}")
elif isinstance(input, Variable):
return In(input)
elif isinstance(input, (list, tuple)):
orig = input
if not input:
raise TypeError(f"Nonsensical input specification: {input}")
if isinstance(input[0], str):
name = input[0]
input = input[1:]
else:
name = None
if isinstance(input[0], (list, tuple)):
if len(input[0]) != 2 or len(input) != 2:
raise TypeError(
f"Invalid input syntax: {orig} (check "
"documentation or use an In instance)"
)
(variable, update), value = input
elif isinstance(input[0], Variable):
if len(input) == 1:
variable, update, value = input[0], None, None
elif len(input) == 2:
(variable, value), update = input, None
else:
raise TypeError(
f"Invalid input syntax: {orig} (check "
"documentation or use an In instance)"
)
elif isinstance(input[0], SymbolicInput):
if len(input) == 1:
return input[0]
elif len(input) == 2:
input, value = input
if name is not None:
input.name = name
input.value = value
return input
else:
raise TypeError(f"The input specification is not valid: {input}")
if not isinstance(variable, Variable):
raise TypeError(
f"Unknown input type: {type(variable)}, expected Variable instance"
)
if update is not None and not isinstance(update, Variable):
raise TypeError(
f"Unknown update type: {type(update)}, expected Variable instance"
)
if value is not None and isinstance(value, (Variable, SymbolicInput)):
raise TypeError(
f"The value for input {variable} should not be a Variable "
f"or SymbolicInput instance (got: {value})"
)
return In(variable, name=name, value=value, update=update)
else:
raise TypeError(
f"Unknown input type: {type(input)}, expected Variable instance"
)
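# Illustrative examples (not part of the original module) of the shortcut
# forms accepted above, assuming `x` and `x_new` are Variable instances:
#
#     convert_function_input(x)                     # -> In(x)
#     convert_function_input(("x", x))              # -> In(x, name="x")
#     convert_function_input((x, 0.0))              # -> In(x, value=0.0)
#     convert_function_input(((x, x_new), 0.0))     # -> In(x, value=0.0, update=x_new)
#     convert_function_input(("x", (x, x_new), 0))  # -> In(x, name="x", value=0, update=x_new)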
def get_info_on_inputs(named_inputs, n_unnamed_inputs):
"""
Return a human-readable description of named and un-named inputs.
"""
n_named_inputs = len(named_inputs)
def get_plural(n):
if n > 1:
return "s"
else:
return ""
if n_named_inputs == 0:
if n_unnamed_inputs == 0:
msg = "The function is supposed to have no input."
else:
if n_unnamed_inputs == 1:
msg = (
"The function has a single input variable which has no "
"name, and thus cannot be assigned through a keyword"
" argument (use 'name=...' in a Variable's "
"constructor to give it a name)."
)
else:
# Use plural.
msg = (
f"The function has {n_unnamed_inputs} inputs, but none of them is named,"
" and thus they cannot be assigned through keyword "
"arguments (use 'name=...' in a Variable's "
"constructor to give it a name)."
)
else:
if n_unnamed_inputs == 0:
msg = "The function has {} named input{} ({}).".format(
n_named_inputs,
get_plural(n_named_inputs),
", ".join(named_inputs),
)
else:
msg = (
f"The function has {n_named_inputs} named input{get_plural(n_named_inputs)} ({', '.join(named_inputs)}), and {n_unnamed_inputs} unnamed "
f"input{get_plural(n_unnamed_inputs)} which thus cannot be accessed through keyword "
f"argument{get_plural(n_unnamed_inputs)} (use 'name=...' in a variable's constructor "
"to give it a name)."
)
return msg
|
""" goTenna API objects - part of pyGT https://github.com/sybip/pyGT """
""" WARNING: not to be confused with gtairobj.py ("air" radio objects) """
from struct import pack, unpack
from binascii import hexlify, unhexlify
from datetime import datetime
import time
from pyTLV import tlvPack, tlvRead
from pygth16 import gtAlgoH16
from gtdefs import * # noqa: F403
def gtMakeAPIMsg(msgBlob, msgClass, msgAppID, fromGID, destGID=0, destTag=0,
meshTTL=3, seqNo0=0, seqNo1=0, crypt=0):
"""
Assemble a GTM API compatible message PDU (WITH top-level TLVs)
(for API command 03 - OP_SENDMSG)
"""
# 1) Message destination element
msgDest = pack('!BH', msgClass, msgAppID)
if msgClass in (MSG_CLASS_P2P, MSG_CLASS_GROUP):
# Destination address only in addressed messages
msgDest += unhexlify('%012x%02x' % (destGID, destTag))
# 2) Message header element (sender GID, timestamp and seq numbers)
msgHead = pack('!BQLHB', crypt, fromGID, int(time.time()), seqNo0, seqNo1)
msgHeadTLV = tlvPack(MESG_TLV_HEAD, msgHead)
# 3) Assemble the PDU: Dest, 0x04, Data (Head + Blob), Mesh TTL
msgFullPDU = tlvPack(MESG_TLV_DEST, msgDest)
if msgClass == MSG_CLASS_P2P:
# Element 0x04 only in P2P messages
msgFullPDU += tlvPack(0x04, b'\xff\x00\x00')
msgFullPDU += tlvPack(MESG_TLV_DATA, msgHeadTLV + msgBlob)
msgFullPDU += tlvPack(MESG_TLV_TTL, pack('B', meshTTL))
return msgFullPDU
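# Illustrative sketch (not part of the original module): building a P2P
# message PDU. The app ID, GIDs and inner blob below are placeholders, not
# real goTenna identifiers.
#
#     pdu = gtMakeAPIMsg(msgBlob=tlvPack(0x01, b"hello"),
#                        msgClass=MSG_CLASS_P2P,
#                        msgAppID=0x1234,
#                        fromGID=0x1122334455, destGID=0x6677889900,
#                        meshTTL=3)
#     # pdu now holds the DEST + 0x04 + DATA(HEAD + blob) + TTL top-level
#     # TLVs, ready for API command 03 (OP_SENDMSG).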
def gtReadAPIMsg(msgPDU, verbose=1):
"""
Parse a GTM API message PDU (WITH top-level TLVs)
(via API command 06 - OP_READMSG)
"""
msg = {}
# Message PDU is a TLV structure
for type, length, value in tlvRead(msgPDU):
if verbose:
print("[MESG] TYPE %02x: " % type + hexlify(value).decode())
if type == MESG_TLV_DEST: # Destination element
(msg['classID'], msg['appID']) = unpack("!BH", value[:3])
if msg['classID'] in (MSG_CLASS_P2P, MSG_CLASS_GROUP):
# Non-broadcast messages have a destination address:
# extract 6-byte destGID and 1-byte dest tag
# (there's no unpack template for 48-bit numbers, so we
# unpack AppID+GID as a 64-bit number and mask out AppID)
msg['destGID'] = unpack('!Q', value[1:9])[0] & 0xffffffffffff
msg['destTag'] = bytearray(value)[9]
if verbose:
print("[MSGD] CLASSID: %02x (%s)" %
(msg['classID'], MSG_CLASS_NAME[msg['classID']]))
print("[MSGD] APPID : %04x" % msg['appID'])
if 'destGID' in msg:
print("[MSGD] DESTGID: %012x" % msg['destGID'])
print("[MSGD] DESTTAG: %02x" % msg['destTag'])
elif type == MESG_TLV_DATA: # Main (DATA) element
if (length < 16):
print("WW: Length %02x invalid for DATA TLV" % length)
continue
stype, slength, = unpack('BB', value[:2])
# This is really the HEAD (0xFB) element, its format is strict
# so we'll just parse it as a fixed struct
if (stype != 0xfb): # Expecting first byte to be FB
print("WW: Don't know how to parse: " +
hexlify(value).decode())
continue
if (slength != 0x10):
print("WW: Length %02x invalid for FB TLV" % slength)
(msg['cryptFlag'], msg['fromGID'], msg['tstamp'],
msg['seqNo0'], msg['seqNo1']) = unpack('!BQLHB', value[2:18])
msg['hashID'] = gtAlgoH16(value[2:18])
if verbose:
print("[MSGH] ENCRYPT: %01x" % msg['cryptFlag'])
print("[MSGH] FROMGID: %012x" % msg['fromGID'])
print("[MSGH] DTSTAMP: " + "%08x (%s)" % (msg['tstamp'],
datetime.fromtimestamp(msg['tstamp']).
strftime("%Y-%m-%d %H:%M:%S")))
print("[MSGH] SEQNO_0: %04x" % msg['seqNo0'])
print("[MSGH] SEQNO_1: %02x" % msg['seqNo1'])
# skip the HEAD element
value = value[18:]
# message content is here
msg['msgBlob'] = value # as received
elif type == MESG_TLV_0x04: # Unknown TLV 4
msg['tlv_04'] = value
elif type == MESG_TLV_DLR: # Delivery ACK
(msg['ackStatus'], msg['ackMsgID'],) = unpack('!BH', value)
if verbose:
print(" Delivery ACK: status 0x%02x for message ID 0x%04x" %
(msg['ackStatus'], msg['ackMsgID']))
elif type == MESG_TLV_HOPS: # Number of hops
(msg['meshHops'], msg['dChRSSI'],) = unpack('BB', value)
if verbose:
print(" Received via %d hops, dChRSSI=0x%02x" %
(msg['meshHops'], msg['dChRSSI']))
return msg
|
from .Proxy import Proxy
from robot.libraries.BuiltIn import BuiltIn
import sys
from robot.libraries.Screenshot import Screenshot
from robot.api import logger
import I18nListener as i18n
import ManyTranslations as ui
from robot.libraries.Collections import _Dictionary
class DictionariesShouldBeEqualProxy(Proxy):
def __init__(self, arg_format):
arg_format[repr(['dict1', 'dict2', 'msg=None', 'values=True'])] = self
def i18n_Proxy(self, func):
def proxy(self, dict1, dict2, msg=None, values=True):
full_args = [str(dict1), str(dict2)]
dict1_keys_trans = i18n.I18nListener.MAP.values(list(dict1.keys()), full_args)
dict1_values_trans = i18n.I18nListener.MAP.values(list(dict1.values()), full_args)
dict2_keys_trans = i18n.I18nListener.MAP.values(list(dict2.keys()), full_args)
dict2_values_trans = i18n.I18nListener.MAP.values(list(dict2.values()), full_args)
whole_trans = []
whole_trans.append(dict1_keys_trans)
whole_trans.append(dict1_values_trans)
whole_trans.append(dict2_keys_trans)
whole_trans.append(dict2_values_trans)
dict_have_multi_trans = False
for i in range(4):
for dt in whole_trans[i]:
if len(dt)>1:
dict_have_multi_trans = True
break
new_dict1 = {}
new_dict2 = {}
new_dict2=dict(zip(list(dict2.keys()), dict2_values_trans))
if dict_have_multi_trans:
DictionariesShouldBeEqualProxy.show_warning(self, dict1, dict2, full_args)
if 'contain_sub' in func.__name__:
keys = self.get_dictionary_keys(dict2)
contain_key = True
for k in keys:
if k not in dict1:
contain_key = False
break
if contain_key and not list(_Dictionary._yield_dict_diffs(self, keys, dict1, dict2)):
diffs = False
else:
diffs = True
elif 'equal' in func.__name__:
try:
keys = _Dictionary._keys_should_be_equal(self, dict1, dict2, msg, values)
diffs = list(_Dictionary._yield_dict_diffs(self, keys, dict1, dict2))
for k in keys:
if dict1[k] in dict2_values_trans[0]:
diffs = False
except:
for dict1_key in dict1.keys():
for dict2_key in new_dict2.keys():
if [dict1_key] in dict2_keys_trans and dict1[dict1_key] in new_dict2[dict2_key][0]:
new_dict1[dict2_key] = new_dict2[dict2_key]
diffs = False
else:
diffs = True
break
if not diffs:
i18n.I18nListener.Is_Multi_Trans = True
for i, dt in enumerate(dict1_keys_trans):
if len(dt)>1 and str(full_args)+list(dict1.keys())[i] not in ui.UI.unique_log:
ui.UI.origin_xpaths_or_arguments.append(full_args)
multi_trans_word = [list(dict1.keys())[i]]
ui.UI.add_trans_info(self, multi_trans_word, dt, full_args, func.__name__)
for i, dt in enumerate(dict1_values_trans):
if len(dt)>1 and str(full_args)+list(dict1.values())[i] not in ui.UI.unique_log:
ui.UI.origin_xpaths_or_arguments.append(full_args)
multi_trans_word = [list(dict1.values())[i]]
ui.UI.add_trans_info(self, multi_trans_word, dt, full_args, func.__name__)
for i, dt in enumerate(dict2_keys_trans):
if len(dt)>1 and str(full_args)+list(dict2.keys())[i] not in ui.UI.unique_log:
ui.UI.origin_xpaths_or_arguments.append(full_args)
multi_trans_word = [list(dict2.keys())[i]]
ui.UI.add_trans_info(self, multi_trans_word, dt, full_args, func.__name__)
for i, dt in enumerate(dict2_values_trans):
if len(dt)>1 and str(full_args)+list(dict2.values())[i] not in ui.UI.unique_log:
ui.UI.origin_xpaths_or_arguments.append(full_args)
multi_trans_word = [list(dict2.values())[i]]
ui.UI.add_trans_info(self, multi_trans_word, dt, full_args, func.__name__)
dict1 = dict(zip(list(dict1.keys()), dict1_values_trans))
dict2 = dict(zip(list(dict2.keys()), dict2_values_trans))
for dict1_key in dict1.keys():
for dict2_key in dict2.keys():
if [dict1_key] in dict2_keys_trans and dict1[dict1_key]== dict2[dict2_key]:
dict1.pop(dict1_key, None)
dict1[dict2_key] = dict2[dict2_key]
return func(self, dict1, dict2)
elif dict1_key== dict2_key and dict1[dict1_key][0] in dict2[dict2_key]:
dict1[dict1_key] = dict2[dict2_key]
if new_dict1:
return func(self, new_dict1, new_dict2, msg, values)
else:
return func(self, dict1, dict2, msg, values)
return proxy
def show_warning(self, dict1, dict2, full_args):
language = 'i18n in %s:\n ' %i18n.I18nListener.LOCALE
test_name = ('Test Name: %s') %BuiltIn().get_variable_value("${TEST NAME}") + '=> Exist multiple translations of the word' + '\n'
message_for_dict1_key = Proxy().deal_warning_message_for_list(dict1.keys(), full_args, 'Dict1KEY')
message_for_dict1_value = Proxy().deal_warning_message_for_list(dict1.values(), full_args, 'Dict1VALUE')
message_for_dict2_key = Proxy().deal_warning_message_for_list(dict2.keys(), full_args, 'Dict2KEY')
message_for_dict2_value = Proxy().deal_warning_message_for_list(dict2.values(), full_args, 'Dict2VALUE')
message = language + test_name + message_for_dict1_key + '\n' + message_for_dict1_value + '\n' \
+ message_for_dict2_key + '\n' + message_for_dict2_value + '\n' + 'You should verify translation is correct!'
if message_for_dict1_key or message_for_dict1_value or message_for_dict2_key or message_for_dict2_value:
logger.warn(message)
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""OCP Query Handling for Reports."""
import copy
import logging
from django.db.models import F
from tenant_schemas.utils import tenant_context
from api.models import Provider
from api.report.azure.openshift.provider_map import OCPAzureProviderMap
from api.report.azure.query_handler import AzureReportQueryHandler
from api.report.queries import is_grouped_by_project
LOG = logging.getLogger(__name__)
class OCPAzureReportQueryHandler(AzureReportQueryHandler):
"""Handles report queries and responses for OCP on Azure."""
provider = Provider.OCP_AZURE
def __init__(self, parameters):
"""Establish OCP report query handler.
Args:
parameters (QueryParameters): parameter object for query
"""
self._mapper = OCPAzureProviderMap(provider=self.provider, report_type=parameters.report_type)
# Update which field is used to calculate cost by group by param.
if is_grouped_by_project(parameters):
self._report_type = parameters.report_type + "_by_project"
self._mapper = OCPAzureProviderMap(provider=self.provider, report_type=self._report_type)
self.group_by_options = self._mapper.provider_map.get("group_by_options")
self._limit = parameters.get_filter("limit")
# super() needs to be called after _mapper and _limit is set
super().__init__(parameters)
# super() needs to be called before _get_group_by is called
@property
def annotations(self):
"""Create dictionary for query annotations.
Returns:
(Dict): query annotations dictionary
"""
annotations = {"date": self.date_trunc("usage_start")}
# { query_param: database_field_name }
fields = self._mapper.provider_map.get("annotations")
for q_param, db_field in fields.items():
annotations[q_param] = F(db_field)
if (
"project" in self.parameters.parameters.get("group_by", {})
or "and:project" in self.parameters.parameters.get("group_by", {})
or "or:project" in self.parameters.parameters.get("group_by", {})
):
annotations["project"] = F("namespace")
return annotations
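    # Illustrative note (not part of the original class): for a request
    # grouped by project, the returned dictionary looks roughly like
    #     {"date": <truncated usage_start>,
    #      "subscription_guid": F("subscription_guid"),  # hypothetical mapping
    #      "project": F("namespace")}
    # where every key besides "date"/"project" comes from the provider map's
    # "annotations" entry, so exact field names depend on OCPAzureProviderMap.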
def execute_query(self): # noqa: C901
"""Execute query and return provided data.
Returns:
(Dict): Dictionary response of query params, data, and total
"""
query_sum = self.initialize_totals()
data = []
with tenant_context(self.tenant):
query = self.query_table.objects.filter(self.query_filter)
query_data = query.annotate(**self.annotations)
group_by_value = self._get_group_by()
query_group_by = ["date"] + group_by_value
query_order_by = ["-date"]
query_order_by.extend([self.order]) # add implicit ordering
annotations = self._mapper.report_type_map.get("annotations")
query_data = query_data.values(*query_group_by).annotate(**annotations)
if self._limit and query_data:
query_data = self._group_by_ranks(query, query_data)
if not self.parameters.get("order_by"):
# override implicit ordering when using ranked ordering.
query_order_by[-1] = "rank"
if query.exists():
aggregates = self._mapper.report_type_map.get("aggregates")
metric_sum = query.aggregate(**aggregates)
query_sum = {key: metric_sum.get(key) for key in aggregates}
if self._delta:
query_data = self.add_deltas(query_data, query_sum)
is_csv_output = self.parameters.accept_type and "text/csv" in self.parameters.accept_type
query_data = self.order_by(query_data, query_order_by)
cost_units_value = self._mapper.report_type_map.get("cost_units_fallback", "USD")
usage_units_value = self._mapper.report_type_map.get("usage_units_fallback")
count_units_value = self._mapper.report_type_map.get("count_units_fallback")
if query_data:
cost_units_value = query_data[0].get("cost_units")
if self._mapper.usage_units_key:
usage_units_value = query_data[0].get("usage_units")
if self._mapper.report_type_map.get("annotations", {}).get("count_units"):
count_units_value = query_data[0].get("count_units")
if is_csv_output:
if self._limit:
data = self._ranked_list(list(query_data))
else:
data = list(query_data)
else:
groups = copy.deepcopy(query_group_by)
groups.remove("date")
data = self._apply_group_by(list(query_data), groups)
data = self._transform_data(query_group_by, 0, data)
init_order_keys = []
query_sum["cost_units"] = cost_units_value
if self._mapper.usage_units_key and usage_units_value:
init_order_keys = ["usage_units"]
query_sum["usage_units"] = usage_units_value
if self._mapper.report_type_map.get("annotations", {}).get("count_units") and count_units_value:
query_sum["count_units"] = count_units_value
key_order = list(init_order_keys + list(annotations.keys()))
ordered_total = {total_key: query_sum[total_key] for total_key in key_order if total_key in query_sum}
ordered_total.update(query_sum)
self._pack_data_object(ordered_total, **self._mapper.PACK_DEFINITIONS)
self.query_sum = ordered_total
self.query_data = data
return self._format_query_response()
|
# -*- coding: ascii -*-
#
# Copyright 2006-2012
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
HTTP utilities
==============
This module implements various common HTTP utilities.
:Variables:
- `CR`: ASCII CR byte (\\r)
- `LF`: ASCII LF byte (\\n)
- `CRLF`: ASCII CRLF sequence (\\r\\n)
:Types:
- `CR`: ``str``
- `LF`: ``str``
- `CRLF`: ``str``
"""
__author__ = u"Andr\xe9 Malo"
__docformat__ = "restructuredtext en"
import datetime as _datetime
import re as _re
from wtf import Error
CR = "\x0D"
LF = "\x0A"
CRLF = CR + LF
class HeaderError(Error):
""" Base header parse error """
class InvalidHeaderLine(HeaderError):
""" A header line is invalid """
class IncompleteHeaders(HeaderError):
""" The headers are incomplete """
def make_date(stamp=None, cookie=False):
"""
Make a HTTP date
:Parameters:
- `stamp`: The UTC timestamp to process. If omitted or ``None``, the
current time is taken
:Types:
- `stamp`: ``datetime.datetime``
:return: The HTTP date string
:rtype: ``str``
"""
self = make_date
if stamp is None:
stamp = _datetime.datetime.utcnow()
return stamp.strftime(
"%%(wday)s, %d%%(sep)s%%(month)s%%(sep)s%Y %H:%M:%S GMT"
) % {
'wday': self.wdays[stamp.weekday()], # pylint: disable = E1101
'month': self.months[stamp.month], # pylint: disable = E1101
'sep': [' ', '-'][bool(cookie)],
}
make_date.wdays = ( # pylint: disable = W0612
'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'
)
make_date.months = (None, # pylint: disable = W0612
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec',
)
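# Illustrative sketch (not part of the original module): a worked example of
# the two output flavours (2012-01-15 was a Sunday).
#
#     import datetime
#     stamp = datetime.datetime(2012, 1, 15, 12, 30, 0)
#     make_date(stamp)               # -> 'Sun, 15 Jan 2012 12:30:00 GMT'
#     make_date(stamp, cookie=True)  # -> 'Sun, 15-Jan-2012 12:30:00 GMT'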
def read_headers(stream):
"""
Read MIME headers from stream
:Parameters:
- `stream`: The stream to read from
:Types:
- `stream`: ``file``
:return: Dictionary of lists of headers (``{'name': ['val', ...], ...}``)
:rtype: ``dict``
:Exceptions:
- `httputil.InvalidHeaderLine`: Unparsable header line
- `httputil.IncompleteHeaders`: Stream ended before the final empty line
"""
headers = {}
self, name, values = read_headers, None, None
while True:
line = stream.readline()
if not line:
raise IncompleteHeaders("Headers not completed")
line = line[:-1 - line.endswith(CRLF)]
if self.CONT_MATCH(line): # pylint: disable = E1101
if name is None:
raise InvalidHeaderLine(
"Continuation line without line to continue")
values.append(line.lstrip())
continue
elif name is not None:
headers.setdefault(name.lower(), []
).append(" ".join(values))
if not line: # empty separator line, finished reading
break
match = self.HEADER_MATCH(line) # pylint: disable = E1101
if not match:
raise InvalidHeaderLine("Invalid header line format")
name, value = match.group('name', 'value')
values = [value]
return headers
read_headers.CONT_MATCH = _re.compile(r'\s').match # pylint: disable = W0612
# token chars from rfc 2616:
# ''.join(c for c in map(chr, range(33, 127))
# if c not in '()<>@,;:\\"/[]?={}')
read_headers.HEADER_MATCH = _re.compile( # pylint: disable = W0612
r'''(?P<name>[-!#$%&'*+.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
r'''^_`abcdefghijklmnopqrstuvwxyz|~]+)\s*:\s*(?P<value>.*)$''',
_re.X).match
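# Illustrative sketch (not part of the original module): parsing a small
# header block, including a continuation line.
#
#     import io
#     stream = io.StringIO(
#         "Host: example.com\r\n"
#         "Accept: text/html,\r\n"
#         " text/plain\r\n"
#         "\r\n"
#     )
#     read_headers(stream)
#     # -> {'host': ['example.com'], 'accept': ['text/html, text/plain']}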
class CookieCodecInterface(object):
""" Interface for Cookie codecs """
def encode(self, value):
"""
Encode the cookie value to a 7bit US-ASCII string
This method is also responsible for quoting the value if necessary.
:Parameters:
- `value`: The value to encode
:Types:
- `value`: any
:return: The encoded value
:rtype: ``str``
"""
def decode(self, value):
"""
Decode the cookie value from 7bit US-ASCII string
:Parameters:
- `value`: The cookie string (as submitted)
:Types:
- `value`: ``str``
:return: The decoded value
:rtype: any
:Exceptions:
- `ValueError`: The value could not be decoded properly
"""
class CookieMaker(object):
"""
Cookie maker helper class
:CVariables:
- `UNSAFE_SEARCH`: Unsafe character search function
- `_ATTR`: Attribute spelling and type getter
- `KEYS`: Valid attribute keys
:IVariables:
- `_encode`: Value encoder
:Types:
- `UNSAFE_SEARCH`: ``callable``
- `_ATTR`: ``callable``
- `KEYS`: ``tuple``
- `_encode`: ``callable``
"""
UNSAFE_SEARCH = _re.compile(r"[^a-zA-Z\d!#$%&'*+.^_`|~-]").search
_ATTR = dict(
expires=("expires", 'date'),
path= ("Path", 'ustring'),
comment=("Comment", 'string'),
domain= ("Domain", 'ustring'),
max_age=("Max-Age", 'int'),
secure= ("secure", 'bool'),
version=("Version", 'string'),
)
KEYS = tuple(sorted(_ATTR.keys()))
_ATTR = _ATTR.get
def __init__(self, codec=None):
"""
Initialization
:Parameters:
- `codec`: Cookie codec to apply. If unset or ``None``, an identity
codec is applied (leaving 8bit chars as-is)
:Types:
- `codec`: `CookieCodecInterface`
"""
if codec is None:
encode = lambda x: x
else:
encode = codec.encode
self._encode = encode
def __call__(self, name, value, **kwargs):
"""
Create the cookie string
Cookie parameters are given in kwargs. Valid keys are listed in
`KEYS`. ``None``-values are ignored. Here are short descriptions of
the valid parameters:
``comment``
Cookie comment (``str``)
``domain``
Valid domain (``str``)
``expires``
Expire time of the cookie (``datetime.datetime``). If unset
or ``None`` the cookie is dropped when the browser is closed.
See also the ``max_age`` keyword.
``max_age``
Max age of the cookie in seconds (``int``). If set, make sure it
matches the expiry time. The difference is that expires will be
transformed to a HTTP date, while max-age will stay an integer.
The expires parameter is the older one and better understood by
the clients out there. For that reason if you set max_age only,
expires will be set automatically to ``now + max_age``. If unset
or ``None`` the cookie will be dropped when the browser is closed.
``path``
Valid URL base path for the cookie. It should always be set to a
reasonable path (at least ``/``), otherwise the cookie will only
be valid for the current URL and below.
``secure``
Whether this is an SSL-only cookie or not (``bool``)
``version``
Cookie spec version (``int``). See `RFC 2965`_
.. _RFC 2965: http://www.ietf.org/rfc/rfc2965.txt
:Parameters:
- `name`: Cookie name
- `value`: Cookie value (if a codec was given, the type should be
applicable for the codec encoder).
- `kwargs`: Cookie parameters
:Types:
- `name`: ``str``
- `value`: ``str``
- `kwargs`: ``dict``
:return: The cookie string
:rtype: ``str``
:Exceptions:
- `ValueError`: Invalid name or values given
- `TypeError`: Unrecognized attributes given
"""
if self.UNSAFE_SEARCH(name):
raise ValueError("%r is unsafe as key" % (name,))
elif name.lower().replace('-', '_') in self.KEYS:
raise ValueError("%s is a reserved attribute and cannot be used "
"as name" % (name,))
items = ["%s=%s" % (str(name), str(self._encode(value)))]
if kwargs.get('max_age') is not None:
kwargs['max_age'] = max(0, kwargs['max_age'])
if kwargs.get('expires') is None:
kwargs['expires'] = (
_datetime.datetime.utcnow() +
_datetime.timedelta(seconds=kwargs['max_age'])
)
for key in self.KEYS:
if key in kwargs:
val = kwargs.pop(key)
if val is not None:
key, translator = self._ATTR(key)
value = getattr(self, '_' + translator)(key, val)
if value is not None:
items.append(str(value))
if kwargs:
raise TypeError("Unrecognized keywords: %r" % (kwargs.keys(),))
return "; ".join(item for item in items if item is not None)
@staticmethod
def _date(key, value):
""" Date translator """
return "%s=%s" % (key, make_date(value, cookie=True))
@staticmethod
def _int(key, value):
""" Integer translator """
return "%s=%d" % (key, int(value))
@staticmethod
def _bool(key, value):
""" Boolean translator """
if value:
return key
return None
def _string(self, key, value):
""" String translator """
return "%s=%s" % (key, self._encode(value))
@staticmethod
def _ustring(key, value):
""" Unquoted string translator """
return "%s=%s" % (key, value)
def make_cookie(name, value, codec=None, **kwargs):
"""
Make a cookie
    This is a simple interface to the `CookieMaker` class. See there for
detailed information.
:Parameters:
- `name`: Cookie name
- `value`: Cookie value
- `codec`: Value codec. If unset or ``None``, the identity codec is
applied.
- `kwargs`: Cookie attributes
:Types:
- `name`: ``str``
- `value`: ``str``
- `codec`: `CookieCodecInterface`
- `kwargs`: ``dict``
:return: The cookie string
:rtype: ``str``
:Exceptions:
- `ValueError`: Invalid name or values given
- `TypeError`: Unrecognized attributes given
"""
return CookieMaker(codec)(name, value, **kwargs)
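# Illustrative sketch (not part of the original module): the attribute order
# in the result follows the sorted KEYS tuple.
#
#     make_cookie("session", "abc123", path="/", max_age=3600, secure=True)
#     # -> 'session=abc123; expires=<now + 1 hour>; Max-Age=3600; Path=/; secure'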
|
import sys
import json
import argparse
import ply
import os
# The FastParser parses input using PLY
class FastParser:
def __init__(self, generatedFilesOutputDir=None):
from ply import lex
from ply import yacc
        generateFiles = generatedFilesOutputDir is not None
if generateFiles:
sys.path.append(generatedFilesOutputDir)
# Build the lexer and the parser
self.lexer = lex.lex(module=self,
optimize=generateFiles,
outputdir=generatedFilesOutputDir)
self.parser = yacc.yacc(module=self,
write_tables=generateFiles,
outputdir=generatedFilesOutputDir,
debug=generateFiles)
def parse(self, programString, filePath=None):
"""
Parse a program string.
The path of the program file is added as metadata to returned object
"""
# Add trailing endline if missing
if len(programString) and programString[-1] != "\n":
programString += "\n"
# Parse program and add FilePath key to object
program = self.parser.parse(programString)
if filePath is not None:
program["FilePath"] = os.path.abspath(filePath)
else:
program["FilePath"] = "Unknown"
return program
# Lexer specification
# ---------------------------------------
tokens = (
"INTLITERAL",
"REALLITERAL",
"FLOATLITERAL",
"STRINGLITERAL",
"CHARLITERAL",
"BOOLLITERAL",
"BASICTYPE",
"SHOGUNSGTYPE",
"PRINTKEYWORD",
"COMMA",
"DOT",
"COLON",
"ENUMKEYWORD",
"EQUALS",
"LPAREN",
"RPAREN",
"LSQUARE",
"RSQUARE",
"COMMENT",
"NEWLINE",
"IDENTIFIER"
)
reserved = {
'enum': 'ENUMKEYWORD',
'print': 'PRINTKEYWORD',
'True': 'BOOLLITERAL',
'False': 'BOOLLITERAL',
'int': 'BASICTYPE',
'bool': 'BASICTYPE',
'float': 'BASICTYPE',
'real': 'BASICTYPE',
'string': 'BASICTYPE',
'char': 'BASICTYPE',
'BoolVector': 'SHOGUNSGTYPE',
'CharVector': 'SHOGUNSGTYPE',
'ByteVector': 'SHOGUNSGTYPE',
'WordVector': 'SHOGUNSGTYPE',
'ShortVector': 'SHOGUNSGTYPE',
'IntVector': 'SHOGUNSGTYPE',
'LongIntVector': 'SHOGUNSGTYPE',
'ULongIntVector': 'SHOGUNSGTYPE',
'ShortRealVector': 'SHOGUNSGTYPE',
'RealVector': 'SHOGUNSGTYPE',
'LongRealVector': 'SHOGUNSGTYPE',
'ComplexVector': 'SHOGUNSGTYPE',
'BoolMatrix': 'SHOGUNSGTYPE',
'CharMatrix': 'SHOGUNSGTYPE',
'ByteMatrix': 'SHOGUNSGTYPE',
'WordMatrix': 'SHOGUNSGTYPE',
'ShortMatrix': 'SHOGUNSGTYPE',
'IntMatrix': 'SHOGUNSGTYPE',
'LongIntMatrix': 'SHOGUNSGTYPE',
'ULongIntMatrix': 'SHOGUNSGTYPE',
'ShortRealMatrix': 'SHOGUNSGTYPE',
'RealMatrix': 'SHOGUNSGTYPE',
'LongRealMatrix': 'SHOGUNSGTYPE',
'ComplexMatrix': 'SHOGUNSGTYPE',
'StringCharList':'SHOGUNSGTYPE',
'StringWordList':'SHOGUNSGTYPE'
}
    t_INTLITERAL = r"-?[0-9]+"
    t_REALLITERAL = r"-?[0-9]+\.[0-9]+"
    t_FLOATLITERAL = r"-?[0-9]+\.[0-9]+f"
    t_STRINGLITERAL = '"[^"\n]*"'
    t_CHARLITERAL = "'[^']{1}'"
    t_COMMA = ","
    t_DOT = r"\."
    t_COLON = ":"
    t_EQUALS = "="
    t_LPAREN = r"\("
    t_RPAREN = r"\)"
    t_LSQUARE = r"\["
    t_RSQUARE = r"\]"
t_ignore = " \t"
def t_IDENTIFIER(self, t):
"[a-zA-Z][a-zA-Z0-9-_]*"
# Check for reserved words
t.type = self.reserved.get(t.value, 'IDENTIFIER')
return t
def t_NEWLINE(self, t):
r'\n'
t.lexer.lineno += 1
return t
def t_COMMENT(self, t):
r"\#.*\n"
t.lexer.lineno += 1
return t
def t_error(self, t):
raise TypeError("Failed to tokenize input. Unknown text on line %d '%s'" % (t.lineno, t.value,))
# Grammar specification
# ---------------------------------------
def p_program(self, p):
"program : statements"
p[0] = {"Program": p[1]}
def p_statements(self, p):
"""
statements : statement statements
| comment statements
|
"""
if len(p) > 2:
p[0] = p[1:2]
p[0].extend(p[2])
else:
p[0] = []
def p_comment(self, p):
"comment : COMMENT"
# Strip leading hashtag symbol and trailing newline
p[0] = {"Comment": p[1][1:-1],
"__PARSER_INFO_LINE_NO": p.lineno(1)}
def p_type(self, p):
"""
type : basictype
| shogunsgtype
| objecttype
"""
p[0] = p[1]
def p_basicType(self, p):
"basictype : BASICTYPE"
p[0] = {"BasicType": p[1]}
def p_shogunSGType(self, p):
"shogunsgtype : SHOGUNSGTYPE"
p[0] = {"ShogunSGType": p[1]}
def p_objectType(self, p):
"objecttype : IDENTIFIER"
p[0] = {"ObjectType": p[1]}
def p_argumentList(self, p):
"""
argumentList : argumentListNonEmpty
|
"""
arguments = []
if len(p) > 1:
arguments = p[1]
p[0] = {"ArgumentList": arguments}
def p_argumentListNonEmpty(self, p):
"""
argumentListNonEmpty : argumentListElement
| argumentListElement COMMA argumentListNonEmpty
"""
p[0] = [p[1]]
if len(p) > 2:
p[0].extend(p[3])
def p_argumentListElement(self, p):
"""
argumentListElement : expr
| identifier EQUALS expr
"""
if "Expr" in p[1]:
p[0] = p[1]
else:
p[0] = {"KeywordArgument": [p[1], p[3]]}
def p_identifier(self, p):
"identifier : IDENTIFIER"
p[0] = {"Identifier": p[1]}
def p_methodCall(self, p):
"methodCall : identifier DOT identifier LPAREN argumentList RPAREN"
p[0] = {"MethodCall": [p[1], p[3], p[5]]}
def p_staticCall(self, p):
"staticCall : type COLON identifier LPAREN argumentList RPAREN"
p[0] = {"StaticCall": [p[1], p[3], p[5]]}
def p_globalCall(self, p):
"globalCall : identifier LPAREN argumentList RPAREN"
p[0] = {"GlobalCall": [p[1], p[3]]}
def p_indexList(self, p):
"""
indexList : int
| int COMMA indexList
"""
p[0] = [p[1]]
if len(p) > 2:
p[0].extend(p[3])
def p_elementAccess(self, p):
"elementAccess : identifier LSQUARE indexList RSQUARE"
p[0] = {"ElementAccess": [p[1],
{"IndexList": p[3]}]}
def p_enum(self, p):
"enum : ENUMKEYWORD identifier DOT identifier"
p[0] = {"Enum": [p[2], p[4]]}
def p_string(self, p):
"string : STRINGLITERAL"
# Strip leading and trailing quotes
p[0] = {"StringLiteral": p[1][1:-1]}
def p_char(self, p):
"char : CHARLITERAL"
# Strip leading and trailing quotes
p[0] = {"CharLiteral": p[1][1:-1]}
def p_bool(self, p):
"bool : BOOLLITERAL"
p[0] = {"BoolLiteral": p[1]}
def p_int(self, p):
"int : INTLITERAL"
p[0] = {"IntLiteral": p[1]}
def p_real(self, p):
"real : REALLITERAL"
p[0] = {"RealLiteral": p[1]}
def p_float(self, p):
"float : FLOATLITERAL"
p[0] = {"FloatLiteral": p[1][:-1]}
def p_expr(self, p):
"""
expr : enum
| methodCall
| staticCall
| globalCall
| elementAccess
| string
| char
| bool
| int
| real
| float
| identifier
"""
p[0] = {"Expr": p[1]}
def p_assignment(self, p):
"""
assignment : identifier EQUALS expr
| elementAccess EQUALS expr
"""
p[0] = {"Assign": [p[1], p[3]]}
def p_initialisation(self, p):
"""
initialisation : type identifier EQUALS expr
| type identifier LPAREN argumentList RPAREN
"""
p[0] = {"Init": [p[1], p[2], p[4]]}
def p_listInitialisation(self, p):
"""
listInitialisation : type identifier LPAREN LSQUARE argumentList RSQUARE RPAREN
"""
p[0] = {"ListInit": [p[1], p[2], p[5]]}
def p_output(self, p):
"output : PRINTKEYWORD expr"
p[0] = {"Print": p[2]}
def p_statement(self, p):
"""
statement : initialisation NEWLINE
| listInitialisation NEWLINE
| assignment NEWLINE
| expr NEWLINE
| output NEWLINE
| NEWLINE
"""
p[0] = {"Statement": p[1],
"__PARSER_INFO_LINE_NO": p.lineno(-1)}
# Error rule for syntax errors
def p_error(self, p):
if p:
print("Syntax error in input: " +
str(p.value) + " on line " + str(p.lineno))
else:
print("Reached end of file without completing parse")
def parse(programString, filePath, generatedFilesOutputDir=None):
parser = FastParser(generatedFilesOutputDir)
# Parse input
return parser.parse(programString, filePath)
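# Illustrative sketch (not part of the original module): for the one-line
# program "a = 1", the returned object has roughly this shape (line-number
# metadata elided):
#
#     FastParser().parse("a = 1\n")
#     # -> {"Program": [{"Statement": {"Assign": [{"Identifier": "a"},
#     #                                            {"Expr": {"IntLiteral": "1"}}]},
#     #                  ...}],
#     #     "FilePath": "Unknown"}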
if __name__ == "__main__":
# Parse command line arguments
argparser = argparse.ArgumentParser()
argparser.add_argument("--pretty", action="store_true", help="If specified, output is pretty printed")
argparser.add_argument("path", nargs='?', help="Path to input file. If not specified input is read from stdin")
argparser.add_argument("--parser_files_dir", nargs='?', help='Path to directory where generated parser and lexer files should be stored.')
argparser.add_argument('--only_generate_parser_files', action="store_true", help="If specified, generate the parser files and quit without parsing stdin or the file at 'path'")
args = argparser.parse_args()
if args.only_generate_parser_files:
FastParser(args.parser_files_dir)
exit(0)
programString = ""
filePath = ""
# Read from specified file or, if not specified, from stdin
if args.path:
with open(args.path, 'r') as file:
programString = file.read()
filePath = args.path
else:
programString = sys.stdin.read()
filePath = "sys.stdin"
    indentWidth = 2 if args.pretty else None
# Parse input and print json output
program = parse(programString, filePath, args.parser_files_dir)
print(json.dumps(program, indent=indentWidth))
|
from pyzork.menu_scene import MenuScene
from pyzork.room_scene import RoomScene
# Responsible for loading scenes, processing commands,
# Owns all actors.
class Director:
def __init__(self, settings, game_data):
self.settings = settings
self.menu_data = game_data["menus"]
self.room_data = game_data["rooms"]
self.command_data = game_data["commands"]
self.current_scene = None
self.actor_player = None # TODO Make a player to track inventory state
# Start the game on the title-menu
self.next_scene_request = ("menu", self.menu_data[0]["id"])
def direct(self):
while True:
next_scene = self.get_next_scene()
if (not not next_scene
or next_scene.id != self.current_scene.id
or next_scene.type != self.current_scene.type):
self.current_scene = next_scene
self.current_scene.set_scene()
if self.settings["debug"]:
print(f"Next scene found: {self.current_scene.name}")
player_command_tuple = self.current_scene.run_scene()
if self.settings["debug"]:
print(f"Got command {player_command_tuple}")
should_exit = self.execute_player_command(player_command_tuple)
if should_exit:
break
# TODO replace with a "goodbye" scene?
return "Thanks for playing!"
# TODO Consider commands having their own handler?
def execute_player_command(self, player_command_tuple):
# First find the command prototype for this player_command
(prototype_command, scene_command) = player_command_tuple
command_kind = prototype_command["kind"]
if command_kind == "move-to-target":
# get the target to set the scene
room_target = scene_command["target"]
self.next_scene_request = ("room", room_target)
elif command_kind == "menu-game-exit":
# Special command that exits the while loop.
return True
elif command_kind == "menu-game-start":
# Special command that always goes to room-1
self.next_scene_request = ("room", 1)
elif command_kind == "menu-game-settings-low":
# Change graphics settings to low
self.settings["graphics"] = "low"
self.settings["display"]["use_truecolor"] = False
self.settings["display"]["use_8color"] = True
elif command_kind == "menu-game-settings-high":
            # Change graphics settings to high
self.settings["graphics"] = "high"
self.settings["display"]["use_truecolor"] = True
self.settings["display"]["use_8color"] = False
else:
raise Exception("Unknown command kind", command_kind)
return False
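    # Illustrative note (not part of the original class): a player command
    # tuple pairs a prototype from command_data with the scene-specific
    # payload, e.g.
    #     ({"kind": "move-to-target"}, {"target": 2})
    # which queues ("room", 2) as the next scene request, while
    #     ({"kind": "menu-game-exit"}, {})
    # returns True and ends the game loop.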
def get_next_scene(self):
if not self.next_scene_request:
return self.current_scene
(next_scene_type, next_scene_id) = self.next_scene_request
if not next_scene_type or not next_scene_id:
return self.current_scene
if next_scene_type == "room":
next_scene_data = next(iter([room for room in self.room_data if room["id"] == next_scene_id]), None)
if not next_scene_data:
raise Exception("No scene data found for id", next_scene_id)
scene = RoomScene(
settings=self.settings,
scene_data=next_scene_data,
command_data=self.command_data)
elif next_scene_type == "menu":
next_scene_data = next(iter([menu for menu in self.menu_data if menu["id"] == next_scene_id]), None)
if not next_scene_data:
raise Exception("No scene data found for id", next_scene_id)
scene = MenuScene(
settings=self.settings,
scene_data=next_scene_data,
command_data=self.command_data)
else:
raise Exception("Unknown scene type", next_scene_type)
# Clear out pending scene requests
        self.next_scene_request = ()
return scene
|
#!/usr/bin/python
import argparse
import sys, subprocess
import time
params = {
'GCI' : 'gcloud compute instances',
'ZONE' : 'us-west1-b',
'INSTANCE_NAME' : 'myinstance',
'TEMPLATE_NAME' : 'cpu-tiny',
# 'DISK' : 'disk1'
}
cpu_types = {
'cpu-tiny' : 'f1-micro', # $0.004, 1 cpu, 0.6 GB ram
'cpu-small' : 'g1-small', # $0.008, 1 cpu, 1.7 GB ram
'cpu-mid' : 'n1-standard-4', # $0.041, 4 cpu, 15.0 GB ram.
'cpu-big' : 'n1-standard-8', # $0.081, 8 cpu, 30.0 GB ram.
'gpu-tiny' : 'n1-standard-4', # $0.176, 4 cpu, 15.0 GB ram, 1 Nvidia K80
'gpu-small' : 'n1-standard-4', # $0.257, 4 cpu, 15.0 GB ram, 1 Nvidia P4
    'gpu-mid' : 'n1-standard-8', # $0.511, 8 cpu, 30.0 GB ram, 1 Nvidia P100
    'gpu-big' : 'n1-standard-8', # $0.821, 8 cpu, 30.0 GB ram, 1 Nvidia V100
}
gpu_types = {
'gpu-tiny' : 'nvidia-tesla-k80',
'gpu-small' : 'nvidia-tesla-p4',
'gpu-mid' : 'nvidia-tesla-p100',
'gpu-big' : 'nvidia-tesla-v100',
}
def start_func():
create_func()
ssh_func()
delete_func()
def create_func(type='cpu-tiny'):
    cpu_type = cpu_types.get(type)
    if cpu_type is None:
        raise ValueError('Invalid machine type: {0}'.format(type))
cpu_conf = '--machine-type={0} '.format(cpu_type)
    gpu_type = gpu_types.get(type)
gpu_conf = '--accelerator=type={0},count=1 '.format(gpu_type) if gpu_type else ''
    disk_conf = '--disk=name={DISK},device-name={DISK},mode=rw,boot=no '.format(DISK=params.get('DISK')) \
if params.get('DISK') else ''
cmd = '{GCI} create {INSTANCE_NAME} --preemptible --zone {ZONE} ' \
'--image=c1-deeplearning-pytorch-0-4-cu92-20180925 --image-project=ml-images ' \
'--boot-disk-size=30GB ' \
' '.format(**params)
cmd = cmd + cpu_conf + gpu_conf + disk_conf
return_code = subprocess.call( cmd, shell=True)
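# Illustrative note (not part of the original script): with the defaults
# above, create_func('gpu-small') shells out to roughly
#   gcloud compute instances create myinstance --preemptible --zone us-west1-b \
#     --image=c1-deeplearning-pytorch-0-4-cu92-20180925 --image-project=ml-images \
#     --boot-disk-size=30GB --machine-type=n1-standard-4 \
#     --accelerator=type=nvidia-tesla-p4,count=1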
def delete_func():
cmd ='{GCI} delete {INSTANCE_NAME} -q'.format( **params)
return_code = subprocess.call(cmd, shell=True)
list_func()
def list_func():
cmd = '{GCI} list '.format( **params)
return_code = subprocess.call(cmd, shell=True)
def ssh_func():
cmd = 'gcloud compute ssh {INSTANCE_NAME} -- -L 8888:localhost:8888'.format( **params)
return_code = subprocess.call(cmd, shell=True)
parser = argparse.ArgumentParser(description='gcloud compute instance.')
FUNCTION_MAP = {'start' : start_func,
'create' : create_func,
'delete' : delete_func,
'list' : list_func,
'ssh' : ssh_func}
start = time.time()
parser.add_argument('command', choices=FUNCTION_MAP.keys())
args = parser.parse_args()
func = FUNCTION_MAP[args.command]
func()
print("Time taken = {0:.2f} Seconds ".format((time.time() - start)))
|
from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from alembic import context
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from anyway.core.database import Base
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
config.set_main_option('sqlalchemy.url', os.environ.get('DATABASE_URL'))
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
import hashlib
import json
import time
import sqlite3 as sql
import random
def get_head(email):
return 'https://s.gravatar.com/avatar/' + hashlib.md5(email.lower().encode()).hexdigest() + '?s=144'
class DataBase:
def __init__(self):
self.file_db_init = "db_init.sql"
self.file_room_init = "room_init.sql"
self.secret = "This program is owned by Lance."
self.error_preview = "错误:"
self.success = 'Success.'
self.error = {
"Success": "%s" % self.success,
"Error": "%s 服务器内部错误...请提交BUG给管理员。" % self.error_preview,
"Auth": "%s Auth 错误,请重新登录。" % self.error_preview,
"Password": "%s 密码错误。" % self.error_preview,
"NoUser": "%s 没有这个用户。" % self.error_preview,
"UserExist": "%s 此用户已存在。" % self.error_preview,
}
self.errors = {
"Success": str(0),
"Error": str(1),
"Auth": str(2),
"Password": str(7),
"NoUser": str(9),
"UserExist": str(10)
}
self.error_messages = {
str(0): self.error["Success"],
str(1): self.error["Error"],
str(2): self.error["Auth"],
str(7): self.error["Password"],
str(9): self.error["NoUser"],
str(10): self.error["UserExist"]
}
self.tables = ['users', 'files']
self.sql_types = {"SQLite": 0, "PostgreSQL": 1}
self.sql_type = self.sql_types['SQLite']
self.sql_chars = ["?", "%s"]
self.sql_char = self.sql_chars[self.sql_type]
self.conn = None
self.connect_init()
def connect_init(self):
self.conn = sql.connect('data_sql.db', check_same_thread=False)
def v(self, string: str):
return string.replace('%s', self.sql_char)
def cursor_get(self):
cursor = self.conn.cursor()
return cursor
def cursor_finish(self, cursor):
self.conn.commit()
cursor.close()
def make_result(self, code, **args):
result = {
"code": int(code),
"message": self.error_messages[str(code)],
"data": args
}
return json.dumps(result)
def check_in(self, table, line, value):
cursor = self.cursor_get()
try:
cursor.execute("SELECT %s FROM %s WHERE %s = \'%s\'" % (line, table, line, value))
except Exception as e:
print(e)
return False
result = cursor.fetchall()
self.cursor_finish(cursor)
if len(result) > 0:
return True
return False
def db_init(self):
try:
cursor = self.cursor_get()
for table in self.tables:
try:
cursor.execute("DROP TABLE IF EXISTS %s" % table)
except Exception as e:
print('Error when dropping:', table, '\nException:\n', e)
self.cursor_finish(cursor)
cursor = self.cursor_get()
self.cursor_finish(cursor)
except Exception as e:
print(e)
self.conn.close()
self.connect_init()
cursor = self.cursor_get()
        # Only one statement can be executed at a time, so split on ';'; empty statements in between are not allowed either.
with open(self.file_db_init, encoding='utf8') as f:
string = f.read()
for s in string.split(';'):
try:
if s != '':
cursor.execute(s)
except Exception as e:
print('Error:\n', s, 'Exception:\n', e)
self.cursor_finish(cursor)
def create_user(self, username='Lance', password='',
email='lanceliang2018@163.com'):
if self.check_in("users", "username", username):
return self.make_result(self.errors["UserExist"])
cursor = self.cursor_get()
password = hashlib.md5(password.encode()).hexdigest()
cursor.execute(self.v("INSERT INTO users "
"(username, password, email) "
"VALUES (%s, %s, %s)"),
(username, password, email))
self.cursor_finish(cursor)
return self.make_result(0)
    # Check whether the given password matches the stored one
def user_check(self, username, password):
if self.check_in("users", "username", username) is False:
return False
cursor = self.cursor_get()
password = hashlib.md5(password.encode()).hexdigest()
cursor.execute(self.v("SELECT password FROM users WHERE username = %s"), (username,))
data = cursor.fetchall()
if len(data) == 0:
return False
storage = data[0][0]
# print(storage)
self.cursor_finish(cursor)
if storage == password:
return True
return False
    # Create an auth value to keep authentication simple. auth format: MD5(username, secret, time)
    # UPDATE: new LoginToken layout: auth_mix(32) + order(32) + noise(4) = (68)
    # Returns the login_token
def create_auth(self, username, password):
cursor = self.cursor_get()
if not self.user_check(username, password):
return self.make_result(self.errors["Password"])
string = "%s %s %s" % (username, self.secret, str(time.time()))
auth = hashlib.md5(string.encode()).hexdigest()
        # pre_auth is not needed when fetching the token; use a random number instead.
# pre_auth = auth[:4]
pre_auth = "%04x" % random.randint(0, 1 << 16)
auth_li = []
for i in range(0, len(auth), 2):
auth_li.append(auth[i:i+2])
        # Generate the order (a random permutation)
order = random.sample(range(0, 256), 16)
        # map the random numbers to a permutation
orderd = []
for i in range(len(order)):
# orderd.append({order[i]: i})
orderd.append({'num': order[i], 'key': i})
orderd.sort(key=lambda x: x['num'])
new_orderd = ['00', ] * 16
index = 0
for k in orderd:
            # the byte is complemented (0xff - value) here
new_orderd[k['key']] = "%02x" % (0xff - int(auth_li[index], 16))
index = index + 1
auth_mix = ''
for i in new_orderd:
auth_mix = auth_mix + i
result = '%s' % auth_mix
for i in order:
result = "%s%s" % (result, "%02x" % i)
login_token = result + pre_auth
        # pre_auth is only needed here
cursor.execute(self.v("UPDATE users SET auth = %s, pre_auth = %s WHERE username = %s"),
(auth, auth[:4], username))
self.cursor_finish(cursor)
print("DEBUG: auth:", auth)
return self.make_result(0, login_token={'login_token': login_token})
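    # Illustrative note (not part of the original class): the login_token
    # layout is auth_mix (32 hex chars, the byte-complemented and permuted
    # auth) + order (32 hex chars, the permutation) + a 4-char random
    # pre_auth. A client reverses it with decode_login_token() below and
    # then builds a session token as md5(auth + salt) + salt + auth[:4]
    # via make_token().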
def check_auth(self, auth):
        # Soft compatibility: anything longer than 32 chars is treated as a token.
if len(auth) > 32:
return self.check_token(auth)
result = self.check_in("users", "auth", auth)
if result is True:
return True
return False
    # Token format: salted + salt + pre_auth = (68)
def token_parse(self, token):
if len(token) != 68:
return '0' * 32
salted = token[:32]
salt = token[32:-4]
pre_auth = token[-4:]
cursor = self.cursor_get()
cursor.execute(self.v("SELECT auth, pre_auth FROM users WHERE pre_auth = %s"), (pre_auth, ))
data = cursor.fetchall()
self.cursor_finish(cursor)
        # pre_auth not found
if len(data) == 0:
return '0' * 32
auth_s = data[0][0]
return auth_s
    # Token format: salted + salt + pre_auth = (68)
def check_token(self, token):
if len(token) != 68:
return False
salted = token[:32]
salt = token[32:-4]
pre_auth = token[-4:]
cursor = self.cursor_get()
cursor.execute(self.v("SELECT auth, pre_auth FROM users WHERE pre_auth = %s"), (pre_auth, ))
data = cursor.fetchall()
self.cursor_finish(cursor)
        # pre_auth not found
if len(data) == 0:
return False
auth_s = data[0][0]
salted_s = hashlib.md5(("%s%s" % (auth_s, salt)).encode()).hexdigest()
if salted == salted_s:
return True
return False
def token2username(self, token):
if self.check_auth(token) is False:
return 'No_User'
auth = self.token_parse(token)
cursor = self.cursor_get()
cursor.execute(self.v("SELECT username FROM users WHERE auth = %s"), (auth, ))
username = cursor.fetchall()[0][0]
self.cursor_finish(cursor)
return username
def user_exist(self, username):
cursor = self.cursor_get()
cursor.execute(self.v("SELECT username FROM users WHERE username = %s"), (username, ))
data = cursor.fetchall()
self.cursor_finish(cursor)
if len(data) > 0:
return True
return False
def user_set_info(self, token, email: str = None):
if self.check_auth(token) is False:
return self.make_result(self.errors["Auth"])
cursor = self.cursor_get()
username = self.token2username(token)
if email is not None:
cursor.execute(self.v("UPDATE users SET email = %s WHERE username = %s"), (email, username))
self.cursor_finish(cursor)
return self.make_result(0)
def user_get_info(self, username):
if not self.user_exist(username):
return self.make_result(self.errors['NoUser'])
cursor = self.cursor_get()
cursor.execute(self.v("SELECT username, email, created_at, blog_title, blog_url WHERE username = %s"),
(username, ))
data = cursor.fetchall()[0]
self.cursor_finish(cursor)
return self.make_result(0, user_info={
'username': data[0], 'email': data[1], 'created_at': data[2], 'blog_title': data[3],
'blog_url': data[4]
})
def file_upload(self, token, filename: str = 'FILE', url: str = '', filesize: int=0):
if self.check_auth(token) is False:
return self.make_result(self.errors["Auth"])
username = self.token2username(token)
cursor = self.cursor_get()
uptime = int(time.time())
cursor.execute(self.v("INSERT INTO files (username, filename, url, uptime, filesize) "
"VALUES (%s, %s, %s, %s, %s)"),
(username, filename, url, str(uptime), filesize))
self.cursor_finish(cursor)
return self.make_result(0)
def file_get(self, token):
if self.check_auth(token) is False:
return self.make_result(self.errors["Auth"])
username = self.token2username(token)
cursor = self.cursor_get()
cursor.execute(self.v("SELECT DISTINCT username, filename, url, uptime, filesize FROM files "
"WHERE username = %s ORDER BY filename "),
(username, ))
data = cursor.fetchall()
self.cursor_finish(cursor)
result = []
for d in data:
result.append({'username': d[0], 'filename': d[1], 'url': d[2], 'uptime': d[3], 'filesize': d[4]})
return self.make_result(0, files=result)
def jsonify(string: str):
return json.loads(string)
def decode_login_token(login_token):
if len(login_token) != 68:
return '0' * 32
auth_mix = login_token[:32]
order = login_token[32:64]
orderd = []
for i in range(0, len(order), 2):
orderd.append({'num': int(order[i:i+2], 16), 'key': i//2})
orderd.sort(key=lambda x: x['num'])
auth = ''
for i in orderd:
auth = auth + "%02x" % (0xff - int(auth_mix[i['key']*2:i['key']*2+2], 16))
return auth
def make_token(auth):
salt = '%032x' % random.randint(0, 1 << (4 * 32))
salted = hashlib.md5(("%s%s" % (auth, salt)).encode()).hexdigest()
token = "%s%s%s" % (salted, salt, auth[:4])
return token
if __name__ == '__main__':
db = DataBase()
db.db_init()
db.create_user(username='Lance', password='')
_au = db.create_auth(username='Lance', password='')
print(_au)
_au = jsonify(_au)['data']['login_token']['login_token']
_au = decode_login_token(_au)
print(_au)
_token = make_token(auth=_au)
print(db.check_auth(auth=_token))
print(db.file_get(token=_token))
exit(0)
print(db.file_upload(_au, filename='Name', filesize=32, url='https://baidu.com/index.html'))
print(db.file_get(_au))
|
import pytest
from django.urls import resolve, reverse
from fahari.users.models import User
pytestmark = pytest.mark.django_db
def test_detail(user: User):
assert (
reverse("users:detail", kwargs={"username": user.username}) == f"/users/{user.username}/"
)
assert resolve(f"/users/{user.username}/").view_name == "users:detail"
def test_update():
assert reverse("users:update") == "/users/~update/"
assert resolve("/users/~update/").view_name == "users:update"
def test_redirect():
assert reverse("users:redirect") == "/users/~redirect/"
assert resolve("/users/~redirect/").view_name == "users:redirect"
|
from OpenGLCffi.GLES2 import params
@params(api='gles2', prms=['texture', 'target', 'origtexture', 'internalformat', 'minlevel', 'numlevels', 'minlayer', 'numlayers'])
def glTextureViewEXT(texture, target, origtexture, internalformat, minlevel, numlevels, minlayer, numlayers):
pass
|
from sklearn.preprocessing._function_transformer import FunctionTransformer as Op
import lale.helpers
import lale.operators
import lale.docstrings
from numpy import nan, inf
class FunctionTransformerImpl():
def __init__(self, func=None, inverse_func=None, validate=None, accept_sparse=False, pass_y='deprecated', check_inverse=True, kw_args=None, inv_kw_args=None):
self._hyperparams = {
'func': func,
'inverse_func': inverse_func,
'validate': validate,
'accept_sparse': accept_sparse,
'pass_y': pass_y,
'check_inverse': check_inverse,
'kw_args': kw_args,
'inv_kw_args': inv_kw_args}
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if (y is not None):
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'description': 'inherited docstring for FunctionTransformer Constructs a transformer from an arbitrary callable.',
'allOf': [{
'type': 'object',
'required': ['func', 'inverse_func', 'validate', 'accept_sparse', 'pass_y', 'check_inverse', 'kw_args', 'inv_kw_args'],
'relevantToOptimizer': ['accept_sparse', 'pass_y'],
'additionalProperties': False,
'properties': {
'func': {
'anyOf': [{
'type': 'object'}, {
'enum': [None]}],
'default': None,
'description': 'The callable to use for the transformation'},
'inverse_func': {
'anyOf': [{
'type': 'object'}, {
'enum': [None]}],
'default': None,
'description': 'The callable to use for the inverse transformation'},
'validate': {
'anyOf': [{
'type': 'boolean'}, {
'enum': [None]}],
'default': None,
'description': 'Indicate that the input X array should be checked before calling ``func``'},
'accept_sparse': {
'type': 'boolean',
'default': False,
'description': 'Indicate that func accepts a sparse matrix as input'},
'pass_y': {
'anyOf': [{
'type': 'boolean'}, {
'enum': ['deprecated']}],
'default': 'deprecated',
'description': 'Indicate that transform should forward the y argument to the inner callable'},
'check_inverse': {
'type': 'boolean',
'default': True,
'description': 'Whether to check that or ``func`` followed by ``inverse_func`` leads to the original inputs'},
'kw_args': {
'anyOf': [{
'type': 'object'}, {
'enum': [None]}],
'default': None,
'description': 'Dictionary of additional keyword arguments to pass to func.'},
'inv_kw_args': {
'anyOf': [{
'type': 'object'}, {
'enum': [None]}],
'default': None,
'description': 'Dictionary of additional keyword arguments to pass to inverse_func.'},
}}],
}
_input_fit_schema = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'description': 'Fit transformer by checking X.',
'type': 'object',
'required': ['X'],
'properties': {
'X': {
'type': 'array',
'items': {
'type': 'array',
'items': {
'type': 'number'},
},
'description': 'Input array.'},
},
}
_input_transform_schema = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'description': 'Transform X using the forward function.',
'type': 'object',
'required': ['X'],
'properties': {
'X': {
'type': 'array',
'items': {
'type': 'array',
'items': {
'type': 'number'},
},
'description': 'Input array.'},
'y': {
'laleType': 'Any',
'XXX TODO XXX': '(ignored)',
'description': ''},
},
}
_output_transform_schema = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'description': 'Transformed input.',
'type': 'array',
'items': {
'type': 'array',
'items': {
'type': 'number'},
},
}
_combined_schemas = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'description': 'Combined schema for expected data and hyperparameters.',
'documentation_url': 'https://scikit-learn.org/0.20/modules/generated/sklearn.preprocessing.FunctionTransformer#sklearn-preprocessing-functiontransformer',
'type': 'object',
'tags': {
'pre': [],
'op': ['transformer'],
'post': []},
'properties': {
'hyperparams': _hyperparams_schema,
'input_fit': _input_fit_schema,
'input_transform': _input_transform_schema,
'output_transform': _output_transform_schema},
}
lale.docstrings.set_docstrings(FunctionTransformerImpl, _combined_schemas)
FunctionTransformer = lale.operators.make_operator(FunctionTransformerImpl, _combined_schemas)
|
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class NumberOfEntities:
__slots__ = [
'num_subscriptions',
'num_guard_conditions',
'num_timers',
'num_clients',
'num_services']
def __init__(self, num_subs=0, num_gcs=0, num_timers=0, num_clients=0, num_services=0):
self.num_subscriptions = num_subs
self.num_guard_conditions = num_gcs
self.num_timers = num_timers
self.num_clients = num_clients
self.num_services = num_services
def __add__(self, other):
result = self.__class__()
for attr in result.__slots__:
left = getattr(self, attr)
right = getattr(other, attr)
setattr(result, attr, left + right)
return result
def __repr__(self):
return '<{0}({1}, {2}, {3}, {4}, {5})>'.format(
self.__class__.__name__, self.num_subscriptions, self.num_guard_conditions,
self.num_timers, self.num_clients, self.num_services)
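# Worked example of the element-wise addition above (illustrative values only):
# NumberOfEntities(1, 0, 2, 0, 0) + NumberOfEntities(0, 1, 0, 0, 0)
# evaluates to <NumberOfEntities(1, 1, 2, 0, 0)>.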
class Waitable:
"""
Add something to a wait set and execute it.
This class wraps a collection of entities which can be added to a wait set.
"""
def __init__(self, callback_group):
# A callback group to control when this entity can execute (used by Executor)
self.callback_group = callback_group
self.callback_group.add_entity(self)
# Flag set by executor when a handler has been created but not executed (used by Executor)
self._executor_event = False
# List of Futures that have callbacks needing execution
self._futures = []
def add_future(self, future):
self._futures.append(future)
def remove_future(self, future):
self._futures.remove(future)
def is_ready(self, wait_set):
"""Return True if entities are ready in the wait set."""
raise NotImplementedError('Must be implemented by subclass')
def take_data(self):
"""Take stuff from lower level so the wait set doesn't immediately wake again."""
raise NotImplementedError('Must be implemented by subclass')
async def execute(self, taken_data):
"""Execute work after data has been taken from a ready wait set."""
raise NotImplementedError('Must be implemented by subclass')
def get_num_entities(self):
"""Return number of each type of entity used."""
raise NotImplementedError('Must be implemented by subclass')
def add_to_wait_set(self, wait_set):
"""Add entities to wait set."""
raise NotImplementedError('Must be implemented by subclass')
|
from math import pi, sqrt, sin, cos
from raytracer.tuple import (
tuple,
point,
vector,
magnitude,
normalize,
dot,
cross,
Color,
)
from raytracer.canvas import canvas
from raytracer.util import equal
from raytracer.matrices import Matrix, I
from raytracer.transformations import (
translation,
scaling,
rotation_x,
rotation_y,
rotation_z,
shearing,
view_transform
)
from raytracer.rays import Ray
from raytracer.spheres import Sphere
from raytracer.planes import Plane
from raytracer.lights import PointLight
from raytracer.materials import Material, lighting
from raytracer.camera import Camera
from raytracer.world import World, default_world
def render():
floor = Plane()
# floor.transform = scaling(10, 0.01, 10)
# floor.material = Material()
# floor.material.color = Color(1, 0.9, 0.9)
# floor.material.specular = 0
ceiling = Plane()
ceiling.transform = translation(0, 15, 0)
ceiling.material = Material()
ceiling.material.color = Color(0.1, 0.1, 0.5)
ceiling.material.ambient = 0.9
ceiling.material.diffuse = 1
ceiling.material.specular = 0
left_wall = Plane()
left_wall.transform = translation(0, 0, 5) @ rotation_y(-pi/4) @ rotation_x(pi/2)
left_wall.material = Material()
left_wall.material.color = Color(1.0, 0.2, 0.2)
left_wall.material.specular = 0
# right_wall = Sphere()
# right_wall.transform = translation(0, 0, 5) @ rotation_y(pi/4) @ rotation_x(pi/2) @ scaling(10, 0.01, 10)
# right_wall.material = floor.material
# create objects that will be in the scene
middle = Sphere()
middle.transform = translation(-0.5, 0, 0.5)
middle.material = Material()
middle.material.color = Color(0.1, 1, 0.5)
middle.material.diffuse = 0.7
middle.material.specular = 0.3
right = Sphere()
right.transform = translation(1.5, 0.5, -0.5) @ scaling(0.5, 0.5, 0.5)
right.material = Material()
right.material.color = Color(0.5, 1, 0.1)
right.material.diffuse = 0.7
right.material.specular = 0.3
left = Sphere()
left.transform = translation(-1.5, 0.33, -0.75) @ scaling(0.33, 0.33, 0.33)
left.material = Material()
left.material.color = Color(1, 0.8, 0.1)
left.material.diffuse = 0.7
left.material.specular = 0.3
# create world
world = World()
world.light = PointLight(point(-10, 10, -10), Color(1, 1, 1))
world.add_object(floor)
world.add_object(ceiling)
# world.add_object(left_wall)
# world.add_object(right_wall)
world.add_object(middle)
world.add_object(right)
world.add_object(left)
for i in range(30):
s = Sphere()
s.transform = translation(-2.0, cos(i)+0.5, i) @ scaling(0.33, 0.33, 0.33)
s.material = Material()
s.material.color = Color(0.7, 0.1, 0.1)
s.material.diffuse = 0.7
s.material.specular = 0.3
world.add_object(s)
# create camera
scale = 4
camera = Camera(100*scale, 50*scale, pi/3)
camera.transform = view_transform(
point(0, 1.5, -5),
point(0, 1, 0),
vector(0, 1, 0)
)
# render
image = camera.render(world, verbose=True)
return image
render().save("chapter9_planes.ppm")
|
#!/usr/bin/env python3
'''This is a copy of the python script that bashtop starts in a coprocess when using psutil for data collection'''
import os, sys, subprocess, re, time, psutil
from datetime import timedelta
from collections import defaultdict
from typing import List, Set, Dict, Tuple, Optional, Union
system: str
if "linux" in sys.platform: system = "Linux"
elif "bsd" in sys.platform: system = "BSD"
elif "darwin" in sys.platform: system = "MacOS"
else: system = "Other"
parent_pid: int = psutil.Process(os.getpid()).ppid()
allowed_commands: Tuple[str] = (
'get_proc',
'get_disks',
'get_cpu_name',
'get_cpu_cores',
'get_nics',
'get_cpu_cores',
'get_cpu_usage',
'get_cpu_freq',
'get_uptime',
'get_load_avg',
'get_mem',
'get_detailed_names_cmd',
'get_detailed_mem_time',
'get_net',
'get_cmd_out',
'get_sensors',
'get_sensors_check',
'get_ms'
)
command: str = ''
cpu_count: int = psutil.cpu_count()
disk_hist: Dict = {}
def cleaned(string: str) -> str:
'''Escape characters not suitable for "echo -e" in bash'''
return string.replace("\\", "\\\\").replace("$", "\\$").replace("\n", "\\n").replace("\t", "\\t").replace("\"", "\\\"").replace("\'", "\\\'")
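# For illustration (hypothetical input): cleaned('say "hi"\t$HOME') returns
# 'say \\"hi\\"\\t\\$HOME', i.e. quotes, tabs and "$" are escaped so the text
# survives being passed through bash's "echo -e" unexpanded.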
def get_cmd_out(cmd: str):
'''Save bash the trouble of creating child processes by running through python instead'''
print(subprocess.check_output(cmd, shell=True, universal_newlines=True).rstrip())
def get_ms():
'''Get current epoch millisecond'''
t = str(time.time()).split(".")
print(f'{t[0]}{t[1][:3]}')
def get_sensors():
'''A clone of "sensors" but using psutil'''
temps = psutil.sensors_temperatures()
if not temps:
return
try:
for name, entries in temps.items():
print(name)
for entry in entries:
print(f'{entry.label or name}: {entry.current}°C (high = {entry.high}°C, crit = {entry.critical}°C)')
print()
except:
pass
def get_sensors_check():
'''Check if get_sensors() output contains accepted CPU temperature values'''
if not hasattr(psutil, "sensors_temperatures"): print("false"); return
try:
temps = psutil.sensors_temperatures()
except:
pass
print("false"); return
if not temps: print("false"); return
try:
for _, entries in temps.items():
for entry in entries:
if entry.label.startswith(('Package', 'Core 0', 'Tdie')):
print("true")
return
except:
pass
print("false")
def get_cpu_name():
'''Fetch a suitable CPU identifier from the CPU model name string'''
name: str = ""
command: str = ""
all_info: str = ""
rem_line: str = ""
if system == "Linux":
command = "cat /proc/cpuinfo"
rem_line = "model name"
elif system == "MacOS":
command ="sysctl -n machdep.cpu.brand_string"
elif system == "BSD":
command ="sysctl hw.model"
rem_line = "hw.model"
all_info = subprocess.check_output("LANG=C " + command, shell=True, universal_newlines=True)
if rem_line:
for line in all_info.split("\n"):
if rem_line in line:
name = re.sub( ".*" + rem_line + ".*:", "", line,1).lstrip()
else:
name = all_info
if "Xeon" in name:
name = name.split(" ")
name = name[name.index("CPU")+1]
elif "Ryzen" in name:
name = name.split(" ")
name = " ".join(name[name.index("Ryzen"):name.index("Ryzen")+3])
elif "CPU" in name:
name = name.split(" ")
name = name[name.index("CPU")-1]
print(name)
def get_cpu_cores():
'''Get number of CPU cores and threads'''
cores: int = psutil.cpu_count(logical=False)
threads: int = psutil.cpu_count(logical=True)
print(f'{cores} {threads if threads else cores}')
def get_cpu_usage():
cpu: float = psutil.cpu_percent(percpu=False)
threads: List[float] = psutil.cpu_percent(percpu=True)
print(f'{cpu:.0f}')
for thread in threads:
print(f'{thread:.0f}')
def get_cpu_freq():
'''Get current CPU frequency'''
try:
print(f'{psutil.cpu_freq().current:.0f}')
except:
print(0)
def get_uptime():
'''Get current system uptime'''
print(str(timedelta(seconds=round(time.time()-psutil.boot_time(),0)))[:-3])
def get_load_avg():
'''Get CPU load average'''
for lavg in os.getloadavg():
print(round(lavg, 2), ' ', end='')
print()
def get_mem():
'''Get current system memory and swap usage'''
mem = psutil.virtual_memory()
swap = psutil.swap_memory()
try:
cmem = mem.cached>>10
except:
cmem = mem.active>>10
print(mem.total>>10, mem.free>>10, mem.available>>10, cmem, swap.total>>10, swap.free>>10)
def get_nics():
'''Get a list of all network devices sorted by highest throughput'''
io_all = psutil.net_io_counters(pernic=True)
up_stat = psutil.net_if_stats()
for nic in sorted(psutil.net_if_addrs(), key=lambda nic: (io_all[nic].bytes_recv + io_all[nic].bytes_sent), reverse=True):
if up_stat[nic].isup is False:
continue
print(nic)
def get_net(net_dev: str):
'''Emulated /proc/net/dev for selected network device'''
net = psutil.net_io_counters(pernic=True)[net_dev]
print(0,net.bytes_recv,0,0,0,0,0,0,0,net.bytes_sent)
def get_detailed_names_cmd(pid: int):
'''Get name, parent name, username and arguments for selected pid'''
p = psutil.Process(pid)
pa = psutil.Process(p.ppid())
with p.oneshot():
print(p.name())
print(pa.name())
print(p.username())
cmd = ' '.join(p.cmdline()) or '[' + p.name() + ']'
print(cleaned(cmd))
def get_detailed_mem_time(pid: int):
'''Get memory usage and runtime for selected pid'''
p = psutil.Process(pid)
with p.oneshot():
print(p.memory_info().rss)
print(timedelta(seconds=round(time.time()-p.create_time(),0)))
def get_proc(sorting='cpu lazy', tree=False, prog_len=0, arg_len=0, search='', reverse=True, proc_per_cpu=True, max_lines=0):
'''List all processess with pid, name, arguments, threads, username, memory percent and cpu percent'''
line_count: int = 0
err: float = 0.0
reverse = not reverse
if sorting == 'pid':
sort_cmd = "p.info['pid']"
elif sorting == 'program' or tree and sorting == "arguments":
sort_cmd = "p.info['name']"
reverse = not reverse
elif sorting == 'arguments':
sort_cmd = "' '.join(str(p.info['cmdline'])) or p.info['name']"
reverse = not reverse
elif sorting == 'threads':
sort_cmd = "str(p.info['num_threads'])"
elif sorting == 'user':
sort_cmd = "p.info['username']"
reverse = not reverse
elif sorting == 'memory':
sort_cmd = "str(p.info['memory_percent'])"
elif sorting == 'cpu responsive':
sort_cmd = "p.info['cpu_percent']" if proc_per_cpu else "(p.info['cpu_percent'] / cpu_count)"
else:
sort_cmd = "(sum(p.info['cpu_times'][:2] if not p.info['cpu_times'] == 0.0 else [0.0, 0.0]) * 1000 / (time.time() - p.info['create_time']))"
if tree:
proc_tree(width=prog_len + arg_len, sorting=sort_cmd, reverse=reverse, max_lines=max_lines, proc_per_cpu=proc_per_cpu, search=search)
return
print(f"{'Pid:':>7} {'Program:':<{prog_len}}", f"{'Arguments:':<{arg_len-4}}" if arg_len else '', f"{'Threads:' if arg_len else ' Tr:'} {'User:':<9}Mem%{'Cpu%':>11}", sep='')
for p in sorted(psutil.process_iter(['pid', 'name', 'cmdline', 'num_threads', 'username', 'memory_percent', 'cpu_percent', 'cpu_times', 'create_time'], err), key=lambda p: eval(sort_cmd), reverse=reverse):
if p.info['name'] == 'idle' or p.info['name'] == err or p.info['pid'] == err:
continue
if p.info['cmdline'] == err:
p.info['cmdline'] = ""
if p.info['username'] == err:
p.info['username'] = "?"
if p.info['num_threads'] == err:
p.info['num_threads'] = 0
if search:
found = False
for value in [ p.info['name'], ' '.join(p.info['cmdline']), str(p.info['pid']), p.info['username'] ]:
if search in value:
found = True
break
if not found:
continue
cpu = p.info['cpu_percent'] if proc_per_cpu else (p.info['cpu_percent'] / psutil.cpu_count())
mem = p.info['memory_percent']
cmd = ' '.join(p.info['cmdline']) or '[' + p.info['name'] + ']'
print(f"{p.info['pid']:>7} ",
f"{cleaned(p.info['name']):<{prog_len}.{prog_len-1}}",
f"{cleaned(cmd):<{arg_len}.{arg_len-1}}" if arg_len else '',
f"{p.info['num_threads']:>4} " if p.info['num_threads'] < 1000 else '999> ',
f"{p.info['username']:<9.9}" if len(p.info['username']) < 10 else f"{p.info['username'][:8]:<8}+",
f"{mem:>4.1f}" if mem < 100 else f"{mem:>4.0f} ",
f"{cpu:>11.1f} " if cpu < 100 else f"{cpu:>11.0f} ",
sep='')
line_count += 1
if max_lines and line_count == max_lines:
break
def proc_tree(width: int, sorting: str = 'cpu lazy', reverse: bool = True, max_lines: int = 0, proc_per_cpu=True, search=''):
'''List all processess in a tree view with pid, name, threads, username, memory percent and cpu percent'''
tree_line_count: int = 0
err: float = 0.0
def create_tree(parent: int, tree, indent: str = '', inindent: str = ' ', found: bool = False):
nonlocal infolist, tree_line_count, max_lines, tree_width, proc_per_cpu, search
cont: bool = True
if max_lines and tree_line_count >= max_lines:
return
try:
name: str = psutil.Process(parent).name()
if name == "idle": return
except psutil.Error:
pass
name: str = ''
try:
getinfo: Dict = infolist[parent]
except:
pass
getinfo: bool = False
if search and not found:
for value in [ name, str(parent), getinfo['username'] if getinfo else '' ]:
if search in value:
found = True
break
if not found:
cont = False
if cont: print(f"{f'{inindent}{parent} {cleaned(name)}':<{tree_width}.{tree_width-1}}", sep='', end='')
if getinfo and cont:
			if getinfo['num_threads'] == err:
				getinfo['num_threads'] = 0
			if getinfo['username'] == err:
				getinfo['username'] = "?"
cpu = getinfo['cpu_percent'] if proc_per_cpu else (getinfo['cpu_percent'] / psutil.cpu_count())
print(f"{getinfo['num_threads']:>4} " if getinfo['num_threads'] < 1000 else '999> ',
f"{getinfo['username']:<9.9}" if len(getinfo['username']) < 10 else f"{getinfo['username'][:8]:<8}+",
f"{getinfo['memory_percent']:>4.1f}" if getinfo['memory_percent'] < 100 else f"{getinfo['memory_percent']:>4.0f} ",
f"{cpu:>11.1f} " if cpu < 100 else f"{cpu:>11.0f} ",
sep='')
elif cont:
print(f"{'':>14}{'0.0':>4}{'0.0':>11} ", sep='')
tree_line_count += 1
if parent not in tree:
return
children = tree[parent][:-1]
for child in children:
create_tree(child, tree, indent + " │ ", indent + " ├─ ", found=found)
if max_lines and tree_line_count >= max_lines:
break
child = tree[parent][-1]
create_tree(child, tree, indent + " ", indent + " └─ ")
infolist: Dict = {}
tree: List = defaultdict(list)
for p in sorted(psutil.process_iter(['pid', 'name', 'num_threads', 'username', 'memory_percent', 'cpu_percent', 'cpu_times', 'create_time'], err), key=lambda p: eval(sorting), reverse=reverse):
try:
tree[p.ppid()].append(p.pid)
except (psutil.NoSuchProcess, psutil.ZombieProcess):
pass
else:
infolist[p.pid] = p.info
if 0 in tree and 0 in tree[0]:
tree[0].remove(0)
tree_width: int = width + 8
print(f"{' Tree:':<{tree_width-4}}", 'Threads: ', f"{'User:':<9}Mem%{'Cpu%':>11}", sep='')
create_tree(min(tree), tree)
def get_disks(exclude: str = None, filtering: str = None):
'''Get stats, current read and current write for all disks'''
global disk_hist
disk_read: int = 0
disk_write: int = 0
dev_name: str
disk_name: str
disk_list: List[str] = []
excludes: List[str] = []
if exclude: excludes = exclude.split(' ')
if system == "BSD": excludes += ["devfs", "tmpfs", "procfs", "linprocfs", "gvfs", "fusefs"]
if filtering: filtering: Tuple[str] = tuple(filtering.split(' '))
io_counters = psutil.disk_io_counters(perdisk=True if system == "Linux" else False, nowrap=True)
print("Ignored line")
for disk in psutil.disk_partitions():
disk_io = None
disk_name = disk.mountpoint.rsplit('/', 1)[-1] if not disk.mountpoint == "/" else "root"
while disk_name in disk_list: disk_name += "_"
disk_list += [disk_name]
if excludes and disk.fstype in excludes or filtering and not disk_name.endswith(filtering):
continue
if system == "MacOS" and disk.mountpoint == "/private/var/vm":
continue
try:
disk_u = psutil.disk_usage(disk.mountpoint)
except:
pass
print(f'{disk.device} {disk_u.total >> 10} {disk_u.used >> 10} {disk_u.free >> 10} {disk_u.percent:.0f} ', end='')
try:
if system == "Linux":
dev_name = os.path.realpath(disk.device).rsplit('/', 1)[-1]
if dev_name.startswith("md"):
try:
dev_name = dev_name[:dev_name.index("p")]
except:
pass
disk_io = io_counters[dev_name]
elif disk.mountpoint == "/":
disk_io = io_counters
else:
raise Exception
disk_read = disk_io.read_bytes
disk_write = disk_io.write_bytes
disk_read -= disk_hist[disk.device][0]
disk_write -= disk_hist[disk.device][1]
except:
pass
disk_read = 0
disk_write = 0
if disk_io: disk_hist[disk.device] = (disk_io.read_bytes, disk_io.write_bytes)
print(f'{disk_read >> 10} {disk_write >> 10} {disk_name}')
#* The script takes input over coproc pipes and runs command if in the accepted commands list
while command != 'quit':
if not psutil.pid_exists(parent_pid):
quit()
try:
command = input()
except:
pass
quit()
if not command or command == 'test':
continue
elif command.startswith(allowed_commands):
try:
exec(command)
except Exception as e:
pass
print()
print('/ERROR')
print(f'PSUTIL ERROR! Command: {command}\n{e}', file=sys.stderr)
else:
continue
print('/EOL')
#print(f'{command}', file=sys.stderr)
|
#! /usr/bin/python3 -i
import unicodedata
from tokenizers import Tokenizer,models,pre_tokenizers,normalizers,decoders,trainers
from transformers import RemBertTokenizerFast,AutoTokenizer
tkz=AutoTokenizer.from_pretrained("KoichiYasuoka/bert-base-japanese-luw-upos")
alp=[c for c in tkz.convert_ids_to_tokens([i for i in range(len(tkz))]) if len(c)==1 and unicodedata.name(c).startswith("CJK UNIFIED")]
pst=tkz.backend_tokenizer.post_processor
tkz=Tokenizer(models.Unigram())
tkz.pre_tokenizer=pre_tokenizers.Whitespace()
tkz.normalizer=normalizers.Sequence([normalizers.Nmt(),normalizers.NFKC()])
trn=trainers.UnigramTrainer(vocab_size=250300,special_tokens=["[PAD]","[UNK]","[CLS]","[SEP]","[MASK]","<special0>","<special1>","<special2>","<special3>","<special4>","<special5>","<special6>","<special7>","<special8>","<special9>"],initial_alphabet=alp,unk_token="[UNK]",max_piece_length=16,n_sub_iterations=2)
tkz.train(files=["udja.luw.txt","aozora.luw.txt","aug.luw.txt"],trainer=trn)
tkz.post_processor=pst
tkz.decoder=decoders.WordPiece(prefix="",cleanup=True)
tkz.save("tokenizer.json")
tokenizer=RemBertTokenizerFast(tokenizer_file="tokenizer.json",vocab_file="/dev/null",bos_token="[CLS]",cls_token="[CLS]",unk_token="[UNK]",pad_token="[PAD]",mask_token="[MASK]",sep_token="[SEP]",do_lower_case=False,keep_accents=True)
tokenizer.save_pretrained("Japanese-LUW-Tokenizer")
|
from rs4 import asynchat, asyncore
import re, os, sys
import ssl
import socket
import time
import zlib
from warnings import warn
from errno import ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EWOULDBLOCK
if os.name == "nt":
from errno import WSAENOTCONN
import select
import threading
from . import adns
from ..protocols.http2 import H2_PROTOCOLS
from ..athreads.fifo import await_fifo
from rs4.ssl_ import resolve_cert_reqs, resolve_ssl_version, create_urllib3_context
from collections import deque
from ..protocols.http import respcodes
import random
DEBUG = 0
DEFAULT_ZOMBIE_TIMEOUT = 60
DEFAULT_KEEP_ALIVE = 2
class SocketPanic (Exception): pass
class TimeOut (Exception): pass
class AsynConnect (asynchat.async_chat):
ac_in_buffer_size = 65535
ac_out_buffer_size = 65535
zombie_timeout = DEFAULT_ZOMBIE_TIMEOUT
keep_alive = DEFAULT_KEEP_ALIVE
request_count = 0
active = 0
fifo_class = deque
keep_connect = True
is_ssl = False
def __init__ (self, address, lock = None, logger = None):
self.address = address
self.lock = lock
self.logger = logger
self._cv = threading.Condition ()
self.__sendlock = None
self.__no_more_request = False
self.set_event_time ()
self.handler = None
self.auth = None
self.proxy = False
self.initialize_connection ()
self._closed = False
self.backend = False
self.ac_in_buffer = b''
self.incoming = []
self.producer_fifo = self.fifo_class ()
asyncore.dispatcher.__init__(self)
def __repr__ (self):
return "<AsynConnect %s:%d>" % self.address
def duplicate (self):
new_asyncon = self.__class__ (self.address, self.lock, self.logger)
# used in socketpool
new_asyncon.proxy = self.proxy
# used in skitai cluster manager
new_asyncon.keep_alive = self.keep_alive
new_asyncon.auth = self.auth
new_asyncon.backend = self.backend
new_asyncon.keep_connect = self.keep_connect
return new_asyncon
def set_backend (self, backend_keep_alive = 10):
self.backend = True
self.keep_alive = backend_keep_alive
def set_auth (self, auth):
self.auth = auth
def get_auth (self):
return self.auth
def close (self):
		# Sometimes the socket is not closed at once, possibly related to SSL sockets,
		# so prevent double callbacks in the request handler.
if self.socket:
# self.socket is still None, when DNS not found
asynchat.async_chat.close (self)
self._fileno = None
# re-init asychat
self.ac_in_buffer = b''
self.incoming = []
self.producer_fifo.clear()
self._proto = None
self._closed = True
if not self.handler:
# return to the pool
return self.set_active (False)
if not self.errcode:
# disconnect intentionally
return
		handler, self.handler = self.handler, None
keep_active = False
try:
keep_active = handler.connection_closed (self.errcode, self.errmsg)
except:
self.trace ()
if not keep_active:
#print (handler.request.meta ['sid'], 'not keepalive...')
self.set_active (False)
if self.errcode not in (704, 712, 722):
# controlled shutdown
self.logger (
".....socket %s has been closed (reason: %d)" % ("%s:%d" % self.address, self.errcode),
"info"
)
# DO NOT Change any props, because may be request has been restarted
def end_tran (self):
if DEBUG:
self.logger ('end_tran {rid:%s} %s' % (self.handler.request.meta.get ('req_id', -1), self.handler.request.uri), 'debug')
if not self.backend:
self.del_channel ()
self.set_active (False)
if not self.keep_connect:
self.disconnect ()
def use_sendlock (self):
self.__sendlock = threading.Lock ()
self.initiate_send = self._initiate_send_ts
def _initiate_send_ts (self):
with self.__sendlock:
return asynchat.async_chat.initiate_send (self)
def get_proto (self):
with self.lock:
p = self._proto
return p
def set_proto (self, proto):
with self.lock:
self._proto = proto
def get_history (self):
return self.__history
def initialize_connection (self):
self._raised_ENOTCONN = 0 # for win32
self.__history = []
self._proto = None
self._handshaking = False
self._handshaked = False
self.established = False
self.upgraded = False
def set_event_time (self):
self.event_time = time.time ()
def is_proxy (self):
return self.proxy
def log (self, msg, logtype):
if self.handler is not None and hasattr (self.handler, "log"):
self.handler.log (msg, logtype)
elif self.logger:
self.logger (msg, logtype)
else:
warn ("No logger")
def trace (self):
if self.handler is not None and hasattr (self.handler, "trace"):
self.handler.trace ()
elif self.logger:
self.logger.trace ()
else:
warn ("No logger for traceback")
def clean_shutdown_control (self, phase, time_in_this_phase):
self.__no_more_request = True
if self.isactive () or (self.handler and self.handler.working ()):
return 1
else:
self.handle_close (712, "Controlled Shutdown")
self.__no_more_request = False
return 0
def is_channel_in_map (self, map = None):
if map is None:
map = self._map
return self._fileno in map
def set_active (self, flag, nolock = False):
if flag:
flag = time.time ()
else:
flag = 0
if nolock or self.lock is None:
self.active = flag
if not flag: self.set_timeout (self.keep_alive)
return
self.lock.acquire ()
self.active = flag
self.request_count += 1
if not flag: self.set_timeout (self.keep_alive)
self.lock.release ()
def get_active (self, nolock = False):
if nolock or self.lock is None:
return self.active
self.lock.acquire ()
active = self.active
self.lock.release ()
return active
def isactive (self):
return self.get_active () > 0
def isconnected (self):
with self.lock:
r = self.connected
return r
def get_request_count (self):
return self.request_count
def del_channel (self, map = None):
fd = self._fileno
if map is None:
map = self._map
if fd in map:
del map[fd]
def create_socket (self, family, type):
self.family_and_type = family, type
sock = socket.socket (family, type)
sock.setblocking (0)
self.set_socket (sock)
def connect (self):
if adns.query:
adns.query (self.address [0], "A", callback = self.continue_connect)
else:
self.continue_connect ()
def continue_connect (self, answer = None):
self.initialize_connection ()
if not adns.query:
self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
try: asynchat.async_chat.connect (self, self.address)
except: self.handle_error (714)
return
ipaddr = answer [-1]["data"]
#print (self.handler.request.meta ['sid'], ipaddr, 'continue_connect...')
if not ipaddr:
return self.handle_close (704)
else:
port = self.address [1]
self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
try:
asynchat.async_chat.connect (self, (ipaddr, port))
except:
self.handle_error (714)
def recv (self, buffer_size):
try:
data = self.socket.recv (buffer_size)
if not data:
self.handle_close (700, "Connection closed unexpectedly in recv")
return b''
else:
self.set_event_time ()
return data
except socket.error as why:
if why.errno in asyncore._DISCONNECTED:
self.handle_close (700, "Connection closed unexpectedly in recv")
return b''
else:
raise
def send (self, data):
try:
numsent = self.socket.send (data)
if numsent:
self.set_event_time ()
return numsent
except socket.error as why:
if why.errno == EWOULDBLOCK:
return 0
elif why.errno in asyncore._DISCONNECTED:
#print (">>>>>>>>>> why.errno == asyncore.ENOTCONN", why.errno == asyncore.ENOTCONN)
if os.name == "nt" and why.errno == asyncore.ENOTCONN:
					# winsock sometimes raises ENOTCONN and sometimes recovers.
# Found this error at http://file.hungryboarder.com:8080/HBAdManager/pa.html?siteId=hboarder&zoneId=S-2
if self._raised_ENOTCONN <= 3:
self._raised_ENOTCONN += 1
return 0
else:
self._raised_ENOTCONN = 0
self.handle_close (700, "Connection closed unexpectedly in send")
return 0
else:
raise
def close_if_over_keep_live (self):
if time.time () - self.event_time > self.keep_alive:
self.disconnect ()
def set_timeout (self, timeout = 10):
# CAUTION: used at proxy.tunnel_handler
self.zombie_timeout = timeout
def set_keep_alive (self, keep_alive = 10):
self.keep_alive = keep_alive
def handle_connect (self):
if hasattr (self.handler, "has_been_connected"):
self.handler.has_been_connected ()
def handle_expt (self):
self.handle_close (703)
def handle_error (self, code = 701):
self.trace ()
self.handle_close (code)
def handle_timeout (self):
self.handle_close (702)
def handle_expt_event (self):
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
self.handle_close (703, "Socket %d Error" % err)
else:
self.handle_expt ()
def maintern (self, object_timeout):
if time.time () - self.event_time > object_timeout:
if self.handler:
if hasattr (self.handler, "control_shutdown"):
self.handler.control_shutdown ()
self.handle_close (722)
else:
self.disconnect ()
return True
return False
	# proxy POST needs no init_send
def push (self, thing, init_send = True):
if self.connected:
self.close_if_over_keep_live () # check keep-alive
if isinstance(thing, (bytes, bytearray, memoryview)):
asynchat.async_chat.push (self, thing)
else:
self.push_with_producer (thing, init_send)
def push_with_producer (self, producer, init_send = True):
if self.connected:
self.close_if_over_keep_live () # check keep-alive
self.producer_fifo.append (producer)
if init_send:
self.initiate_send ()
def handle_abort (self):
self.handler = None
self.close ()
def handle_close (self, code = 700, msg = ""):
if code == 0: msg = ""
self.errcode = code
if msg:
self.errmsg = msg
else:
self.errmsg = respcodes.get (code, "Undefined Error")
self.close ()
def collect_incoming_data (self, data):
if not self.handler:
if self.connected:
self.logger ("recv data but no handler, droping data %d" % len (data), "warn")
self.disconnect ()
return
self.handler.collect_incoming_data (data)
def found_terminator (self):
if not self.handler:
if self.connected:
self.logger ("found terminator but no handler", "warn")
self.disconnect ()
return # already closed
self.handler.found_terminator ()
def disconnect (self):
# no error
self.handle_close (0)
def reconnect (self):
self.disconnect ()
self.connect ()
def set_proxy (self, flag = True):
self.proxy = flag
def begin_tran (self, handler):
if self.__no_more_request:
return self.handle_close (705)
# IMP: the reason why DNS error, _closed must be located here
self._closed = False
self.errcode = 0
self.errmsg = ""
self.handler = handler
if DEBUG:
self.logger ('begin_tran {rid:%s} %s' % (self.handler.request.meta.get ('req_id', -1), self.handler.request.uri), 'debug')
self.set_event_time ()
# IMP: call add_channel () AFTER push() otherwise threading issue will be raised
try:
if not self.connected:
#print (self.handler.request.meta ['sid'], 'connecting...')
self.connect ()
elif not self.backend:
#print (self.handler.request.meta ['sid'], 'add channel...')
self.add_channel ()
except:
self.handle_error ()
class AsynSSLConnect (AsynConnect):
is_ssl = True
def __init__ (self, address, lock = None, logger = None):
AsynConnect.__init__ (self, address, lock, logger)
self.ac_negotiate_http2 = True
def negotiate_http2 (self, flag):
self.ac_negotiate_http2 = flag
def handshake (self):
if not self._handshaking:
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
raise OSError(err, asyncore._strerror(err))
ssl_context = create_urllib3_context(ssl_version=resolve_ssl_version(None), cert_reqs=resolve_cert_reqs(None))
if self.ac_negotiate_http2:
try: ssl_context.set_alpn_protocols (H2_PROTOCOLS)
except AttributeError: ssl_context.set_npn_protocols (H2_PROTOCOLS)
self.socket = ssl_context.wrap_socket (self.socket, do_handshake_on_connect = False, server_hostname = self.address [0])
self._handshaking = True
try:
self.socket.do_handshake ()
except ssl.SSLError as why:
if why.args [0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
return False
raise ssl.SSLError(why)
try: self._proto = self.socket.selected_alpn_protocol()
except (AttributeError, NotImplementedError):
try: self._proto = self.socket.selected_npn_protocol()
except (AttributeError, NotImplementedError): pass
self._handshaked = True
return True
def handle_connect_event (self):
try:
if not self._handshaked and not self.handshake ():
return
except:
return self.handle_error (713)
AsynConnect.handle_connect_event (self)
def recv (self, buffer_size):
if self._closed:
# usually handshaking failure, already handled exception
return b''
try:
data = self.socket.recv (buffer_size)
if not data:
self.handle_close (700, "Connection closed unexpectedly")
return b''
else:
self.set_event_time ()
return data
except ssl.SSLError as why:
if why.errno == ssl.SSL_ERROR_WANT_READ:
try:
raise BlockingIOError
except NameError:
raise socket.error (EWOULDBLOCK)
# closed connection
elif why.errno in (ssl.SSL_ERROR_ZERO_RETURN, ssl.SSL_ERROR_EOF):
self.handle_close (700, "Connection closed by SSL_ERROR_ZERO_RETURN or SSL_ERROR_EOF")
return b''
else:
raise
def send (self, data):
if self._closed:
# usually handshaking failure, already handled exception
return
try:
numsent = self.socket.send (data)
if numsent:
self.set_event_time ()
return numsent
except ssl.SSLError as why:
if why.errno == ssl.SSL_ERROR_WANT_WRITE:
return 0
elif why.errno == ssl.SSL_ERROR_ZERO_RETURN:
self.handle_close (700, "Connection closed by SSL_ERROR_ZERO_RETURN")
return 0
else:
raise
class AsynSSLProxyConnect (AsynSSLConnect, AsynConnect):
is_ssl = True
def __init__ (self, address, lock = None, logger = None):
AsynConnect.__init__ (self, address, lock, logger)
def handle_connect_event (self):
if self.established:
AsynSSLConnect.handle_connect_event (self)
else:
AsynConnect.handle_connect_event (self)
def recv (self, buffer_size):
if self._handshaked or self._handshaking:
return AsynSSLConnect.recv (self, buffer_size)
else:
return AsynConnect.recv (self, buffer_size)
def send (self, data):
if self._handshaked or self._handshaking:
return AsynSSLConnect.send (self, data)
else:
return AsynConnect.send (self, data)
|
import six
import copy
import json
class lazy_format(object):
__slots__ = ('fmt', 'args', 'kwargs')
def __init__(self, fmt, *args, **kwargs):
self.fmt = fmt
self.args = args
self.kwargs = kwargs
def __str__(self):
return self.fmt.format(*self.args, **self.kwargs)
def safe_issubclass(x, y):
"""Safe version of issubclass() that will not throw TypeErrors.
Invoking issubclass('object', some-abc.meta instances) will result
in the underlying implementation throwing TypeError's from trying to
memoize the result- 'object' isn't a usable weakref target at that level.
Unfortunately this gets exposed all the way up to our code; thus a
'safe' version of the function."""
try:
return issubclass(x, y)
except TypeError:
return False
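# Illustrative behaviour: safe_issubclass(dict, object) -> True, while
# safe_issubclass('object', dict) -> False instead of raising TypeError
# the way the builtin issubclass() would.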
def coerce_for_expansion(mapping):
"""Given a value, make sure it is usable for f(**val) expansion.
In py2.7, the value must be a dictionary- thus a as_dict() method
will be invoked if available. In py3k, the raw mapping is returned
unmodified."""
if six.PY2 and hasattr(mapping, 'as_dict'):
return mapping.as_dict()
return mapping
class ProtocolJSONEncoder(json.JSONEncoder):
def default(self, obj):
from python_jsonschema_objects import classbuilder
from python_jsonschema_objects import wrapper_types
if isinstance(obj, classbuilder.LiteralValue):
return obj._value
if isinstance(obj, wrapper_types.ArrayWrapper):
return obj.for_json()
if isinstance(obj, classbuilder.ProtocolBase):
props = {}
for raw, trans in six.iteritems(obj.__prop_names__):
props[raw] = getattr(obj, trans)
if props[raw] is None:
del props[raw]
for raw, data in six.iteritems(obj._extended_properties):
props[raw] = data
if props[raw] is None:
del props[raw]
return props
else:
return json.JSONEncoder.default(self, obj)
def propmerge(into, data_from):
""" Merge JSON schema requirements into a dictionary """
newprops = copy.deepcopy(into)
for prop, propval in six.iteritems(data_from):
if prop not in newprops:
newprops[prop] = propval
continue
new_sp = newprops[prop]
for subprop, spval in six.iteritems(propval):
if subprop not in new_sp:
new_sp[subprop] = spval
elif subprop == 'enum':
new_sp[subprop] = set(spval) & set(new_sp[subprop])
elif subprop == 'type':
if spval != new_sp[subprop]:
raise TypeError("Type cannot conflict in allOf'")
elif subprop in ('minLength', 'minimum'):
new_sp[subprop] = (new_sp[subprop] if
new_sp[subprop] > spval else spval)
elif subprop in ('maxLength', 'maximum'):
new_sp[subprop] = (new_sp[subprop] if
new_sp[subprop] < spval else spval)
elif subprop == 'multipleOf':
if new_sp[subprop] % spval == 0:
new_sp[subprop] = spval
else:
raise AttributeError(
"Cannot set conflicting multipleOf values")
else:
new_sp[subprop] = spval
newprops[prop] = new_sp
return newprops
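# Worked example (hypothetical schemas): merging
#   into      = {'name': {'type': 'string', 'minLength': 2}}
#   data_from = {'name': {'minLength': 5, 'maxLength': 10}}
# yields {'name': {'type': 'string', 'minLength': 5, 'maxLength': 10}}:
# the stricter minimum wins and previously absent constraints are added as-is.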
def resolve_ref_uri(base, ref):
if ref[0] == '#':
# Local ref
uri = base.rsplit("#", 1)[0] + ref
else:
uri = ref
return uri
"""namespace module"""
__all__ = ("Namespace", "as_namespace")
from collections.abc import Mapping, Sequence
class _Dummy:
pass
CLASS_ATTRS = dir(_Dummy)
del _Dummy
class Namespace(dict):
"""A dict subclass that exposes its items as attributes.
Warning: Namespace instances do not have direct access to the
dict methods.
"""
def __init__(self, obj={}):
dict.__init__(self, obj)
def __dir__(self):
return list(self)
def __repr__(self):
return "%s(%s)" % (type(self).__name__, super(dict, self).__repr__())
def __getattribute__(self, name):
try:
return self[name]
except KeyError:
msg = "'%s' object has no attribute '%s'"
raise AttributeError(msg % (type(self).__name__, name))
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
del self[name]
#------------------------
# "copy constructors"
@classmethod
def from_object(cls, obj, names=None):
if names is None:
names = dir(obj)
ns = {name: getattr(obj, name) for name in names}
return cls(ns)
@classmethod
def from_mapping(cls, ns, names=None):
if names:
ns = {name: ns[name] for name in names}
return cls(ns)
@classmethod
def from_sequence(cls, seq, names=None):
if names:
seq = {name: val for name, val in seq if name in names}
return cls(seq)
#------------------------
# static methods
@staticmethod
def hasattr(ns, name):
try:
object.__getattribute__(ns, name)
except AttributeError:
return False
return True
@staticmethod
def getattr(ns, name):
return object.__getattribute__(ns, name)
@staticmethod
def setattr(ns, name, value):
return object.__setattr__(ns, name, value)
@staticmethod
def delattr(ns, name):
return object.__delattr__(ns, name)
def as_namespace(obj, names=None):
# functions
if isinstance(obj, type(as_namespace)):
obj = obj()
# special cases
if isinstance(obj, type):
names = (name for name in dir(obj) if name not in CLASS_ATTRS)
return Namespace.from_object(obj, names)
if isinstance(obj, Mapping):
return Namespace.from_mapping(obj, names)
if isinstance(obj, Sequence):
return Namespace.from_sequence(obj, names)
# default
return Namespace.from_object(obj, names)
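# A small usage demonstration (illustrative only); runs only when executed directly.
if __name__ == "__main__":
    ns = Namespace({"host": "localhost", "port": 8080})
    ns.debug = True  # item and attribute access are interchangeable
    print(ns.host, ns.port, ns["debug"])
    print(as_namespace({"a": 1}).a)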
|
import os
import json
import collections
import re
import markdown
from . import config
from . import stixhelpers
from . import relationshiphelpers
from . import util
def generate():
"""Responsible for verifying group directory and starting off
group markdown generation
"""
# Verify if directory exists
if not os.path.isdir(config.group_markdown_path):
os.mkdir(config.group_markdown_path)
#Generates the markdown files to be used for page generation
generate_markdown_files()
def generate_markdown_files():
"""Responsible for generating group index page and getting shared data for
all groups
"""
data = {}
# Amount of characters per category
group_by = 2
side_menu_data = util.get_side_menu_data("Groups", "/groups/", config.group_list)
data['side_menu_data'] = side_menu_data
side_menu_mobile_view_data = util.get_side_menu_mobile_view_data("groups", "/groups/", config.group_list, group_by)
data['side_menu_mobile_view_data'] = side_menu_mobile_view_data
data['groups_table'] = get_groups_table_data()
data['groups_list_len'] = str(len(config.group_list))
subs = config.group_index_md + json.dumps(data)
with open(os.path.join(config.group_markdown_path, "overview.md"), "w", encoding='utf8') as md_file:
md_file.write(subs)
#Create the markdown for the enterprise groups in the STIX
for group in config.group_list:
generate_group_md(group, side_menu_data, side_menu_mobile_view_data)
def generate_group_md(group, side_menu_data, side_menu_mobile_view_data):
"""Responsible for generating markdown of all groups"""
attack_id = util.get_attack_id(group)
if attack_id:
data = {}
data['attack_id'] = attack_id
data['side_menu_data'] = side_menu_data
data['side_menu_mobile_view_data'] = side_menu_mobile_view_data
# External references
ext_ref = group["external_references"]
dates = util.get_created_and_modified_dates(group)
if dates.get('created'):
data['created'] = dates['created']
if dates.get('modified'):
data['modified'] = dates['modified']
if group.get("name"):
data['name'] = group['name']
if group.get("x_mitre_version"):
data['version'] = group["x_mitre_version"]
if isinstance(group.get("x_mitre_contributors"),collections.Iterable):
data['contributors_list'] = group["x_mitre_contributors"]
# Get initial reference list
reference_list = []
        # Declared as a dict so it can be passed by reference
next_reference_number = {}
next_reference_number['value'] = 1
reference_list = util.update_reference_list(reference_list, group)
if group.get("description"):
citations_from_descr = util.get_citations_from_descr(group['description'])
data['descr'] = markdown.markdown(group["description"])
data['descr'] = util.filter_urls(data['descr'])
data['descr'] = util.get_descr_reference_sect(citations_from_descr, reference_list, next_reference_number, data['descr'])
if group.get('x_mitre_deprecated'):
data['deprecated'] = True
# Get technique data for techniques used table
data['technique_table_data'] = get_techniques_used_by_group_data(group, reference_list, next_reference_number)
# Get navigator layers for this group
layers = util.get_navigator_layers(
data['name'],
data["attack_id"],
"group",
data["version"] if "version" in data else None,
data['technique_table_data'],
)
data["layers"] = []
for layer in layers:
with open(os.path.join(config.group_markdown_path, "-".join([data['attack_id'], "techniques", layer["domain"]]) + ".md"), "w", encoding='utf8') as layer_json:
subs = config.layer_md.substitute({
"attack_id": data["attack_id"],
"path": "groups/" + data["attack_id"],
"domain": layer["domain"]
})
subs = subs + layer["layer"]
layer_json.write(subs)
data["layers"].append({
"domain": layer["domain"],
"filename": "-".join([data["attack_id"], layer["domain"], "layer"]) + ".json",
"navigator_link_enterprise" : config.navigator_link_enterprise,
"navigator_link_mobile" : config.navigator_link_mobile
})
# Grab software data for Software table
data['software_data'], data['add_software_ref'] = get_software_table_data(group, reference_list, next_reference_number)
data['alias_descriptions'] = util.get_alias_data(group.get("aliases")[1:], ext_ref, reference_list, next_reference_number)
data['bottom_ref'] = util.sort_reference_list(reference_list)
if isinstance(group.get("aliases"), collections.Iterable):
data['aliases_list'] = group["aliases"][1:]
subs = config.group_md.substitute(data)
subs = subs + json.dumps(data)
# Write out the markdown file
with open(os.path.join(config.group_markdown_path, data['attack_id'] + ".md"), "w", encoding='utf8') as md_file:
md_file.write(subs)
def get_groups_table_data():
"""Responsible for generating group table data for the group index page"""
groups_table_data = []
#Now the table on the right, which is made up of group data
for group in config.group_list:
attack_id = util.get_attack_id(group)
if attack_id:
row = {}
row['id'] = attack_id
if group.get("name"):
row['name'] = group['name']
if group.get("description"):
row['descr'] = group["description"]
row['descr'] = markdown.markdown(row['descr'])
row['descr'] = util.filter_urls(row['descr'])
row['descr'] = util.remove_html_paragraph(row['descr'])
if group.get('x_mitre_deprecated'):
row['deprecated'] = True
citation_temp = "(Citation: {})"
p = re.compile("\(Citation: (.*?)\)")
found_citations = p.findall(row['descr'])
# Remove citation
for citation in found_citations:
row['descr'] = row['descr'].replace(citation_temp.format(citation), "")
if isinstance(group.get("aliases"), collections.Iterable):
row['aliases_list'] = group["aliases"][1:]
groups_table_data.append(row)
return groups_table_data
def get_techniques_used_by_group_data(group, reference_list, next_reference_number):
"""Given a group and its reference list, get the techniques used by the
group. Check the reference list for citations, if not found
in list, add it.
"""
technique_list = {}
if config.techniques_used_by_groups.get(group.get('id')):
for technique in config.techniques_used_by_groups[group['id']]:
# Do not add if technique is deprecated
if not technique['object'].get('x_mitre_deprecated'):
technique_list = util.technique_used_helper(technique_list, technique, reference_list, next_reference_number)
technique_data = []
for item in technique_list:
technique_data.append(technique_list[item])
# Sort by technique name
technique_data = sorted(technique_data, key=lambda k: k['name'].lower())
# Sort by domain name
technique_data = sorted(technique_data, key=lambda k: [config.custom_alphabet.index(c) for c in k['domain'].lower()])
return technique_data
def get_software_table_data(group, reference_list, next_reference_number):
"""Given a group, get software table data"""
software_list = {}
reference = False
# Creating map for tools/malware used by groups
# and techniques used by malware/tools
tools_and_malware = [{
'software': config.tools_used_by_groups,
'techniques': config.techniques_used_by_tools
},
{
'software': config.malware_used_by_groups,
'techniques': config.techniques_used_by_malware
}]
# Get malware or tools used by group
for pairing in tools_and_malware:
if pairing['software'].get(group.get('id')):
for software in pairing['software'][group['id']]:
software_id = software['object']['id']
# Check if software not already in software_list dict
if software_id not in software_list:
attack_id = util.get_attack_id(software['object'])
if attack_id:
software_list[software_id] = {}
software_list[software_id]['id'] = attack_id
software_list[software_id]['name'] = software['object']['name']
if software['relationship'].get('description'):
if reference == False:
reference = True
# Get filtered description
software_list[software_id]['descr'] = util.get_filtered_description(reference_list, next_reference_number, software)
elif software['relationship'].get('external_references'):
if reference == False:
reference = True
# Update reference list
reference_list = util.update_reference_list(reference_list, software['relationship'])
software_list[software_id]['refs'] = []
for ext_ref in software['relationship']['external_references']:
if ext_ref.get('source_name'):
row = {}
row['url'] = ext_ref.get('url')
row['number'] = util.find_reference_number(reference_list, next_reference_number, ext_ref['source_name'])
software_list[software_id]['refs'].append(row)
# Check if techniques exists, add techniques used by software
if pairing['techniques'].get(software_id):
if 'techniques' not in software_list[software_id]:
software_list[software_id]['techniques'] = []
for technique in pairing['techniques'][software_id]:
tech_data = {}
t_id = util.get_attack_id(technique['object'])
if t_id:
if util.is_sub_tid(t_id):
tech_data['parent_id'] = util.get_parent_technique_id(t_id)
tech_data['id'] = util.get_sub_technique_id(t_id)
tech_data['name'] = util.get_technique_name(tech_data['parent_id'])
tech_data['sub_name'] = technique['object']['name']
else:
tech_data['id'] = t_id
tech_data['name'] = technique['object']['name']
software_list[software_id]['techniques'].append(tech_data)
# Moving it to an array because jinja does not like to loop
# through dictionaries
data = []
for item in software_list:
if "techniques" in software_list[item]:
software_list[item]['techniques'] = sorted(software_list[item]['techniques'], key=lambda k: k['name'].lower())
data.append(software_list[item])
data = sorted(data, key=lambda k: k['name'].lower())
return data, reference
|
'''
Merge Two Sorted Lists
Asked in:
Microsoft
Yahoo
Amazon
Merge two sorted linked lists and return it as a new list.
The new list should be made by splicing together the nodes of the first two lists, and should also be sorted.
For example, given the following linked lists:
5 -> 8 -> 20
4 -> 11 -> 15
The merged list should be :
4 -> 5 -> 8 -> 11 -> 15 -> 20
'''
class Node:
def __init__(self, data):
self.data = data
# store reference (next item)
self.next = None
return
class Solution:
# @param A : head node of linked list
# @param B : head node of linked list
# @return the head node in the linked list
def mergeTwoLists(self, h1, h2):
d=Node('a')
td=d
while h1 != None and h2 != None:
if h1.data < h2.data:
d.next = h1
h1 = h1.next
else:
d.next = h2
h2 = h2.next
d = d.next
if h1 != None:
d.next = h1
if h2 != None:
d.next = h2
return td.next
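# A small self-check of the merge above; the list construction helper is illustrative only.
if __name__ == '__main__':
    def build(values):
        # Build a singly linked list from a Python list and return its head node.
        head = Node(values[0])
        cur = head
        for v in values[1:]:
            cur.next = Node(v)
            cur = cur.next
        return head
    merged = Solution().mergeTwoLists(build([5, 8, 20]), build([4, 11, 15]))
    out = []
    while merged is not None:
        out.append(merged.data)
        merged = merged.next
    print(out)  # expected: [4, 5, 8, 11, 15, 20]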
|
"""
ro.py
A modern, asynchronous wrapper for the Roblox web API.
Copyright 2020-present jmkdev
License: MIT, see LICENSE
"""
# Find the original here: https://github.com/Rapptz/discord.py/blob/master/discord/__init__.py
__title__ = "roblox"
__author__ = "jmkdev"
__license__ = "MIT"
__copyright__ = "Copyright 2020-present jmkdev"
__version__ = "2.0.0"
import logging
from typing import NamedTuple
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
from .client import Client
from .utilities.exceptions import *
from .utilities.types import *
from .thumbnails import ThumbnailState, ThumbnailFormat, ThumbnailReturnPolicy, AvatarThumbnailType
from .universes import UniverseGenre, UniverseAvatarType
from .creatortype import CreatorType
class VersionInfo(NamedTuple):
"""
Represents the package's version info.
"""
major: int
minor: int
micro: int
releaselevel: Literal["alpha", "beta", "candidate", "final"]
serial: int
version_info: VersionInfo = VersionInfo(
major=2,
minor=0,
micro=0,
releaselevel="release",
serial=0
)
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
"""API Star Contrib."""
__author__ = """Ryan Anguiano"""
__email__ = 'ryan.anguiano@gmail.com'
__version__ = '0.0.6'
|
from django.contrib import admin
from .models import Comment, Follow, Group, Post
EMPTY_VALUE = '-пусто-'
class CommentInLine(admin.TabularInline):
model = Comment
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
list_display = ('text', 'pub_date', 'author',)
search_fields = ('text',)
list_filter = ('pub_date',)
empty_value_display = EMPTY_VALUE
inlines = [CommentInLine, ]
@admin.register(Group)
class GroupAdmin(admin.ModelAdmin):
list_display = ('title', 'slug', 'description')
search_fields = ('title', 'description')
@admin.register(Follow)
class FollowAdmin(admin.ModelAdmin):
list_display = ('user', 'author')
search_fields = ('user', 'author')
|
# -*- coding: utf-8 -*-
import logging
import serial
import struct
class TemperatureLogger:
_SERIAL_SPEED = 9600
_SERIAL_TIMEOUT = 1
_PAYLOAD_REQUEST = 'A'
_PAYLOAD_SIZE = 45
_PAYLOAD_DATA_OFFSET_LOW = 7
_PAYLOAD_DATA_OFFSET_HIGH = 9
def __init__(self, port):
self._logger = logging.getLogger(__name__)
self._logger.debug(u"Connecting temperature logger on COM{} at {}bps".format(port, self._SERIAL_SPEED))
# Open serial port
self._port = serial.Serial(port, self._SERIAL_SPEED, timeout=self._SERIAL_TIMEOUT)
def get_temperature(self, channel=0):
# Logger returns data when prompted with 'A' character
self._port.write(self._PAYLOAD_REQUEST)
self._port.flush()
r = self._port.read(self._PAYLOAD_SIZE)
        # Unpack big-endian signed 16-bit value and scale to degrees Celsius
t = (struct.unpack('>h',
r[self._PAYLOAD_DATA_OFFSET_LOW+channel:self._PAYLOAD_DATA_OFFSET_HIGH+channel])[0]) / 10.0
self._logger.debug(u"{} READ ch{}: {}°C".format(self._port.name, channel, t))
return t
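# Minimal usage sketch, assuming the device is reachable on a serial port such
# as '/dev/ttyUSB0'; the port name and channel below are illustrative only.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    sensor = TemperatureLogger("/dev/ttyUSB0")
    print("channel 0: {} degC".format(sensor.get_temperature(channel=0)))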
|
"""Plotting functions for visualizing distributions."""
from __future__ import division
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.transforms as tx
from matplotlib.collections import LineCollection
import warnings
from distutils.version import LooseVersion
from six import string_types
try:
import statsmodels.nonparametric.api as smnp
_has_statsmodels = True
except ImportError:
_has_statsmodels = False
from .utils import iqr, _kde_support
from .palettes import color_palette, light_palette, dark_palette, blend_palette
__all__ = ["distplot", "kdeplot", "rugplot"]
def _freedman_diaconis_bins(a):
"""Calculate number of hist bins using Freedman-Diaconis rule."""
# From https://stats.stackexchange.com/questions/798/
a = np.asarray(a)
if len(a) < 2:
return 1
h = 2 * iqr(a) / (len(a) ** (1 / 3))
    # fall back to sqrt(len(a)) bins if iqr is 0
if h == 0:
return int(np.sqrt(a.size))
else:
return int(np.ceil((a.max() - a.min()) / h))
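# Worked example for _freedman_diaconis_bins above (figures computed for
# illustration, not taken from the source): for a == np.arange(1, 101),
# iqr(a) == 49.5, h == 2 * 49.5 / 100 ** (1 / 3) ~= 21.33, and the function
# returns ceil((100 - 1) / 21.33) == 5 bins.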
def distplot(a, bins=None, hist=True, kde=True, rug=False, fit=None,
hist_kws=None, kde_kws=None, rug_kws=None, fit_kws=None,
color=None, vertical=False, norm_hist=False, axlabel=None,
label=None, ax=None):
"""Flexibly plot a univariate distribution of observations.
This function combines the matplotlib ``hist`` function (with automatic
calculation of a good default bin size) with the seaborn :func:`kdeplot`
and :func:`rugplot` functions. It can also fit ``scipy.stats``
distributions and plot the estimated PDF over the data.
Parameters
----------
a : Series, 1d-array, or list.
Observed data. If this is a Series object with a ``name`` attribute,
the name will be used to label the data axis.
bins : argument for matplotlib hist(), or None, optional
        Specification of hist bins. If unspecified, a reference rule is used
        that tries to find a useful default.
hist : bool, optional
Whether to plot a (normed) histogram.
kde : bool, optional
Whether to plot a gaussian kernel density estimate.
rug : bool, optional
Whether to draw a rugplot on the support axis.
fit : random variable object, optional
        An object with a `fit` method, returning a tuple that can be passed to
        a `pdf` method as positional arguments following a grid of values to
        evaluate the pdf on.
{hist, kde, rug, fit}_kws : dictionaries, optional
Keyword arguments for underlying plotting functions.
color : matplotlib color, optional
Color to plot everything but the fitted curve in.
vertical : bool, optional
If True, observed values are on y-axis.
norm_hist : bool, optional
If True, the histogram height shows a density rather than a count.
This is implied if a KDE or fitted density is plotted.
axlabel : string, False, or None, optional
        Name for the support axis label. If None, will try to get it
        from a.name; if False, do not set a label.
label : string, optional
Legend label for the relevant component of the plot.
ax : matplotlib axis, optional
If provided, plot on this axis.
Returns
-------
ax : matplotlib Axes
Returns the Axes object with the plot for further tweaking.
See Also
--------
kdeplot : Show a univariate or bivariate distribution with a kernel
density estimate.
rugplot : Draw small vertical lines to show each observation in a
distribution.
Examples
--------
Show a default plot with a kernel density estimate and histogram with bin
size determined automatically with a reference rule:
.. plot::
:context: close-figs
>>> import seaborn as sns, numpy as np
>>> sns.set(); np.random.seed(0)
>>> x = np.random.randn(100)
>>> ax = sns.distplot(x)
Use Pandas objects to get an informative axis label:
.. plot::
:context: close-figs
>>> import pandas as pd
>>> x = pd.Series(x, name="x variable")
>>> ax = sns.distplot(x)
Plot the distribution with a kernel density estimate and rug plot:
.. plot::
:context: close-figs
>>> ax = sns.distplot(x, rug=True, hist=False)
Plot the distribution with a histogram and maximum likelihood gaussian
distribution fit:
.. plot::
:context: close-figs
>>> from scipy.stats import norm
>>> ax = sns.distplot(x, fit=norm, kde=False)
Plot the distribution on the vertical axis:
.. plot::
:context: close-figs
>>> ax = sns.distplot(x, vertical=True)
Change the color of all the plot elements:
.. plot::
:context: close-figs
>>> sns.set_color_codes()
>>> ax = sns.distplot(x, color="y")
Pass specific parameters to the underlying plot functions:
.. plot::
:context: close-figs
>>> ax = sns.distplot(x, rug=True, rug_kws={"color": "g"},
... kde_kws={"color": "k", "lw": 3, "label": "KDE"},
... hist_kws={"histtype": "step", "linewidth": 3,
... "alpha": 1, "color": "g"})
"""
if ax is None:
ax = plt.gca()
# Intelligently label the support axis
label_ax = bool(axlabel)
if axlabel is None and hasattr(a, "name"):
axlabel = a.name
if axlabel is not None:
label_ax = True
    # Make `a` a 1-d array
a = np.asarray(a)
if a.ndim > 1:
a = a.squeeze()
# Decide if the hist is normed
norm_hist = norm_hist or kde or (fit is not None)
# Handle dictionary defaults
if hist_kws is None:
hist_kws = dict()
if kde_kws is None:
kde_kws = dict()
if rug_kws is None:
rug_kws = dict()
if fit_kws is None:
fit_kws = dict()
# Get the color from the current color cycle
if color is None:
if vertical:
line, = ax.plot(0, a.mean())
else:
line, = ax.plot(a.mean(), 0)
color = line.get_color()
line.remove()
# Plug the label into the right kwarg dictionary
if label is not None:
if hist:
hist_kws["label"] = label
elif kde:
kde_kws["label"] = label
elif rug:
rug_kws["label"] = label
elif fit:
fit_kws["label"] = label
if hist:
if bins is None:
bins = min(_freedman_diaconis_bins(a), 50)
hist_kws.setdefault("alpha", 0.4)
if LooseVersion(mpl.__version__) < LooseVersion("2.2"):
hist_kws.setdefault("normed", norm_hist)
else:
hist_kws.setdefault("density", norm_hist)
orientation = "horizontal" if vertical else "vertical"
hist_color = hist_kws.pop("color", color)
ax.hist(a, bins, orientation=orientation,
color=hist_color, **hist_kws)
if hist_color != color:
hist_kws["color"] = hist_color
if kde:
kde_color = kde_kws.pop("color", color)
kdeplot(a, vertical=vertical, ax=ax, color=kde_color, **kde_kws)
if kde_color != color:
kde_kws["color"] = kde_color
if rug:
rug_color = rug_kws.pop("color", color)
axis = "y" if vertical else "x"
rugplot(a, axis=axis, ax=ax, color=rug_color, **rug_kws)
if rug_color != color:
rug_kws["color"] = rug_color
if fit is not None:
def pdf(x):
return fit.pdf(x, *params)
fit_color = fit_kws.pop("color", "#282828")
gridsize = fit_kws.pop("gridsize", 200)
cut = fit_kws.pop("cut", 3)
clip = fit_kws.pop("clip", (-np.inf, np.inf))
bw = stats.gaussian_kde(a).scotts_factor() * a.std(ddof=1)
x = _kde_support(a, bw, gridsize, cut, clip)
params = fit.fit(a)
y = pdf(x)
if vertical:
x, y = y, x
ax.plot(x, y, color=fit_color, **fit_kws)
if fit_color != "#282828":
fit_kws["color"] = fit_color
if label_ax:
if vertical:
ax.set_ylabel(axlabel)
else:
ax.set_xlabel(axlabel)
return ax
def _univariate_kdeplot(data, shade, vertical, kernel, bw, gridsize, cut,
clip, legend, ax, cumulative=False, **kwargs):
"""Plot a univariate kernel density estimate on one of the axes."""
# Sort out the clipping
if clip is None:
clip = (-np.inf, np.inf)
# Calculate the KDE
if np.nan_to_num(data.var()) == 0:
# Don't try to compute KDE on singular data
msg = "Data must have variance to compute a kernel density estimate."
warnings.warn(msg, UserWarning)
x, y = np.array([]), np.array([])
elif _has_statsmodels:
# Prefer using statsmodels for kernel flexibility
x, y = _statsmodels_univariate_kde(data, kernel, bw,
gridsize, cut, clip,
cumulative=cumulative)
else:
# Fall back to scipy if missing statsmodels
if kernel != "gau":
kernel = "gau"
msg = "Kernel other than `gau` requires statsmodels."
warnings.warn(msg, UserWarning)
if cumulative:
raise ImportError("Cumulative distributions are currently "
"only implemented in statsmodels. "
"Please install statsmodels.")
x, y = _scipy_univariate_kde(data, bw, gridsize, cut, clip)
# Make sure the density is nonnegative
y = np.amax(np.c_[np.zeros_like(y), y], axis=1)
# Flip the data if the plot should be on the y axis
if vertical:
x, y = y, x
# Check if a label was specified in the call
label = kwargs.pop("label", None)
# Otherwise check if the data object has a name
if label is None and hasattr(data, "name"):
label = data.name
# Decide if we're going to add a legend
legend = label is not None and legend
label = "_nolegend_" if label is None else label
# Use the active color cycle to find the plot color
facecolor = kwargs.pop("facecolor", None)
line, = ax.plot(x, y, **kwargs)
color = line.get_color()
line.remove()
kwargs.pop("color", None)
facecolor = color if facecolor is None else facecolor
# Draw the KDE plot and, optionally, shade
ax.plot(x, y, color=color, label=label, **kwargs)
shade_kws = dict(
facecolor=facecolor,
alpha=kwargs.get("alpha", 0.25),
clip_on=kwargs.get("clip_on", True),
zorder=kwargs.get("zorder", 1),
)
if shade:
if vertical:
ax.fill_betweenx(y, 0, x, **shade_kws)
else:
ax.fill_between(x, 0, y, **shade_kws)
# Set the density axis minimum to 0
if vertical:
ax.set_xlim(0, auto=None)
else:
ax.set_ylim(0, auto=None)
# Draw the legend here
handles, labels = ax.get_legend_handles_labels()
if legend and handles:
ax.legend(loc="best")
return ax
def _statsmodels_univariate_kde(data, kernel, bw, gridsize, cut, clip,
cumulative=False):
"""Compute a univariate kernel density estimate using statsmodels."""
fft = kernel == "gau"
kde = smnp.KDEUnivariate(data)
kde.fit(kernel, bw, fft, gridsize=gridsize, cut=cut, clip=clip)
if cumulative:
grid, y = kde.support, kde.cdf
else:
grid, y = kde.support, kde.density
return grid, y
def _scipy_univariate_kde(data, bw, gridsize, cut, clip):
"""Compute a univariate kernel density estimate using scipy."""
try:
kde = stats.gaussian_kde(data, bw_method=bw)
except TypeError:
kde = stats.gaussian_kde(data)
if bw != "scott": # scipy default
msg = ("Ignoring bandwidth choice, "
"please upgrade scipy to use a different bandwidth.")
warnings.warn(msg, UserWarning)
if isinstance(bw, string_types):
bw = "scotts" if bw == "scott" else bw
bw = getattr(kde, "%s_factor" % bw)() * np.std(data)
grid = _kde_support(data, bw, gridsize, cut, clip)
y = kde(grid)
return grid, y
def _bivariate_kdeplot(x, y, filled, fill_lowest,
kernel, bw, gridsize, cut, clip,
axlabel, cbar, cbar_ax, cbar_kws, ax, norm, **kwargs):
"""Plot a joint KDE estimate as a bivariate contour plot."""
# Determine the clipping
if clip is None:
clip = [(-np.inf, np.inf), (-np.inf, np.inf)]
elif np.ndim(clip) == 1:
clip = [clip, clip]
# Calculate the KDE
if _has_statsmodels:
xx, yy, z = _statsmodels_bivariate_kde(x, y, bw, gridsize, cut, clip)
else:
xx, yy, z = _scipy_bivariate_kde(x, y, bw, gridsize, cut, clip)
if norm:
z = z/np.max(z)
# Plot the contours
n_levels = kwargs.pop("n_levels", 10)
scout, = ax.plot([], [])
default_color = scout.get_color()
scout.remove()
color = kwargs.pop("color", default_color)
cmap = kwargs.pop("cmap", None)
if cmap is None:
if filled:
cmap = light_palette(color, as_cmap=True)
else:
cmap = dark_palette(color, as_cmap=True)
if isinstance(cmap, string_types):
if cmap.endswith("_d"):
pal = ["#333333"]
pal.extend(color_palette(cmap.replace("_d", "_r"), 2))
cmap = blend_palette(pal, as_cmap=True)
else:
cmap = mpl.cm.get_cmap(cmap)
label = kwargs.pop("label", None)
kwargs["cmap"] = cmap
contour_func = ax.contourf if filled else ax.contour
cset = contour_func(xx, yy, z, n_levels, **kwargs)
if filled and not fill_lowest:
cset.collections[0].set_alpha(0)
kwargs["n_levels"] = n_levels
if cbar:
cbar_kws = {} if cbar_kws is None else cbar_kws
ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)
# Label the axes
if hasattr(x, "name") and axlabel:
ax.set_xlabel(x.name)
if hasattr(y, "name") and axlabel:
ax.set_ylabel(y.name)
if label is not None:
legend_color = cmap(.95) if color is None else color
if filled:
ax.fill_between([], [], color=legend_color, label=label)
else:
ax.plot([], [], color=legend_color, label=label)
return ax
def _statsmodels_bivariate_kde(x, y, bw, gridsize, cut, clip):
"""Compute a bivariate kde using statsmodels."""
if isinstance(bw, string_types):
bw_func = getattr(smnp.bandwidths, "bw_" + bw)
x_bw = bw_func(x)
y_bw = bw_func(y)
bw = [x_bw, y_bw]
elif np.isscalar(bw):
bw = [bw, bw]
if isinstance(x, pd.Series):
x = x.values
if isinstance(y, pd.Series):
y = y.values
kde = smnp.KDEMultivariate([x, y], "cc", bw)
x_support = _kde_support(x, kde.bw[0], gridsize, cut, clip[0])
y_support = _kde_support(y, kde.bw[1], gridsize, cut, clip[1])
xx, yy = np.meshgrid(x_support, y_support)
z = kde.pdf([xx.ravel(), yy.ravel()]).reshape(xx.shape)
return xx, yy, z
def _scipy_bivariate_kde(x, y, bw, gridsize, cut, clip):
"""Compute a bivariate kde using scipy."""
data = np.c_[x, y]
kde = stats.gaussian_kde(data.T, bw_method=bw)
data_std = data.std(axis=0, ddof=1)
if isinstance(bw, string_types):
bw = "scotts" if bw == "scott" else bw
bw_x = getattr(kde, "%s_factor" % bw)() * data_std[0]
bw_y = getattr(kde, "%s_factor" % bw)() * data_std[1]
elif np.isscalar(bw):
bw_x, bw_y = bw, bw
else:
msg = ("Cannot specify a different bandwidth for each dimension "
"with the scipy backend. You should install statsmodels.")
raise ValueError(msg)
x_support = _kde_support(data[:, 0], bw_x, gridsize, cut, clip[0])
y_support = _kde_support(data[:, 1], bw_y, gridsize, cut, clip[1])
xx, yy = np.meshgrid(x_support, y_support)
z = kde([xx.ravel(), yy.ravel()]).reshape(xx.shape)
return xx, yy, z
def kdeplot(data, data2=None, shade=False, vertical=False, kernel="gau",
bw="scott", gridsize=100, cut=3, clip=None, legend=True,
cumulative=False, shade_lowest=True, cbar=False, cbar_ax=None,
cbar_kws=None, ax=None, normalize_to_max=False, **kwargs):
"""Fit and plot a univariate or bivariate kernel density estimate.
Parameters
----------
data : 1d array-like
Input data.
data2: 1d array-like, optional
Second input data. If present, a bivariate KDE will be estimated.
shade : bool, optional
If True, shade in the area under the KDE curve (or draw with filled
contours when data is bivariate).
vertical : bool, optional
If True, density is on x-axis.
kernel : {'gau' | 'cos' | 'biw' | 'epa' | 'tri' | 'triw' }, optional
Code for shape of kernel to fit with. Bivariate KDE can only use
gaussian kernel.
bw : {'scott' | 'silverman' | scalar | pair of scalars }, optional
Name of reference method to determine kernel size, scalar factor,
or scalar for each dimension of the bivariate plot. Note that the
        underlying computational libraries have different interpretations
for this parameter: ``statsmodels`` uses it directly, but ``scipy``
treats it as a scaling factor for the standard deviation of the
data.
gridsize : int, optional
Number of discrete points in the evaluation grid.
cut : scalar, optional
Draw the estimate to cut * bw from the extreme data points.
clip : pair of scalars, or pair of pair of scalars, optional
Lower and upper bounds for datapoints used to fit KDE. Can provide
a pair of (low, high) bounds for bivariate plots.
legend : bool, optional
If True, add a legend or label the axes when possible.
cumulative : bool, optional
If True, draw the cumulative distribution estimated by the kde.
shade_lowest : bool, optional
If True, shade the lowest contour of a bivariate KDE plot. Not
relevant when drawing a univariate plot or when ``shade=False``.
Setting this to ``False`` can be useful when you want multiple
densities on the same Axes.
cbar : bool, optional
If True and drawing a bivariate KDE plot, add a colorbar.
cbar_ax : matplotlib axes, optional
Existing axes to draw the colorbar onto, otherwise space is taken
from the main axes.
cbar_kws : dict, optional
Keyword arguments for ``fig.colorbar()``.
ax : matplotlib axes, optional
Axes to plot on, otherwise uses current axes.
kwargs : key, value pairings
Other keyword arguments are passed to ``plt.plot()`` or
``plt.contour{f}`` depending on whether a univariate or bivariate
plot is being drawn.
Returns
-------
ax : matplotlib Axes
Axes with plot.
See Also
--------
distplot: Flexibly plot a univariate distribution of observations.
jointplot: Plot a joint dataset with bivariate and marginal distributions.
Examples
--------
Plot a basic univariate density:
.. plot::
:context: close-figs
>>> import numpy as np; np.random.seed(10)
>>> import seaborn as sns; sns.set(color_codes=True)
>>> mean, cov = [0, 2], [(1, .5), (.5, 1)]
>>> x, y = np.random.multivariate_normal(mean, cov, size=50).T
>>> ax = sns.kdeplot(x)
Shade under the density curve and use a different color:
.. plot::
:context: close-figs
>>> ax = sns.kdeplot(x, shade=True, color="r")
Plot a bivariate density:
.. plot::
:context: close-figs
>>> ax = sns.kdeplot(x, y)
Use filled contours:
.. plot::
:context: close-figs
>>> ax = sns.kdeplot(x, y, shade=True)
Use more contour levels and a different color palette:
.. plot::
:context: close-figs
>>> ax = sns.kdeplot(x, y, n_levels=30, cmap="Purples_d")
    Use a narrower bandwidth:
.. plot::
:context: close-figs
>>> ax = sns.kdeplot(x, bw=.15)
Plot the density on the vertical axis:
.. plot::
:context: close-figs
>>> ax = sns.kdeplot(y, vertical=True)
Limit the density curve within the range of the data:
.. plot::
:context: close-figs
>>> ax = sns.kdeplot(x, cut=0)
Add a colorbar for the contours:
.. plot::
:context: close-figs
>>> ax = sns.kdeplot(x, y, cbar=True)
Plot two shaded bivariate densities:
.. plot::
:context: close-figs
>>> iris = sns.load_dataset("iris")
>>> setosa = iris.loc[iris.species == "setosa"]
>>> virginica = iris.loc[iris.species == "virginica"]
>>> ax = sns.kdeplot(setosa.sepal_width, setosa.sepal_length,
... cmap="Reds", shade=True, shade_lowest=False)
>>> ax = sns.kdeplot(virginica.sepal_width, virginica.sepal_length,
... cmap="Blues", shade=True, shade_lowest=False)
"""
if ax is None:
ax = plt.gca()
if isinstance(data, list):
data = np.asarray(data)
if len(data) == 0:
return ax
data = data.astype(np.float64)
if data2 is not None:
if isinstance(data2, list):
data2 = np.asarray(data2)
data2 = data2.astype(np.float64)
warn = False
bivariate = False
if isinstance(data, np.ndarray) and np.ndim(data) > 1:
warn = True
bivariate = True
x, y = data.T
elif isinstance(data, pd.DataFrame) and np.ndim(data) > 1:
warn = True
bivariate = True
x = data.iloc[:, 0].values
y = data.iloc[:, 1].values
elif data2 is not None:
bivariate = True
x = data
y = data2
if warn:
warn_msg = ("Passing a 2D dataset for a bivariate plot is deprecated "
"in favor of kdeplot(x, y), and it will cause an error in "
"future versions. Please update your code.")
warnings.warn(warn_msg, UserWarning)
if bivariate and cumulative:
raise TypeError("Cumulative distribution plots are not"
"supported for bivariate distributions.")
if bivariate:
ax = _bivariate_kdeplot(x, y, shade, shade_lowest,
kernel, bw, gridsize, cut, clip, legend,
cbar, cbar_ax, cbar_kws, ax, normalize_to_max, **kwargs)
else:
ax = _univariate_kdeplot(data, shade, vertical, kernel, bw,
gridsize, cut, clip, legend, ax,
cumulative=cumulative, **kwargs)
return ax
def rugplot(a, height=.05, axis="x", ax=None, **kwargs):
"""Plot datapoints in an array as sticks on an axis.
Parameters
----------
a : vector
1D array of observations.
height : scalar, optional
Height of ticks as proportion of the axis.
axis : {'x' | 'y'}, optional
Axis to draw rugplot on.
ax : matplotlib axes, optional
Axes to draw plot into; otherwise grabs current axes.
kwargs : key, value pairings
Other keyword arguments are passed to ``LineCollection``.
Returns
-------
ax : matplotlib axes
The Axes object with the plot on it.
"""
if ax is None:
ax = plt.gca()
a = np.asarray(a)
vertical = kwargs.pop("vertical", axis == "y")
alias_map = dict(linewidth="lw", linestyle="ls", color="c")
for attr, alias in alias_map.items():
if alias in kwargs:
kwargs[attr] = kwargs.pop(alias)
kwargs.setdefault("linewidth", 1)
if vertical:
trans = tx.blended_transform_factory(ax.transAxes, ax.transData)
xy_pairs = np.column_stack([np.tile([0, height], len(a)),
np.repeat(a, 2)])
else:
trans = tx.blended_transform_factory(ax.transData, ax.transAxes)
xy_pairs = np.column_stack([np.repeat(a, 2),
np.tile([0, height], len(a))])
line_segs = xy_pairs.reshape([len(a), 2, 2])
ax.add_collection(LineCollection(line_segs, transform=trans, **kwargs))
ax.autoscale_view(scalex=not vertical, scaley=vertical)
return ax
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2019, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import gzip
import yaml
import itertools
import collections
import collections.abc
import random
import resource
import pandas as pd
import skbio
import psutil
import qiime2
from q2_types.per_sample_sequences import (
SingleLanePerSampleSingleEndFastqDirFmt,
SingleLanePerSamplePairedEndFastqDirFmt,
FastqManifestFormat, YamlFormat)
from ._ecc import GolayDecoder
FastqHeader = collections.namedtuple('FastqHeader', ['id', 'description'])
def _read_fastq_seqs(filepath):
# This function is adapted from @jairideout's SO post:
# http://stackoverflow.com/a/39302117/3424666
fh = gzip.open(filepath, 'rt')
for seq_header, seq, qual_header, qual in itertools.zip_longest(*[fh] * 4):
yield (seq_header.strip(), seq.strip(), qual_header.strip(),
qual.strip())
def _trim_id(id):
return id.rsplit('/', 1)[0]
def _trim_description(desc):
    # The first field of the ':'-separated description is the read number
if ':' in desc:
desc = desc.split(':', 1)[1]
return desc.rsplit('/', 1)[0]
def _record_to_fastq_header(record):
tokens = record[0][1:].split(' ', maxsplit=1)
if len(tokens) == 1:
id, = tokens
description = None
else:
id, description = tokens
return FastqHeader(id=id, description=description)
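# Illustrative behaviour of the helpers above (the example header is
# hypothetical): _record_to_fastq_header(('@SEQID 1:N:0:ACGT', ...)) returns
# FastqHeader(id='SEQID', description='1:N:0:ACGT'), while
# _trim_id('SEQID/1') -> 'SEQID' and _trim_description('1:N:0:ACGT/1') -> 'N:0:ACGT'.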
# This is global so that it can be tested without changing the actual ulimits.
# NOTE: UNIX only
OPEN_FH_LIMIT, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
def _maintain_open_fh_count(per_sample_fastqs, paired=False):
files_to_open = 1 if not paired else 2
# NOTE: UNIX only
if psutil.Process().num_fds() + files_to_open < OPEN_FH_LIMIT:
return
# currently open per-sample files
if not paired:
open_fhs = [fh for fh in per_sample_fastqs.values()
if not fh.closed]
else:
open_fhs = [fh for fh in per_sample_fastqs.values()
if not fh[0].closed]
    # If the number of open files reaches the allotted resource limit,
    # close around 15% of the open files. 15% was chosen because closing
    # only a single file means this has to happen on every loop; in a
    # 35-file benchmark with a hard-coded limit of 10 open files, closing
    # one file at a time increased runtime by 160%.
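    # For example (illustrative figures, not from the source): with 40 open
    # single-end files, round(0.15 * 40) == 6 files would be closed in one pass.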
n_to_close = round(0.15 * len(open_fhs))
if paired:
n_to_close //= 2
    # Never close more files than are open; if closing, close at least
    # as many files as will need to be opened.
n_to_close = min(len(open_fhs), max(n_to_close, files_to_open))
for rand_fh in random.sample(open_fhs, n_to_close):
if paired:
fwd, rev = rand_fh
fwd.close()
rev.close()
else:
rand_fh.close()
class BarcodeSequenceFastqIterator(collections.abc.Iterable):
def __init__(self, barcode_generator, sequence_generator):
self.barcode_generator = barcode_generator
self.sequence_generator = sequence_generator
def __iter__(self):
# Adapted from q2-types
for barcode_record, sequence_record in itertools.zip_longest(
self.barcode_generator, self.sequence_generator):
if barcode_record is None:
raise ValueError('More sequences were provided than barcodes.')
if sequence_record is None:
raise ValueError('More barcodes were provided than sequences.')
# The id or description fields may end with "/read-number", which
# will differ between the sequence and barcode reads. Confirm that
# they are identical up until the last /
barcode_header = _record_to_fastq_header(barcode_record)
sequence_header = _record_to_fastq_header(sequence_record)
# confirm that the id fields are equal
if _trim_id(barcode_header.id) != \
_trim_id(sequence_header.id):
raise ValueError(
'Mismatched sequence ids: %s and %s' %
(_trim_id(barcode_header.id),
_trim_id(sequence_header.id)))
# if a description field is present, confirm that they're equal
if barcode_header.description is None and \
sequence_header.description is None:
pass
elif barcode_header.description is None:
raise ValueError(
'Barcode header lines do not contain description fields '
'but sequence header lines do.')
elif sequence_header.description is None:
raise ValueError(
'Sequence header lines do not contain description fields '
'but barcode header lines do.')
elif _trim_description(barcode_header.description) != \
_trim_description(sequence_header.description):
raise ValueError(
'Mismatched sequence descriptions: %s and %s' %
(_trim_description(barcode_header.description),
_trim_description(sequence_header.description)))
yield barcode_record, sequence_record
class BarcodePairedSequenceFastqIterator(collections.abc.Iterable):
def __init__(self, barcode_generator, forward_generator,
reverse_generator):
self.barcode_generator = barcode_generator
self.forward_generator = forward_generator
self.reverse_generator = reverse_generator
def __iter__(self):
# Adapted from q2-types
for barcode_record, forward_record, reverse_record \
in itertools.zip_longest(self.barcode_generator,
self.forward_generator,
self.reverse_generator):
if barcode_record is None:
raise ValueError('More sequences were provided than barcodes.')
if forward_record is None:
raise ValueError('More barcodes were provided than '
'forward-sequences.')
elif reverse_record is None:
raise ValueError('More barcodes were provided than '
'reverse-sequences.')
# The id or description fields may end with "/read-number", which
# will differ between the sequence and barcode reads. Confirm that
# they are identical up until the last /
barcode_header = _record_to_fastq_header(barcode_record)
forward_header = _record_to_fastq_header(forward_record)
reverse_header = _record_to_fastq_header(reverse_record)
# confirm that the id fields are equal
if not (_trim_id(barcode_header.id) ==
_trim_id(forward_header.id) ==
_trim_id(reverse_header.id)):
raise ValueError(
'Mismatched sequence ids: %s, %s, and %s' %
(_trim_id(barcode_header.id),
_trim_id(forward_header.id),
_trim_id(reverse_header.id)))
# if a description field is present, confirm that they're equal
if barcode_header.description is None and \
forward_header.description is None and \
reverse_header.description is None:
pass
elif barcode_header.description is None:
raise ValueError(
'Barcode header lines do not contain description fields '
'but sequence header lines do.')
elif forward_header.description is None:
raise ValueError(
'Forward-read header lines do not contain description '
'fields but barcode header lines do.')
elif reverse_header.description is None:
raise ValueError(
'Reverse-read header lines do not contain description '
'fields but barcode header lines do.')
elif not (_trim_description(barcode_header.description) ==
_trim_description(forward_header.description) ==
_trim_description(reverse_header.description)):
raise ValueError(
'Mismatched sequence descriptions: %s, %s, and %s' %
(_trim_description(barcode_header.description),
_trim_description(forward_header.description),
_trim_description(reverse_header.description)))
yield barcode_record, forward_record, reverse_record
def _make_barcode_map(barcodes, rev_comp_mapping_barcodes):
barcode_map = {}
barcode_len = None
for sample_id, barcode in barcodes.to_series().iteritems():
if barcode_len is None:
barcode_len = len(barcode)
elif len(barcode) != barcode_len:
raise ValueError('Barcodes of different lengths were detected: '
'%d != %d. Variable length barcodes are not '
'supported.' % (len(barcode), barcode_len))
if rev_comp_mapping_barcodes:
barcode = str(skbio.DNA(barcode).reverse_complement())
if barcode in barcode_map:
raise ValueError('A duplicate barcode was detected. The barcode '
'%s was observed for samples %s and %s.'
% (barcode, sample_id, barcode_map[barcode]))
barcode_map[barcode] = sample_id
return barcode_map, barcode_len
def _write_metadata_yaml(dir_fmt):
metadata = YamlFormat()
metadata.path.write_text(yaml.dump({'phred-offset': 33}))
dir_fmt.metadata.write_data(metadata, YamlFormat)
def emp_single(seqs: BarcodeSequenceFastqIterator,
barcodes: qiime2.CategoricalMetadataColumn,
golay_error_correction: bool = True,
rev_comp_barcodes: bool = False,
rev_comp_mapping_barcodes: bool = False
) -> (SingleLanePerSampleSingleEndFastqDirFmt,
pd.DataFrame):
result = SingleLanePerSampleSingleEndFastqDirFmt()
barcode_map, barcode_len = _make_barcode_map(
barcodes, rev_comp_mapping_barcodes)
if golay_error_correction:
decoder = GolayDecoder()
manifest = FastqManifestFormat()
manifest_fh = manifest.open()
manifest_fh.write('sample-id,filename,direction\n')
manifest_fh.write('# direction is not meaningful in this file as these\n')
manifest_fh.write('# data may be derived from forward, reverse, or \n')
manifest_fh.write('# joined reads\n')
per_sample_fastqs = {}
ec_details = []
for i, (barcode_record, sequence_record) in enumerate(seqs, start=1):
barcode_read = barcode_record[1]
if rev_comp_barcodes:
barcode_read = str(skbio.DNA(barcode_read).reverse_complement())
raw_barcode_read = barcode_read[:barcode_len]
if golay_error_correction:
# A three bit filter is implicitly used by the decoder. See Hamady
# and Knight 2009 Genome Research for the justification:
#
# https://genome.cshlp.org/content/19/7/1141.full
#
# Specifically that "...Golay codes of 12 bases can correct all
# triple-bit errors and detect all quadruple-bit errors."
barcode_read, ecc_errors = decoder.decode(raw_barcode_read)
golay_stats = [barcode_read, ecc_errors]
else:
barcode_read = raw_barcode_read
golay_stats = [None, None]
sample_id = barcode_map.get(barcode_read)
record = [
i,
sample_id,
barcode_record[0],
raw_barcode_read,
]
ec_details.append(record + golay_stats)
if sample_id is None:
continue
if sample_id not in per_sample_fastqs:
# The barcode id, lane number and read number are not relevant
# here. We might ultimately want to use a dir format other than
# SingleLanePerSampleSingleEndFastqDirFmt which doesn't care
# about this information. Similarly, the direction of the read
# isn't relevant here anymore.
barcode_id = len(per_sample_fastqs) + 1
path = result.sequences.path_maker(sample_id=sample_id,
barcode_id=barcode_id,
lane_number=1,
read_number=1)
_maintain_open_fh_count(per_sample_fastqs)
per_sample_fastqs[sample_id] = gzip.open(str(path), mode='a')
manifest_fh.write('%s,%s,%s\n' % (sample_id, path.name, 'forward'))
if per_sample_fastqs[sample_id].closed:
_maintain_open_fh_count(per_sample_fastqs)
per_sample_fastqs[sample_id] = gzip.open(
per_sample_fastqs[sample_id].name, mode='a')
fastq_lines = '\n'.join(sequence_record) + '\n'
fastq_lines = fastq_lines.encode('utf-8')
per_sample_fastqs[sample_id].write(fastq_lines)
barcode_count = str(i) # last value here should be our largest record no.
if len(per_sample_fastqs) == 0:
raise ValueError('No sequences were mapped to samples. Check that '
'your barcodes are in the correct orientation (see '
'the rev_comp_barcodes and/or '
'rev_comp_mapping_barcodes options).')
for fh in per_sample_fastqs.values():
fh.close()
manifest_fh.close()
result.manifest.write_data(manifest, FastqManifestFormat)
_write_metadata_yaml(result)
columns = ['id',
'sample',
'barcode-sequence-id',
'barcode-uncorrected',
'barcode-corrected',
'barcode-errors']
details = pd.DataFrame(ec_details, columns=columns)
details['id'] = details['id'].apply(lambda x: 'record-%s' %
str(x).zfill(len(barcode_count)))
details = details.set_index('id')
return result, details
def emp_paired(seqs: BarcodePairedSequenceFastqIterator,
barcodes: qiime2.CategoricalMetadataColumn,
golay_error_correction: bool = True,
rev_comp_barcodes: bool = False,
rev_comp_mapping_barcodes: bool = False
) -> (SingleLanePerSamplePairedEndFastqDirFmt,
pd.DataFrame):
result = SingleLanePerSamplePairedEndFastqDirFmt()
barcode_map, barcode_len = _make_barcode_map(
barcodes, rev_comp_mapping_barcodes)
if golay_error_correction:
decoder = GolayDecoder()
manifest = FastqManifestFormat()
manifest_fh = manifest.open()
manifest_fh.write('sample-id,filename,direction\n')
per_sample_fastqs = {}
ec_details = []
for i, record in enumerate(seqs, start=1):
barcode_record, forward_record, reverse_record = record
barcode_read = barcode_record[1]
if rev_comp_barcodes:
barcode_read = str(skbio.DNA(barcode_read).reverse_complement())
raw_barcode_read = barcode_read[:barcode_len]
if golay_error_correction:
# A three bit filter is implicitly used by the decoder. See Hamady
# and Knight 2009 Genome Research for the justification:
#
# https://genome.cshlp.org/content/19/7/1141.full
#
# Specifically that "...Golay codes of 12 bases can correct all
# triple-bit errors and detect all quadruple-bit errors."
barcode_read, ecc_errors = decoder.decode(raw_barcode_read)
golay_stats = [barcode_read, ecc_errors]
else:
barcode_read = raw_barcode_read
golay_stats = [None, None]
sample_id = barcode_map.get(barcode_read)
record = [
i,
sample_id,
barcode_record[0],
raw_barcode_read,
]
ec_details.append(record + golay_stats)
if sample_id is None:
continue
if sample_id not in per_sample_fastqs:
barcode_id = len(per_sample_fastqs) + 1
fwd_path = result.sequences.path_maker(sample_id=sample_id,
barcode_id=barcode_id,
lane_number=1,
read_number=1)
rev_path = result.sequences.path_maker(sample_id=sample_id,
barcode_id=barcode_id,
lane_number=1,
read_number=2)
_maintain_open_fh_count(per_sample_fastqs, paired=True)
per_sample_fastqs[sample_id] = (
gzip.open(str(fwd_path), mode='a'),
gzip.open(str(rev_path), mode='a')
)
manifest_fh.write('%s,%s,%s\n' % (sample_id, fwd_path.name,
'forward'))
manifest_fh.write('%s,%s,%s\n' % (sample_id, rev_path.name,
'reverse'))
if per_sample_fastqs[sample_id][0].closed:
_maintain_open_fh_count(per_sample_fastqs, paired=True)
fwd, rev = per_sample_fastqs[sample_id]
per_sample_fastqs[sample_id] = (
gzip.open(fwd.name, mode='a'),
gzip.open(rev.name, mode='a')
)
fwd, rev = per_sample_fastqs[sample_id]
fwd.write(('\n'.join(forward_record) + '\n').encode('utf-8'))
rev.write(('\n'.join(reverse_record) + '\n').encode('utf-8'))
barcode_count = str(i) # last value here should be our largest record no.
if len(per_sample_fastqs) == 0:
raise ValueError('No sequences were mapped to samples. Check that '
'your barcodes are in the correct orientation (see '
'the rev_comp_barcodes and/or '
'rev_comp_mapping_barcodes options).')
for fwd, rev in per_sample_fastqs.values():
fwd.close()
rev.close()
manifest_fh.close()
result.manifest.write_data(manifest, FastqManifestFormat)
_write_metadata_yaml(result)
columns = ['id',
'sample',
'barcode-sequence-id',
'barcode-uncorrected',
'barcode-corrected',
'barcode-errors']
details = pd.DataFrame(ec_details, columns=columns)
details['id'] = details['id'].apply(lambda x: 'record-%s' %
str(x).zfill(len(barcode_count)))
details = details.set_index('id')
return result, details
|
from __future__ import annotations
import asyncio
import logging
from collections import UserDict
from time import sleep
import pytest
import dask.config
import distributed.system
from distributed import Client, Event, Nanny, Worker, wait
from distributed.core import Status
from distributed.spill import has_zict_210
from distributed.utils_test import captured_logger, gen_cluster, inc
from distributed.worker_memory import parse_memory_limit
requires_zict_210 = pytest.mark.skipif(
not has_zict_210,
reason="requires zict version >= 2.1.0",
)
def memory_monitor_running(dask_worker: Worker | Nanny) -> bool:
return "memory_monitor" in dask_worker.periodic_callbacks
def test_parse_memory_limit_zero():
assert parse_memory_limit(0, 1) is None
assert parse_memory_limit("0", 1) is None
assert parse_memory_limit(None, 1) is None
def test_resource_limit(monkeypatch):
assert parse_memory_limit("250MiB", 1, total_cores=1) == 1024 * 1024 * 250
new_limit = 1024 * 1024 * 200
monkeypatch.setattr(distributed.system, "MEMORY_LIMIT", new_limit)
assert parse_memory_limit("250MiB", 1, total_cores=1) == new_limit
@gen_cluster(nthreads=[("", 1)], worker_kwargs={"memory_limit": "2e3 MB"})
async def test_parse_memory_limit_worker(s, w):
assert w.memory_manager.memory_limit == 2e9
@gen_cluster(
client=True,
nthreads=[("", 1)],
Worker=Nanny,
worker_kwargs={"memory_limit": "2e3 MB"},
)
async def test_parse_memory_limit_nanny(c, s, n):
assert n.memory_manager.memory_limit == 2e9
out = await c.run(lambda dask_worker: dask_worker.memory_manager.memory_limit)
assert out[n.worker_address] == 2e9
@gen_cluster(
nthreads=[("127.0.0.1", 1)],
config={
"distributed.worker.memory.spill": False,
"distributed.worker.memory.target": False,
},
)
async def test_dict_data_if_no_spill_to_disk(s, w):
assert type(w.data) is dict
class CustomError(Exception):
pass
class FailToPickle:
def __init__(self, *, reported_size=0):
self.reported_size = int(reported_size)
def __getstate__(self):
raise CustomError()
def __sizeof__(self):
return self.reported_size
async def assert_basic_futures(c: Client) -> None:
futures = c.map(inc, range(10))
results = await c.gather(futures)
assert results == list(map(inc, range(10)))
@gen_cluster(client=True)
async def test_fail_to_pickle_target_1(c, s, a, b):
"""Test failure to serialize triggered by key which is individually larger
than target. The data is lost and the task is marked as failed;
the worker remains in usable condition.
"""
x = c.submit(FailToPickle, reported_size=100e9, key="x")
await wait(x)
assert x.status == "error"
with pytest.raises(TypeError, match="Could not serialize"):
await x
await assert_basic_futures(c)
@gen_cluster(
client=True,
nthreads=[("", 1)],
worker_kwargs={"memory_limit": "1 kiB"},
config={
"distributed.worker.memory.target": 0.5,
"distributed.worker.memory.spill": False,
"distributed.worker.memory.pause": False,
},
)
async def test_fail_to_pickle_target_2(c, s, a):
"""Test failure to spill triggered by key which is individually smaller
than target, so it is not spilled immediately. The data is retained and
the task is NOT marked as failed; the worker remains in usable condition.
"""
x = c.submit(FailToPickle, reported_size=256, key="x")
await wait(x)
assert x.status == "finished"
assert set(a.data.memory) == {"x"}
y = c.submit(lambda: "y" * 256, key="y")
await wait(y)
if has_zict_210:
assert set(a.data.memory) == {"x", "y"}
else:
assert set(a.data.memory) == {"y"}
assert not a.data.disk
await assert_basic_futures(c)
@requires_zict_210
@gen_cluster(
client=True,
nthreads=[("", 1)],
worker_kwargs={"memory_limit": "1 kB"},
config={
"distributed.worker.memory.target": False,
"distributed.worker.memory.spill": 0.7,
"distributed.worker.memory.monitor-interval": "10ms",
},
)
async def test_fail_to_pickle_spill(c, s, a):
"""Test failure to evict a key, triggered by the spill threshold"""
a.monitor.get_process_memory = lambda: 701 if a.data.fast else 0
with captured_logger(logging.getLogger("distributed.spill")) as logs:
bad = c.submit(FailToPickle, key="bad")
await wait(bad)
# Must wait for memory monitor to kick in
while True:
logs_value = logs.getvalue()
if logs_value:
break
await asyncio.sleep(0.01)
assert "Failed to pickle" in logs_value
assert "Traceback" in logs_value
# key is in fast
assert bad.status == "finished"
assert bad.key in a.data.fast
await assert_basic_futures(c)
@gen_cluster(
client=True,
nthreads=[("", 1)],
worker_kwargs={"memory_limit": 1200 / 0.6},
config={
"distributed.worker.memory.target": 0.6,
"distributed.worker.memory.spill": False,
"distributed.worker.memory.pause": False,
},
)
async def test_spill_target_threshold(c, s, a):
"""Test distributed.worker.memory.target threshold. Note that in this test we
disabled spill and pause thresholds, which work on the process memory, and just left
the target threshold, which works on managed memory so it is unperturbed by the
several hundreds of MB of unmanaged memory that are typical of the test suite.
"""
assert not memory_monitor_running(a)
x = c.submit(lambda: "x" * 500, key="x")
await wait(x)
y = c.submit(lambda: "y" * 500, key="y")
await wait(y)
assert set(a.data) == {"x", "y"}
assert set(a.data.memory) == {"x", "y"}
z = c.submit(lambda: "z" * 500, key="z")
await wait(z)
assert set(a.data) == {"x", "y", "z"}
assert set(a.data.memory) == {"y", "z"}
assert set(a.data.disk) == {"x"}
await x
assert set(a.data.memory) == {"x", "z"}
assert set(a.data.disk) == {"y"}
@requires_zict_210
@gen_cluster(
client=True,
nthreads=[("", 1)],
worker_kwargs={"memory_limit": 1600},
config={
"distributed.worker.memory.target": 0.6,
"distributed.worker.memory.spill": False,
"distributed.worker.memory.pause": False,
"distributed.worker.memory.max-spill": 600,
},
)
async def test_spill_constrained(c, s, w):
"""Test distributed.worker.memory.max-spill parameter"""
    # spilling starts at 1600 * 0.6 = 960 bytes of managed memory
# size in memory ~200; size on disk ~400
x = c.submit(lambda: "x" * 200, key="x")
await wait(x)
# size in memory ~500; size on disk ~700
y = c.submit(lambda: "y" * 500, key="y")
await wait(y)
assert set(w.data) == {x.key, y.key}
assert set(w.data.memory) == {x.key, y.key}
z = c.submit(lambda: "z" * 500, key="z")
await wait(z)
assert set(w.data) == {x.key, y.key, z.key}
# max_spill has not been reached
assert set(w.data.memory) == {y.key, z.key}
assert set(w.data.disk) == {x.key}
# zb is individually larger than max_spill
zb = c.submit(lambda: "z" * 1700, key="zb")
await wait(zb)
assert set(w.data.memory) == {y.key, z.key, zb.key}
assert set(w.data.disk) == {x.key}
del zb
while "zb" in w.data:
await asyncio.sleep(0.01)
    # zc is individually smaller than max_spill, but together with the
    # already-spilled x it would exceed max_spill
zc = c.submit(lambda: "z" * 500, key="zc")
await wait(zc)
assert set(w.data.memory) == {y.key, z.key, zc.key}
assert set(w.data.disk) == {x.key}
@gen_cluster(
nthreads=[("", 1)],
client=True,
worker_kwargs={"memory_limit": "1000 MB"},
config={
"distributed.worker.memory.target": False,
"distributed.worker.memory.spill": 0.7,
"distributed.worker.memory.pause": False,
"distributed.worker.memory.monitor-interval": "10ms",
},
)
async def test_spill_spill_threshold(c, s, a):
"""Test distributed.worker.memory.spill threshold.
Test that the spill threshold uses the process memory and not the managed memory
reported by sizeof(), which may be inaccurate.
"""
assert memory_monitor_running(a)
a.monitor.get_process_memory = lambda: 800_000_000 if a.data.fast else 0
x = c.submit(inc, 0, key="x")
while not a.data.disk:
await asyncio.sleep(0.01)
assert await x == 1
@pytest.mark.parametrize(
"target,managed,expect_spilled",
[
# no target -> no hysteresis
# Over-report managed memory to test that the automated LRU eviction based on
# target is never triggered
(False, int(10e9), 1),
# Under-report managed memory, so that we reach the spill threshold for process
# memory without first reaching the target threshold for managed memory
# target == spill -> no hysteresis
(0.7, 0, 1),
# target < spill -> hysteresis from spill to target
(0.4, 0, 7),
],
)
@gen_cluster(
nthreads=[],
client=True,
config={
"distributed.worker.memory.spill": 0.7,
"distributed.worker.memory.pause": False,
"distributed.worker.memory.monitor-interval": "10ms",
},
)
async def test_spill_hysteresis(c, s, target, managed, expect_spilled):
"""
1. Test that you can enable the spill threshold while leaving the target threshold
to False
2. Test the hysteresis system where, once you reach the spill threshold, the worker
won't stop spilling until the target threshold is reached
"""
class C:
def __sizeof__(self):
return managed
with dask.config.set({"distributed.worker.memory.target": target}):
async with Worker(s.address, memory_limit="1000 MB") as a:
a.monitor.get_process_memory = lambda: 50_000_000 * len(a.data.fast)
# Add 500MB (reported) process memory. Spilling must not happen.
futures = [c.submit(C, pure=False) for _ in range(10)]
await wait(futures)
await asyncio.sleep(0.1)
assert not a.data.disk
# Add another 250MB unmanaged memory. This must trigger the spilling.
futures += [c.submit(C, pure=False) for _ in range(5)]
await wait(futures)
# Wait until spilling starts. Then, wait until it stops.
prev_n = 0
while not a.data.disk or len(a.data.disk) > prev_n:
prev_n = len(a.data.disk)
await asyncio.sleep(0)
assert len(a.data.disk) == expect_spilled
@gen_cluster(
nthreads=[("", 1)],
client=True,
config={
"distributed.worker.memory.target": False,
"distributed.worker.memory.spill": False,
"distributed.worker.memory.pause": False,
},
)
async def test_pause_executor_manual(c, s, a):
assert not memory_monitor_running(a)
# Task that is running when the worker pauses
ev_x = Event()
def f(ev):
ev.wait()
return 1
# Task that is running on the worker when the worker pauses
x = c.submit(f, ev_x, key="x")
while a.executing_count != 1:
await asyncio.sleep(0.01)
# Task that is queued on the worker when the worker pauses
y = c.submit(inc, 1, key="y")
while "y" not in a.tasks:
await asyncio.sleep(0.01)
a.status = Status.paused
# Wait for sync to scheduler
while s.workers[a.address].status != Status.paused:
await asyncio.sleep(0.01)
# Task that is queued on the scheduler when the worker pauses.
# It is not sent to the worker.
z = c.submit(inc, 2, key="z")
while "z" not in s.tasks or s.tasks["z"].state != "no-worker":
await asyncio.sleep(0.01)
assert s.unrunnable == {s.tasks["z"]}
# Test that a task that already started when the worker paused can complete
# and its output can be retrieved. Also test that the now free slot won't be
# used by other tasks.
await ev_x.set()
assert await x == 1
await asyncio.sleep(0.05)
assert a.executing_count == 0
assert len(a.ready) == 1
assert a.tasks["y"].state == "ready"
assert "z" not in a.tasks
# Unpause. Tasks that were queued on the worker are executed.
# Tasks that were stuck on the scheduler are sent to the worker and executed.
a.status = Status.running
assert await y == 2
assert await z == 3
@gen_cluster(
nthreads=[("", 1)],
client=True,
worker_kwargs={"memory_limit": "1000 MB"},
config={
"distributed.worker.memory.target": False,
"distributed.worker.memory.spill": False,
"distributed.worker.memory.pause": 0.8,
"distributed.worker.memory.monitor-interval": "10ms",
},
)
async def test_pause_executor_with_memory_monitor(c, s, a):
assert memory_monitor_running(a)
mocked_rss = 0
a.monitor.get_process_memory = lambda: mocked_rss
# Task that is running when the worker pauses
ev_x = Event()
def f(ev):
ev.wait()
return 1
# Task that is running on the worker when the worker pauses
x = c.submit(f, ev_x, key="x")
while a.executing_count != 1:
await asyncio.sleep(0.01)
with captured_logger(logging.getLogger("distributed.worker_memory")) as logger:
# Task that is queued on the worker when the worker pauses
y = c.submit(inc, 1, key="y")
while "y" not in a.tasks:
await asyncio.sleep(0.01)
# Hog the worker with 900MB unmanaged memory
mocked_rss = 900_000_000
while s.workers[a.address].status != Status.paused:
await asyncio.sleep(0.01)
assert "Pausing worker" in logger.getvalue()
# Task that is queued on the scheduler when the worker pauses.
# It is not sent to the worker.
z = c.submit(inc, 2, key="z")
while "z" not in s.tasks or s.tasks["z"].state != "no-worker":
await asyncio.sleep(0.01)
assert s.unrunnable == {s.tasks["z"]}
# Test that a task that already started when the worker paused can complete
# and its output can be retrieved. Also test that the now free slot won't be
# used by other tasks.
await ev_x.set()
assert await x == 1
await asyncio.sleep(0.05)
assert a.executing_count == 0
assert len(a.ready) == 1
assert a.tasks["y"].state == "ready"
assert "z" not in a.tasks
# Release the memory. Tasks that were queued on the worker are executed.
# Tasks that were stuck on the scheduler are sent to the worker and executed.
mocked_rss = 0
assert await y == 2
assert await z == 3
assert a.status == Status.running
assert "Resuming worker" in logger.getvalue()
@gen_cluster(
client=True,
nthreads=[("", 1)],
worker_kwargs={"memory_limit": 0},
config={"distributed.worker.memory.monitor-interval": "10ms"},
)
async def test_avoid_memory_monitor_if_zero_limit_worker(c, s, a):
assert type(a.data) is dict
assert not memory_monitor_running(a)
future = c.submit(inc, 1)
assert await future == 2
await asyncio.sleep(0.05)
assert await c.submit(inc, 2) == 3 # worker doesn't pause
@gen_cluster(
client=True,
nthreads=[("", 1)],
Worker=Nanny,
worker_kwargs={"memory_limit": 0},
config={"distributed.worker.memory.monitor-interval": "10ms"},
)
async def test_avoid_memory_monitor_if_zero_limit_nanny(c, s, nanny):
typ = await c.run(lambda dask_worker: type(dask_worker.data))
assert typ == {nanny.worker_address: dict}
assert not memory_monitor_running(nanny)
assert not (await c.run(memory_monitor_running))[nanny.worker_address]
future = c.submit(inc, 1)
assert await future == 2
await asyncio.sleep(0.02)
assert await c.submit(inc, 2) == 3 # worker doesn't pause
@gen_cluster(nthreads=[])
async def test_override_data_worker(s):
# Use a UserDict to sidestep potential special case handling for dict
async with Worker(s.address, data=UserDict) as w:
assert type(w.data) is UserDict
data = UserDict({"x": 1})
async with Worker(s.address, data=data) as w:
assert w.data is data
assert w.data == {"x": 1}
@gen_cluster(
client=True,
nthreads=[("", 1)],
Worker=Nanny,
worker_kwargs={"data": UserDict},
)
async def test_override_data_nanny(c, s, n):
r = await c.run(lambda dask_worker: type(dask_worker.data))
assert r[n.worker_address] is UserDict
@gen_cluster(
client=True,
nthreads=[("", 1)],
worker_kwargs={"memory_limit": "1 GB", "data": UserDict},
config={"distributed.worker.memory.monitor-interval": "10ms"},
)
async def test_override_data_vs_memory_monitor(c, s, a):
a.monitor.get_process_memory = lambda: 801_000_000 if a.data else 0
assert memory_monitor_running(a)
# Push a key that would normally trip both the target and the spill thresholds
class C:
def __sizeof__(self):
return 801_000_000
# Capture output of log_errors()
with captured_logger(logging.getLogger("distributed.utils")) as logger:
x = c.submit(C)
await wait(x)
# The pause subsystem of the memory monitor has been tripped.
# The spill subsystem hasn't.
while a.status != Status.paused:
await asyncio.sleep(0.01)
await asyncio.sleep(0.05)
# This would happen if memory_monitor() tried to blindly call SpillBuffer.evict()
assert "Traceback" not in logger.getvalue()
assert type(a.data) is UserDict
assert a.data.keys() == {x.key}
class ManualEvictDict(UserDict):
"""A MutableMapping which implements distributed.spill.ManualEvictProto"""
def __init__(self):
super().__init__()
self.evicted = set()
@property
def fast(self):
        # Any Sized or bool will do
return self.keys() - self.evicted
def evict(self):
        # Evict an arbitrary key (the first one returned by iterating over `fast`)
k = next(iter(self.fast))
self.evicted.add(k)
return 1
@gen_cluster(
client=True,
nthreads=[("", 1)],
worker_kwargs={"memory_limit": "1 GB", "data": ManualEvictDict},
config={
"distributed.worker.memory.pause": False,
"distributed.worker.memory.monitor-interval": "10ms",
},
)
async def test_manual_evict_proto(c, s, a):
"""data is a third-party dict-like which respects the ManualEvictProto duck-type
API. spill threshold is respected.
"""
a.monitor.get_process_memory = lambda: 701_000_000 if a.data else 0
assert memory_monitor_running(a)
assert isinstance(a.data, ManualEvictDict)
futures = await c.scatter({"x": None, "y": None, "z": None})
while a.data.evicted != {"x", "y", "z"}:
await asyncio.sleep(0.01)
@pytest.mark.slow
@gen_cluster(
nthreads=[("", 1)],
client=True,
Worker=Nanny,
worker_kwargs={"memory_limit": "400 MiB"},
config={"distributed.worker.memory.monitor-interval": "10ms"},
)
async def test_nanny_terminate(c, s, a):
def leak():
L = []
while True:
L.append(b"0" * 5_000_000)
sleep(0.01)
before = a.process.pid
with captured_logger(logging.getLogger("distributed.worker_memory")) as logger:
future = c.submit(leak)
while a.process.pid == before:
await asyncio.sleep(0.01)
out = logger.getvalue()
assert "restart" in out.lower()
assert "memory" in out.lower()
@pytest.mark.parametrize(
"cls,name,value",
[
(Worker, "memory_limit", 123e9),
(Worker, "memory_target_fraction", 0.789),
(Worker, "memory_spill_fraction", 0.789),
(Worker, "memory_pause_fraction", 0.789),
(Nanny, "memory_limit", 123e9),
(Nanny, "memory_terminate_fraction", 0.789),
],
)
@gen_cluster(nthreads=[])
async def test_deprecated_attributes(s, cls, name, value):
async with cls(s.address) as a:
with pytest.warns(FutureWarning, match=name):
setattr(a, name, value)
with pytest.warns(FutureWarning, match=name):
assert getattr(a, name) == value
assert getattr(a.memory_manager, name) == value
@gen_cluster(nthreads=[("", 1)])
async def test_deprecated_memory_monitor_method_worker(s, a):
with pytest.warns(FutureWarning, match="memory_monitor"):
await a.memory_monitor()
@gen_cluster(nthreads=[("", 1)], Worker=Nanny)
async def test_deprecated_memory_monitor_method_nanny(s, a):
with pytest.warns(FutureWarning, match="memory_monitor"):
a.memory_monitor()
@pytest.mark.parametrize(
"name",
["memory_target_fraction", "memory_spill_fraction", "memory_pause_fraction"],
)
@gen_cluster(nthreads=[])
async def test_deprecated_params(s, name):
with pytest.warns(FutureWarning, match=name):
async with Worker(s.address, **{name: 0.789}) as a:
assert getattr(a.memory_manager, name) == 0.789
|
#!/usr/bin/python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handler class for implementing a buffer."""
__author__ = 'api.sgrinberg@gmail.com (Stan Grinberg)'
class Buffer(object):
"""Implements a Buffer.
  Placeholder for temporarily storing data (e.g. sys.stdout, SOAP messages).
"""
def __init__(self):
"""Inits Buffer."""
self._buffer = ''
def write(self, str_in):
"""Append given string to a buffer.
Args:
str_in: str String to append to a buffer.
"""
self._buffer += str_in
def flush(self):
pass
def GetBufferAsStr(self):
"""Return buffer as string.
Returns:
str Buffer.
"""
return str(self._buffer)
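# Minimal usage sketch (illustrative, not part of the original module): divert
# sys.stdout into a Buffer, restore it, and read back the captured text.
if __name__ == '__main__':
  import sys
  buf = Buffer()
  original_stdout, sys.stdout = sys.stdout, buf
  try:
    print('captured line')
  finally:
    sys.stdout = original_stdout
  print(buf.GetBufferAsStr())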
|
from os import path as os_path
from os import mkdir as os_mkdir
from rdkit.Chem import MolFromSmiles, MolFromInchi, MolToSmiles, MolToInchi, MolToInchiKey
from csv import DictReader as csv_DictReader
from csv import reader as csv_reader
from logging import getLogger as logging_getLogger
from json import dump as json_dump
from json import load as json_load
from gzip import open as gzip_open
from re import findall as re_findall
from time import time as time_time
from brs_utils import print_OK, print_FAILED, download
from requests import exceptions as r_exceptions
from redis import StrictRedis
from credisdict import CRedisDict, wait_for_redis
from argparse import ArgumentParser as argparse_ArgParser
from hashlib import sha512
from pathlib import Path
from colored import attr as c_attr
#######################################################
################### rpCache ##########################
#######################################################
def add_arguments(parser):
parser.add_argument('-sm', '--store_mode', type=str, default='file',
help='data storage mode: file or db')
parser.add_argument('--gen_cache', default=None, type=str, dest='cache_dir',
                        help='generate the cache and exit')
# parser.add_argument('-p', '--print', type=bool, default=False,
# help='print additional informations')
return parser
def build_parser():
return add_arguments(argparse_ArgParser('Python script to pre-compute data'))
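# Illustrative usage sketch (not part of the original script): the parser built
# above exposes --store_mode (dest 'store_mode') and --gen_cache (dest 'cache_dir').
# >>> parser = build_parser()
# >>> args = parser.parse_args(['-sm', 'file', '--gen_cache', 'cache'])
# >>> args.store_mode, args.cache_dir
# ('file', 'cache')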
class rpCache:
"""Class to generate the cache
Contains all the functions that parse the different files used to calculate the thermodynamics and the FBA of the other steps. These should be called only when the files have changed.
"""
logger = logging_getLogger(__name__)
# logger.info('Started instance of rpCache')
# _input_cache_url = 'ftp://ftp.vital-it.ch/databases/metanetx/MNXref/3.2/'
_cache_url = 'https://gitlab.com/breakthewall/rpcache-data/-/raw/master/'
# static attributes
_convertMNXM = {
'MNXM162231': 'MNXM6',
'MNXM84': 'MNXM15',
'MNXM96410': 'MNXM14',
'MNXM114062': 'MNXM3',
'MNXM145523': 'MNXM57',
'MNXM57425': 'MNXM9',
'MNXM137': 'MNXM588022'
}
# name: sha512sum
_input_cache_files = {
'chem_xref.tsv.gz': 'e558110990dcc75af943863790dc55360fd2d40ecb17d02335377671e80f0ab3738fd556acb340e03e48dd1afdec3eece1e92df1e18bc24e7445f24f778a10da',
'reac_xref.tsv.gz': '48b991cf4a9c2ca573d395cf35c378881ed79e87772827647bfab2f6345499698664e07195ec10b342fc0164304dbd2363cccff1a1182225e6afebce3c16448b',
'compounds.tsv.gz': '719716bb880257bd014e045c03eb8dd12e2bbeba3aa52e38e9632ce605817b9dc09530e81fadd25542c0a439bdb81e1dfbd3a38f35b30b061845d1a880dbfe01',
'chem_prop.tsv.gz': 'f2d220d1f0425e5e47f01e7deccfa46b60094d43b9f62b191ffb0fab8c00ef79e87c3b71d10bdcd26020608094f24884f51b3ebc3d7d3c9a6d594c6eaa324c66',
'retrorules_rr02_flat_all.tsv.gz': '890bdd24042c0192b5538964d775feefcb6cff9ad5f35690bfbfc5ae09334dd19df6828cdfc7f57a2018e090571517122b99d8760128052af898c638ae667e24',
'comp_xref.tsv.gz': '913a827f3645fda1699676ae6c32b9d7a8debae97ce7b0c386d8447f4eee5aa721d31bfb856d4092b3d5e987a8f19a6fe4bd28ddf1c5df5f85e71c3625bd1d81',
'rxn_recipes.tsv.gz': 'dc0624f5ed7ab0b691d9a6ba02571a5cf334cfdb3109e78c98708e31574c46aeac2a97e9433788d80490ff80337679ccfd706cbb8e71a11cdc6122573bb69b0f'
}
# Attributes with dependencies (other attributes + input_cache files)
__attributes = {
'deprecatedCID_cid': {'attr_deps': [],
'file_deps': ['chem_xref.tsv.gz']},
'deprecatedRID_rid': {'attr_deps': [],
'file_deps': []},
'cid_strc': {'attr_deps': ['deprecatedCID_cid'],
'file_deps': ['compounds.tsv.gz', 'chem_prop.tsv.gz']},
'cid_name': {'attr_deps': ['deprecatedCID_cid'],
'file_deps': ['compounds.tsv.gz', 'chem_prop.tsv.gz']},
'cid_xref': {'attr_deps': ['deprecatedCID_cid'],
'file_deps': []},
'chebi_cid': {'attr_deps': ['cid_xref'],
'file_deps': []},
'rr_reactions': {'attr_deps': ['deprecatedCID_cid', 'deprecatedRID_rid'],
'file_deps': ['retrorules_rr02_flat_all.tsv.gz']},
'inchikey_cid': {'attr_deps': ['cid_strc'],
'file_deps': []},
'comp_xref': {'attr_deps': [],
'file_deps': ['comp_xref.tsv.gz']},
'deprecatedCompID_compid': {'attr_deps': [],
'file_deps': ['comp_xref.tsv.gz']},
'rr_full_reactions': {'attr_deps': ['deprecatedCID_cid', 'deprecatedRID_rid'],
'file_deps': ['rxn_recipes.tsv.gz']},
}
_attributes = list(__attributes.keys())
# name: sha512sum
_cache_files = {
_attributes[0]+'.json.gz': '698a3e83cf4f9206ea2644c9c35a9af53957838baaae6efb245d02b6b8d0ea8b25c75008e562b99ba3e0189e50ee47655376f2d0635f6206e0015f91f0e4bad8',
_attributes[1]+'.json.gz': '51554c6f6ae99c6755da7496208b3feec30547bc4cf3007d9fd30f46fa4c0cc73bad5aeb743dca07e32711c4346504296bee776d135fb18e96c891a0086fc87e',
_attributes[2]+'.json.gz': '0021ef63165d75ee6b8c209ccf14b8a1b8b7b263b4077f544729c47b5525f66511c3fa578fd2089201abb61693085b9912639e62f7b7481d06ad1f38bfc2dd8e',
_attributes[3]+'.json.gz': '7d559cc7389c0cb2bd10f92e6e845bb5724be64d1624adc4e447111fc63599bb69396cd0cc3066a6bb19910c00e266c97e21b1254d9a6dc9da3a8b033603fcff',
_attributes[4]+'.json.gz': '587d6c5206ee94e63af6d9eaf49fd5e2ca417308b3ece8a7f47e916c42376e2c8635a031ce26dc815cd7330f2323054a44d23951e416a9a29c5a9a2ab51e8953',
_attributes[5]+'.json.gz': '8783aaa65a281c4a7ab3a82a6dc99620418ed2be4a739f46db8ee304fcb3536a78fed5a955e1c373a20c3e7d3673793157c792b4429ecb5c68ddaddb1a0f7de7',
_attributes[6]+'.json.gz': '8007480fc607caf41f0f9a93beb66c7caa66c37a3d01a809f6b94bc0df469cec72091e8cc0fbabb3bd8775e9776b928ecda2779fc545c7e4b9e71c504f9510ce',
_attributes[7]+'.json.gz': 'afc2ad3d31366a8f7fe1604fa49c190ade6d46bc8915f30bd20fdfdfc663c979bb10ca55ad10cadec6002a17add46639c70e7adf89cb66c57ed004fd3e4f0051',
_attributes[8]+'.json.gz': '81c673fe1940e25a6a9722fd74b16bc30e1590db0c40810f541ad4ffba7ae04c01268b929d4bf944e84095a0c2a1d0079d1861bc1df3e8308fbb6b35e0aaf107',
_attributes[9]+'.json.gz': '599e4de4935d2ba649c0b526d8aeef6f0e3bf0ed9ee20adad65cb86b078ac139e4cc9758945c2bb6da1c6840867239c5415cb5bceeb80164798ff627aac0a985',
_attributes[10]+'.json.gz': '599e4de4935d2ba649c0b526d8aeef6f0e3bf0ed9ee20adad65cb86b078ac139e4cc9758945c2bb6da1c6840867239c5415cb5bceeb80164798ff627aac0a985'
}
_ext = '.json.gz'
_pubchem_species = {}
## Cache constructor
#
# @param self The object pointer
# @param db Mode of storing objects ('file' or a Redis host name)
def __init__(self, db='file', attrs=''):
self.store_mode = db
rpCache._db_timeout = 10
if attrs:
self._attributes = attrs
self.dirname = os_path.dirname(os_path.abspath( __file__ ))#+"/.."
# input_cache
self._input_cache_dir = self.dirname+'/input_cache/'
# cache
self._cache_dir = self.dirname+'/cache/'
if self.store_mode!='file':
self.redis = StrictRedis(host=self.store_mode, port=6379, db=0, decode_responses=True)
if not wait_for_redis(self.redis, self._db_timeout):
rpCache.logger.critical("Database "+self.store_mode+" is not reachable")
exit()
for attr in self._attributes:
setattr(self, attr, CRedisDict(attr, self.redis))
else:
for attr in self._attributes:
setattr(self, attr, None)
try:
self._check_or_load_cache()
except FileNotFoundError:
print_FAILED()
try:
rpCache._check_or_download_cache_to_disk(self._cache_dir, self._attributes)
self._check_or_load_cache()
except (r_exceptions.RequestException,
r_exceptions.InvalidSchema,
r_exceptions.ConnectionError):
print_FAILED()
rpCache.generate_cache(self._cache_dir)
self._check_or_load_cache()
def get(self, attr):
return getattr(self, attr)
#####################################################
################# ERROR functions ###################
#####################################################
class Error(Exception):
"""Error function for the convertion of structures
"""
pass
class DepictionError(Error):
"""Error function for the convertion of structures
"""
def __init__(self, message):
"""Constructor for the class
:param message: The error handling message string
:type message: str
:rtype: None
:return: None
"""
#self.expression = expression
self.message = message
#url = 'https://www.metanetx.org/cgi-bin/mnxget/mnxref/'
#url = 'ftp://ftp.vital-it.ch/databases/metanetx/MNXref/3.2/'
@staticmethod
def generate_cache(outdir):
if outdir == '':
outdir = 'cache'
if not os_path.isdir(outdir):
os_mkdir(outdir)
outdir += '/'
url = rpCache._cache_url
# FETCH INPUT_CACHE FILES
input_dir = 'input-'+os_path.basename(os_path.normpath(outdir))+'/'
for file in rpCache._input_cache_files.keys():
rpCache._download_input_cache(url, file, input_dir)
# GENERATE CACHE FILES AND STORE THEM TO DISK
deprecatedCID_cid = rpCache._gen_deprecatedCID_cid(input_dir, outdir)
cid_strc, cid_name = rpCache._gen_cid_strc_cid_name(input_dir, outdir, deprecatedCID_cid)
rpCache._gen_inchikey_cid(input_dir, outdir, cid_strc)
del cid_strc, cid_name
cid_xref = rpCache._gen_cid_xref(input_dir, outdir, deprecatedCID_cid)
rpCache._gen_chebi_cid(input_dir, outdir, cid_xref)
del cid_xref
deprecatedRID_rid = rpCache._gen_deprecatedRID_rid(input_dir, outdir)
rpCache._gen_rr_reactions(input_dir, outdir, deprecatedCID_cid, deprecatedRID_rid)
rpCache._gen_comp_xref_deprecatedCompID_compid(input_dir, outdir)
rpCache._gen_rr_full_reactions(input_dir, outdir, deprecatedCID_cid, deprecatedRID_rid)
del deprecatedCID_cid, deprecatedRID_rid
@staticmethod
def _gen_deprecatedCID_cid(input_dir, outdir):
attribute = 'deprecatedCID_cid'
print(c_attr('bold')+attribute+c_attr('reset'))
deprecatedCID_cid = None
f_deprecatedCID_cid = outdir+attribute+rpCache._ext
if not os_path.isfile(f_deprecatedCID_cid):
print(" Generating data...", end = '', flush=True)
deprecatedCID_cid = rpCache._m_deprecatedMNXM(input_dir+'chem_xref.tsv.gz')
#overwrite (or not, if it doesn't exist) entries that are defined by Thomas
try:
user_mnx_replace = json_load(open('data/mnx_replace.json', 'r'))
for user_deprecated_mnxm in user_mnx_replace:
deprecatedCID_cid[user_deprecated_mnxm] = user_mnx_replace[user_deprecated_mnxm]['mnx']
except FileNotFoundError:
print(" Error data/mnx_replace.json file not found")
print_OK()
print(" Writing data to file...", end = '', flush=True)
rpCache._store_cache_to_file(deprecatedCID_cid, f_deprecatedCID_cid)
print_OK()
else:
deprecatedCID_cid = rpCache._load_cache_from_file(f_deprecatedCID_cid)
print(" Cache file already exists", end = '', flush=True)
print_OK()
return {'attr': deprecatedCID_cid, 'file': f_deprecatedCID_cid}
@staticmethod
def _gen_cid_strc_cid_name(input_dir, outdir, deprecatedCID_cid):
attribute = 'cid_strc, cid_name'
print(c_attr('bold')+attribute+c_attr('reset'))
cid_strc = None
cid_name = None
f_cid_strc = outdir+'cid_strc'+rpCache._ext
f_cid_name = outdir+'cid_name'+rpCache._ext
if not os_path.isfile(f_cid_strc):
if not deprecatedCID_cid['attr']:
print(" Loading input data from file...", end = '', flush=True)
deprecatedCID_cid = rpCache._load_cache_from_file(deprecatedCID_cid['file'])
print_OK()
print(" Generating data...", end = '', flush=True)
cid_strc, cid_name = rpCache._m_mnxm_strc(input_dir+'/compounds.tsv.gz', input_dir+'chem_prop.tsv.gz', deprecatedCID_cid['attr'])
print_OK()
print(" Writing data to file...", end = '', flush=True)
rpCache._store_cache_to_file(cid_strc, f_cid_strc)
rpCache._store_cache_to_file(cid_name, f_cid_name)
print_OK()
else:
cid_strc = rpCache._load_cache_from_file(f_cid_strc)
print(" Cache file already exists", end = '', flush=True)
print_OK()
return {'attr': cid_strc, 'file': f_cid_strc}, {'attr': cid_name, 'file': f_cid_name}
@staticmethod
def _gen_inchikey_cid(input_dir, outdir, cid_strc):
attribute = 'inchikey_cid'
print(c_attr('bold')+attribute+c_attr('reset'))
inchikey_cid = None
f_inchikey_cid = outdir+attribute+rpCache._ext
if not os_path.isfile(f_inchikey_cid):
if not cid_strc['attr']:
print(" Loading input data from file...", end = '', flush=True)
cid_strc['attr'] = rpCache._load_cache_from_file(cid_strc['file'])
print_OK()
print(" Generating data...", end = '', flush=True)
inchikey_cid = rpCache._m_inchikey_cid(cid_strc['attr'])
print_OK()
print(" Writing data to file...", end = '', flush=True)
rpCache._store_cache_to_file(inchikey_cid, f_inchikey_cid)
print_OK()
else:
print(" Cache file already exists", end = '', flush=True)
print_OK()
@staticmethod
def _gen_cid_xref(input_dir, outdir, deprecatedCID_cid):
attribute = 'cid_xref'
print(c_attr('bold')+attribute+c_attr('reset'))
cid_xref = None
f_cid_xref = outdir+attribute+rpCache._ext
if not os_path.isfile(f_cid_xref):
if not deprecatedCID_cid['attr']:
print(" Loading input data from file...", end = '', flush=True)
deprecatedCID_cid['attr'] = rpCache._load_cache_from_file(deprecatedCID_cid['file'])
print_OK()
print(" Generating data...", end = '', flush=True)
cid_xref = rpCache._m_mnxm_xref(input_dir+'chem_xref.tsv.gz', deprecatedCID_cid['attr'])
print_OK()
print(" Writing data to file...", end = '', flush=True)
rpCache._store_cache_to_file(cid_xref, f_cid_xref)
print_OK()
else:
cid_xref = rpCache._load_cache_from_file(f_cid_xref)
print(" Cache file already exists", end = '', flush=True)
print_OK()
return {'attr': cid_xref, 'file': f_cid_xref}
@staticmethod
def _gen_chebi_cid(input_dir, outdir, cid_xref):
attribute = 'chebi_cid'
print(c_attr('bold')+attribute+c_attr('reset'))
chebi_cid = None
f_chebi_cid = outdir+attribute+rpCache._ext
if not os_path.isfile(f_chebi_cid):
print(" Generating data...", end = '', flush=True)
chebi_cid = rpCache._m_chebi_cid(cid_xref['attr'])
print_OK()
print(" Writing data to file...", end = '', flush=True)
rpCache._store_cache_to_file(chebi_cid, f_chebi_cid)
del chebi_cid
print_OK()
else:
print(" Cache file already exists", end = '', flush=True)
print_OK()
@staticmethod
def _gen_deprecatedRID_rid(input_dir, outdir):
attribute = 'deprecatedRID_rid'
print(c_attr('bold')+attribute+c_attr('reset'))
deprecatedRID_rid = None
f_deprecatedRID_rid = outdir+attribute+rpCache._ext
if not os_path.isfile(f_deprecatedRID_rid):
print(" Generating data...", end = '', flush=True)
deprecatedRID_rid = rpCache._m_deprecatedMNXR(input_dir+'reac_xref.tsv.gz')
print_OK()
print(" Writing data to file...", end = '', flush=True)
rpCache._store_cache_to_file(deprecatedRID_rid, f_deprecatedRID_rid)
print_OK()
else:
deprecatedRID_rid = rpCache._load_cache_from_file(f_deprecatedRID_rid)
print(" Cache file already exists", end = '', flush=True)
print_OK()
return {'attr': deprecatedRID_rid, 'file': f_deprecatedRID_rid}
@staticmethod
def _gen_rr_reactions(input_dir, outdir, deprecatedCID_cid, deprecatedRID_rid):
attribute = 'rr_reactions'
print(c_attr('bold')+attribute+c_attr('reset'))
rr_reactions = None
f_rr_reactions = outdir+attribute+rpCache._ext
if not os_path.isfile(f_rr_reactions):
if not deprecatedCID_cid['attr']:
print(" Loading input data from file...", end = '', flush=True)
deprecatedCID_cid['attr'] = rpCache._load_cache_from_file(deprecatedCID_cid['file'])
print_OK()
if not deprecatedRID_rid['attr']:
print(" Loading input data from file...", end = '', flush=True)
deprecatedRID_rid['attr'] = rpCache._load_cache_from_file(deprecatedRID_rid['file'])
print_OK()
print(" Generating data...", end = '', flush=True)
rr_reactions = rpCache._m_rr_reactions(input_dir+'retrorules_rr02_flat_all.tsv.gz', deprecatedCID_cid['attr'], deprecatedRID_rid['attr'])
print_OK()
del deprecatedRID_rid
print(" Writing data to file...", end = '', flush=True)
rpCache._store_cache_to_file(rr_reactions, f_rr_reactions)
print_OK()
del rr_reactions
else:
print(" Cache file already exists", end = '', flush=True)
print_OK()
# return deprecatedCID_cid
@staticmethod
def _gen_comp_xref_deprecatedCompID_compid(input_dir, outdir):
attribute = 'comp_xref, deprecatedCompID_compid'
print(c_attr('bold')+attribute+c_attr('reset'))
comp_xref = deprecatedCompID_compid = None
f_comp_xref = outdir+'comp_xref'+rpCache._ext
f_deprecatedCompID_compid = outdir+'deprecatedCompID_compid'+rpCache._ext
if not os_path.isfile(f_comp_xref) or not os_path.isfile(f_deprecatedCompID_compid):
print(" Generating data...", end = '', flush=True)
comp_xref,deprecatedCompID_compid = rpCache._m_mnxc_xref(input_dir+'comp_xref.tsv.gz')
print_OK()
print(" Writing data to file...", end = '', flush=True)
rpCache._store_cache_to_file(comp_xref, f_comp_xref)
print_OK()
del comp_xref
print(" Writing data to file...", end = '', flush=True)
rpCache._store_cache_to_file(deprecatedCompID_compid, f_deprecatedCompID_compid)
print_OK()
del deprecatedCompID_compid
else:
print(" Cache files already exist", end = '', flush=True)
print_OK()
@staticmethod
def _gen_rr_full_reactions(input_dir, outdir, deprecatedCID_cid, deprecatedRID_rid):
attribute = 'rr_full_reactions'
print(c_attr('bold')+attribute+c_attr('reset'))
rr_full_reactions = None
f_rr_full_reactions = outdir+attribute+rpCache._ext
if not os_path.isfile(f_rr_full_reactions):
print(" Generating data...", end = '', flush=True)
if not deprecatedCID_cid['attr']:
print(" Loading input data from file...", end = '', flush=True)
deprecatedCID_cid['attr'] = rpCache._load_cache_from_file(deprecatedCID_cid['file'])
print_OK()
if not deprecatedRID_rid['attr']:
print(" Loading input data from file...", end = '', flush=True)
deprecatedRID_rid['attr'] = rpCache._load_cache_from_file(deprecatedRID_rid['file'])
print_OK()
rr_full_reactions = rpCache._m_rr_full_reactions(input_dir+'rxn_recipes.tsv.gz', deprecatedCID_cid['attr'], deprecatedRID_rid['attr'])
print_OK()
print(" Writing data to file...", end = '', flush=True)
rpCache._store_cache_to_file(rr_full_reactions, f_rr_full_reactions)
print_OK()
del rr_full_reactions
else:
print(" Cache file already exists", end = '', flush=True)
print_OK()
@staticmethod
def _check_or_download_cache_to_disk(cache_dir, attributes):
for attr in attributes:
filename = attr+rpCache._ext
if os_path.isfile(cache_dir+filename) and sha512(Path(cache_dir+filename).read_bytes()).hexdigest()==rpCache._cache_files[filename]:
print(filename+" already downloaded ", end = '', flush=True)
print_OK()
else:
filename = attr+rpCache._ext
print("Downloading "+filename+"...", end = '', flush=True)
start_time = time_time()
if not os_path.isdir(cache_dir):
os_mkdir(cache_dir)
download(rpCache._cache_url+filename, cache_dir+filename)
rpCache._cache_files[attr] = True
end_time = time_time()
print_OK(end_time-start_time)
def _load_from_file(self, attribute):
filename = attribute+rpCache._ext
print("Loading "+filename+"...", end = '', flush=True)
data = self._load_cache_from_file(self._cache_dir+filename)
print_OK()
return data
def _check_or_load_cache(self):
if self.store_mode=='file':
self._check_or_load_cache_in_memory()
else:
self._check_or_load_cache_in_db()
def _check_or_load_cache_in_memory(self):
for attribute in self._attributes:
if not getattr(self, attribute):
setattr(self, attribute, self._load_from_file(attribute))
else:
print(attribute+" already loaded in memory...", end = '', flush=True)
print_OK()
def _check_or_load_cache_in_db(self):
for attribute in self._attributes:
if not CRedisDict.exists(self.redis, attribute):
self._store_cache_to_db(attribute, self._load_from_file(attribute))
else:
print(attribute+" already loaded in db...", end = '', flush=True)
print_OK()
@staticmethod
def _download_input_cache(url, file, outdir):
if not os_path.isdir(outdir):
os_mkdir(outdir)
filename = outdir+file
if not os_path.isfile(filename):
print("Downloading "+file+"...", end = '', flush=True)
start_time = time_time()
rpCache.__download_input_cache(url, file, outdir)
end_time = time_time()
print_OK(end_time-start_time)
else:
print(filename+" already downloaded ", end = '', flush=True)
print_OK()
@staticmethod
def __download_input_cache(url, file, outdir):
if not os_path.isdir(outdir):
os_mkdir(outdir)
# 3xCommon + rpReader
if file in ['reac_xref.tsv.gz', 'chem_xref.tsv.gz', 'chem_prop.tsv.gz', 'comp_xref.tsv.gz']:
download(url+'metanetx/'+file, outdir+file)
#TODO: need to add this file to the git or another location
if file in ['compounds.tsv.gz', 'rxn_recipes.tsv.gz']:
download(url+'rr02_more_data/'+file,
outdir+file)
# tar = tarfile_open(outdir+'/rr02_more_data.tar.gz', 'r:gz')
# tar.extractall(outdir)
# tar.close()
# shutil_move(outdir+'/rr02_more_data/compounds.tsv',
# outdir+'/rr_compounds.tsv')
# shutil_move(outdir+'/rr02_more_data/rxn_recipes.tsv',
# outdir)
# os_rm(outdir+'/rr02_more_data.tar.gz')
# shutil_rmtree(outdir+'/rr02_more_data')
if file=='retrorules_rr02_flat_all.tsv.gz':
download(url+'retrorules_rr02_rp3_hs/'+file,
outdir+file)
# download('https://retrorules.org/dl/preparsed/rr02/rp3/hs',
# outdir+'/retrorules_rr02_rp3_hs.tar.gz')
# tar = tarfile_open(outdir+'/retrorules_rr02_rp3_hs.tar.gz', 'r:gz')
# tar.extractall(outdir)
# tar.close()
# shutil_move(outdir+'/retrorules_rr02_rp3_hs/retrorules_rr02_flat_all.tsv', outdir+'/rules_rall.tsv')
# os_rm(outdir+'/retrorules_rr02_rp3_hs.tar.gz')
# shutil_rmtree(outdir+'/retrorules_rr02_rp3_hs')
##########################################################
################## Private Functions #####################
##########################################################
## Method to load data from file
#
# Load data from file
#
# @param self Object pointer
# @param filename File to fetch data from
# @return file content
@staticmethod
def _load_cache_from_file(filename):
if filename.endswith('.gz') or filename.endswith('.zip'):
fp = gzip_open(filename, 'rt', encoding='ascii')
else:
fp = open(filename, 'r')
return json_load(fp)
## Method to store data into file
#
# Store data into a file as json (to preserve the dictionary structure)
#
# @param self Object pointer
# @param data Data to write into file
# @param filename File to write data into
@staticmethod
def _store_cache_to_file(data, filename):
if filename.endswith('.gz') or filename.endswith('.zip'):
fp = gzip_open(filename, 'wt', encoding='ascii')
else:
fp = open(filename, 'w')
json_dump(data, fp)
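# Illustrative sketch (hypothetical path, not part of the original module): the two
# helpers above round-trip a dict through (optionally gzipped) JSON.
# >>> rpCache._store_cache_to_file({'MNXM01': 'MNXM1'}, '/tmp/demo.json.gz')
# >>> rpCache._load_cache_from_file('/tmp/demo.json.gz')
# {'MNXM01': 'MNXM1'}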
## Method to store data into redis database
#
# Assign a CRedisDict object to the attribute to copy data into the database
#
# @param self Object pointer
# @param attr_name Attribute name (database key)
# @param data Content of the attribute
def _store_cache_to_db(self, attr_name, data):
print("Storing "+attr_name+" to db...", end = '', flush=True)
setattr(rpCache, attr_name, CRedisDict(attr_name, self.redis, data))
print_OK()
## Function to create a dictionary of old to new chemical ids
#
# Generate a one-to-one dictionary of old ids to new ones. Private function
#
# TODO: check other things about the mnxm entry, like if it has the right structure etc...
@staticmethod
def _checkCIDdeprecated(cid, deprecatedCID_cid):
try:
return deprecatedCID_cid[cid]
except (KeyError, TypeError):
return cid
## Function to create a dictionary of old to new reaction ids
#
# TODO: check other things about the mnxm entry, like if it has the right structure etc...
@staticmethod
def _checkRIDdeprecated(rid, deprecatedRID_rid):
try:
return deprecatedRID_rid[rid]
except (KeyError, TypeError):
return rid
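# Illustrative sketch (values taken from the mappings used elsewhere in this class):
# the two helpers above return the up-to-date id when a mapping exists and fall back
# to the input id otherwise (including when the mapping is None).
# >>> rpCache._checkCIDdeprecated('MNXM01', {'MNXM01': 'MNXM1'})
# 'MNXM1'
# >>> rpCache._checkCIDdeprecated('MNXM1', {})
# 'MNXM1'
# >>> rpCache._checkRIDdeprecated('MNXR123', None)
# 'MNXR123'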
#################################################################
################## Public functions #############################
#################################################################
########################### MNX parsers #############################
## Function to parse the chem_xref.tsv and reac_xref.tsv files of MetanetX
#
# Generate a dictionary of old to new MetanetX identifiers to make sure that we always use the freshest ids.
# This can include more than one old id per new one and thus returns a dictionary. Private function
#
# @param xref_path Input file path
# @return Dictionary of identifiers
#TODO: save the self.deprecatedCID_cid to be used in case the rp_paths use an old version of MNX
@staticmethod
def _deprecatedMNX(xref_path):
deprecatedMNX_mnx = {}
with gzip_open(xref_path, 'rt') as f:
c = csv_reader(f, delimiter='\t')
for row in c:
if not row[0][0]=='#':
mnx = row[0].split(':')
if mnx[0]=='deprecated':
deprecatedMNX_mnx[mnx[1]] = row[1]
return deprecatedMNX_mnx
## Function that parses the chem_xref.tsv file for deprecated chemical cross-references
#
# @param chem_xref_path Input file path
# @return Dictionary of deprecated chemical ids to current chemical ids, ex: deprecatedCID_cid['MNXM01'] = 'MNXM1'
@staticmethod
def _m_deprecatedMNXM(chem_xref_path):
deprecatedCID_cid = {}
deprecatedCID_cid = rpCache._deprecatedMNX(chem_xref_path)
deprecatedCID_cid.update(rpCache._convertMNXM)
deprecatedCID_cid['MNXM01'] = 'MNXM1'
return deprecatedCID_cid
## Function to parse the reac_xref.tsv file of MetanetX
#
# Generate a dictionary of old to new MetanetX identifiers to make sure that we always use the freshest ids.
# This can include more than one old id per new one and thus returns a dictionary. Private function
#
# @param reac_xref_path Input file path
# @return Dictionary of identifiers
@staticmethod
def _m_deprecatedMNXR(reac_xref_path):
return rpCache._deprecatedMNX(reac_xref_path)
## Function to parse the chem_prop.tsv file from MetanetX and compounds.tsv from RetroRules. Uses the chemical id (cid) as key to the dictionary
#
# Generate a dictionary giving the formula, smiles, inchi and inchikey for the compounds
# TODO: Separate this function to parse the chem_prop (mnx specific) and the compounds.tsv from RetroRules (generic, not mnx specific)
# Structure of return: cid_strc['MNXM1'] = {'formula': 'H', 'smiles': '[H+]', 'inchi': 'InChI=1S/p+1', 'inchikey': 'GPRLSGONYQIRFK-UHFFFAOYSA-N'}
#
# @param rr_compounds_path Path to the RetroRules file
# @param chem_prop_path Path to the chem_prop.tsv file
# @param deprecatedCID_cid Dictionary of deprecated CID to cid
# @return cid_strc Dictionary of formula, smiles, inchi and inchikey
@staticmethod
def _m_mnxm_strc(rr_compounds_path, chem_prop_path, deprecatedCID_cid):
cid_strc = {}
cid_name = {}
for row in csv_DictReader(gzip_open(rr_compounds_path, 'rt'), delimiter='\t'):
tmp = {'formula': None,
'smiles': None,
'inchi': row['inchi'],
'inchikey': None,
'cid': rpCache._checkCIDdeprecated(row['cid'], deprecatedCID_cid),
'name': None}
try:
resConv = rpCache._convert_depiction(idepic=tmp['inchi'], itype='inchi', otype={'smiles','inchikey'})
for i in resConv:
tmp[i] = resConv[i]
except rpCache.DepictionError as e:
rpCache.logger.warning('Could not convert some of the structures: '+str(tmp))
rpCache.logger.warning(e)
cid_strc[tmp['cid']] = tmp
with gzip_open(chem_prop_path, 'rt') as f:
c = csv_reader(f, delimiter='\t')
for row in c:
if not row[0][0]=='#':
mnxm = rpCache._checkCIDdeprecated(row[0], deprecatedCID_cid)
tmp = {'formula': row[2],
'smiles': row[6],
'inchi': row[5],
'inchikey': row[8],
'cid': mnxm,
'name': row[1]}
for i in tmp:
if tmp[i]=='' or tmp[i]=='NA':
tmp[i] = None
if not mnxm in cid_name and tmp['name']:
cid_name[mnxm] = tmp['name']
if mnxm in cid_strc:
cid_strc[mnxm]['formula'] = row[2]
cid_strc[mnxm]['name'] = row[1]
if not cid_strc[mnxm]['smiles'] and tmp['smiles']:
cid_strc[mnxm]['smiles'] = tmp['smiles']
if not cid_strc[mnxm]['inchikey'] and tmp['inchikey']:
cid_strc[mnxm]['inchikey'] = tmp['inchikey']
else:
#check to see if the inchikey is valid or not
otype = set({})
if not tmp['inchikey']:
otype.add('inchikey')
if not tmp['smiles']:
otype.add('smiles')
if not tmp['inchi']:
otype.add('inchi')
itype = ''
if tmp['inchi']:
itype = 'inchi'
elif tmp['smiles']:
itype = 'smiles'
else:
rpCache.logger.warning('No valid entry for the convert_depiction function')
continue
try:
resConv = rpCache._convert_depiction(idepic=tmp[itype], itype=itype, otype=otype)
for i in resConv:
tmp[i] = resConv[i]
except rpCache.DepictionError as e:
rpCache.logger.warning('Could not convert some of the structures: '+str(tmp))
rpCache.logger.warning(e)
cid_strc[tmp['cid']] = tmp
return cid_strc, cid_name
## Function to parse the chem_xref.tsv file of MetanetX
#
# Generate a dictionary of all cross references for a given chemical id (MNX) to other database ids
#
# @param chem_xref_path MetaNetX chem_xref.tsv file path
# @param deprecatedCID_cid Dictionary of deprecated chemical ids to uniform cid
# @return Dictionary of cross references of a given chemical id
#TODO: save the self.deprecatedCID_cid to be used in case the rp_paths use an old version of MNX
@staticmethod
def _m_mnxm_xref(chem_xref_path, deprecatedCID_cid):
cid_xref = {}
with gzip_open(chem_xref_path, 'rt') as f:
c = csv_reader(f, delimiter='\t')
for row in c:
if not row[0][0]=='#':
mnx = rpCache._checkCIDdeprecated(row[1], deprecatedCID_cid)
if len(row[0].split(':'))==1:
dbName = 'mnx'
dbId = row[0]
else:
dbName = row[0].split(':')[0]
dbId = ''.join(row[0].split(':')[1:])
if dbName=='deprecated':
dbName = 'mnx'
#mnx
if not mnx in cid_xref:
cid_xref[mnx] = {}
if not dbName in cid_xref[mnx]:
cid_xref[mnx][dbName] = []
if not dbId in cid_xref[mnx][dbName]:
cid_xref[mnx][dbName].append(dbId)
### DB ###
if not dbName in cid_xref:
cid_xref[dbName] = {}
if not dbId in cid_xref[dbName]:
cid_xref[dbName][dbId] = mnx
return cid_xref
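# Illustrative sketch of the resulting structure (hypothetical entries, not from the
# original code): cid_xref is bidirectional, keyed both by MNX id and by external
# database name.
# >>> cid_xref['MNXM1']['chebi']
# ['15378']
# >>> cid_xref['chebi']['15378']
# 'MNXM1'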
## Function to parse the comp_xref.tsv file of MetanetX
#
# Generate a dictionary of compartment ids (MNX) to other database ids
#
# @param comp_xref_path The MetaNetX file that contains the cross references
# @return The dictionary of compartment identifiers
#TODO: save the self.deprecatedCID_cid to be used in case the rp_paths use an old version of MNX
@staticmethod
def _m_mnxc_xref(comp_xref_path):
comp_xref = {}
deprecatedCompID_compid = {}
try:
with gzip_open(comp_xref_path, 'rt') as f:
c = csv_reader(f, delimiter='\t')
#not_recognised = []
for row in c:
#cid = row[0].split(':')
if not row[0][0]=='#':
#collect the info
mnxc = row[1]
if len(row[0].split(':'))==1:
dbName = 'mnx'
dbCompId = row[0]
else:
dbName = row[0].split(':')[0]
dbCompId = ''.join(row[0].split(':')[1:])
dbCompId = dbCompId.lower()
if dbName=='deprecated':
dbName = 'mnx'
#create the dicts
if not mnxc in comp_xref:
comp_xref[mnxc] = {}
if not dbName in comp_xref[mnxc]:
comp_xref[mnxc][dbName] = []
if not dbCompId in comp_xref[mnxc][dbName]:
comp_xref[mnxc][dbName].append(dbCompId)
#create the reverse dict
if not dbCompId in deprecatedCompID_compid:
deprecatedCompID_compid[dbCompId] = mnxc
except FileNotFoundError:
rpCache.logger.error('comp_xref file not found')
return {}
return comp_xref,deprecatedCompID_compid
######################## RetroRules specific functions ##################
## Function to parse the rules_rall.tsv from RetroRules
#
# Extract from the reaction rules the ruleID, the reactionID and the direction of the rule relative to the original reaction
# Structure of the return: rr_reactions['RR-02-d2e7c5761b5a9b4b-04-F'] = {'MNXR139133': {'rule_id': 'RR-02-d2e7c5761b5a9b4b-04-F', 'rule_score': 0.3151075983206353, 'reac_id': 'MNXR139133', 'subs_id': 'MNXM89557', 'rel_direction': 1, 'left': {'MNXM89557': 1}, 'right': {'MNXM20': 1, 'MNXM722724': 1}}}
#
# @param rules_rall_path Path to the RetroRules reaction rules
# @param deprecatedCID_cid Dictionary of deprecated to uniform chemical ids
# @param deprecatedRID_rid Dictionary of deprecated to uniform reaction ids
# @return Dictionary describing each reaction rule
@staticmethod
def _m_rr_reactions(rules_rall_path, deprecatedCID_cid, deprecatedRID_rid):
rr_reactions = {}
try:
#with gzip_open(rules_rall_path, 'r') as f:
# reader = csv.reader(f, delimiter = '\t')
# next(reader)
# rule = {}
# for row in reader:
for row in csv_DictReader(gzip_open(rules_rall_path, 'rt'), delimiter='\t'):
#NOTE: as of now all the rules are generated using MNX
#but it may be that other dbs are used, we are handling this case
#WARNING: can have multiple products so need to separate them
products = {}
for i in row['Product_IDs'].split('.'):
cid = rpCache._checkCIDdeprecated(i, deprecatedCID_cid)
if not cid in products:
products[cid] = 1
else:
products[cid] += 1
try:
#WARNING: one reaction rule can have multiple reactions associated with it
#To be changed when subpaths can be set from the multiple reaction numbers;
#we assume that the reaction rule has multiple unique reactions associated
if row['# Rule_ID'] not in rr_reactions:
rr_reactions[row['# Rule_ID']] = {}
if row['Reaction_ID'] in rr_reactions[row['# Rule_ID']]:
rpCache.logger.warning('There is already reaction '+str(row['Reaction_ID'])+' in reaction rule '+str(row['# Rule_ID']))
rr_reactions[row['# Rule_ID']][row['Reaction_ID']] = {
'rule_id': row['# Rule_ID'],
'rule_score': float(row['Score_normalized']),
'reac_id': rpCache._checkRIDdeprecated(row['Reaction_ID'], deprecatedRID_rid),
'subs_id': rpCache._checkCIDdeprecated(row['Substrate_ID'], deprecatedCID_cid),
'rel_direction': int(row['Rule_relative_direction']),
'left': {rpCache._checkCIDdeprecated(row['Substrate_ID'], deprecatedCID_cid): 1},
'right': products}
except ValueError:
rpCache.logger.error('Problem converting rel_direction: '+str(row['Rule_relative_direction']))
rpCache.logger.error('Problem converting rule_score: '+str(row['Score_normalized']))
return rr_reactions
except FileNotFoundError as e:
rpCache.logger.error('Could not read the rules_rall file ('+str(rules_rall_path)+')')
return {}
## Generate complete reactions from the rxn_recipes.tsv from RetroRules
#
# These are the complete reactions from which the reaction rules are generated. This is used to
# reconstruct the full reactions from monocomponent reactions
# Structure of the return: rr_full_reactions['MNXR142257'] = {'left': {'MNXM4660': 1}, 'right': {'MNXM97172': 1}, 'direction': 0, 'main_left': ['MNXM4660'], 'main_right': ['MNXM97172']}
#
# @param rxn_recipes_path Path to the recipes file
# @param deprecatedCID_cid Dictionary of deprecated to uniform chemical ids
# @param deprecatedRID_rid Dictionary of deprecated to uniform reaction ids
# @return Dictionary of the complete reactions, or False on failure
@staticmethod
def _m_rr_full_reactions(rxn_recipes_path, deprecatedCID_cid, deprecatedRID_rid):
#### rescue values for stoichiometric coefficients given as characters or expressions
DEFAULT_STOICHIO_RESCUE = {"4n": 4, "3n": 3, "2n": 2, 'n': 1,
'(n)': 1, '(N)': 1, '(2n)': 2, '(x)': 1,
'N': 1, 'm': 1, 'q': 1,
'0.01': 1, '0.1': 1, '0.5': 1, '1.5': 1,
'0.02': 1, '0.2': 1,
'(n-1)': 0, '(n-2)': -1}
reaction = {}
try:
for row in csv_DictReader(gzip_open(rxn_recipes_path, 'rt'), delimiter='\t'):
tmp = {} # makes sure that if there's an error it's not added
#parse the reaction equation
if not len(row['Equation'].split('='))==2:
rpCache.logger.warning('There should always be exactly one left and one right side in an equation')
rpCache.logger.warning(row['Equation'])
continue
######### LEFT ######
#### MNX id
tmp['left'] = {}
# if row['#Reaction_ID']=="MNXR141948":
# print(row)
# exit()
for spe in re_findall(r'(\(n-1\)|\d+|4n|3n|2n|n|\(n\)|\(N\)|\(2n\)|\(x\)|N|m|q|\(n\-2\)|\d+\.\d+) ([\w\d]+)@\w+', row['Equation'].split('=')[0]):
#1) try to rescue if its one of the values
try:
tmp['left'][rpCache._checkCIDdeprecated(spe[1], deprecatedCID_cid)] = DEFAULT_STOICHIO_RESCUE[spe[0]]
except KeyError:
#2) try to convert to int if its not
try:
tmp['left'][rpCache._checkCIDdeprecated(spe[1], deprecatedCID_cid)] = int(spe[0])
except ValueError:
rpCache.logger.warning('Cannot convert '+str(spe[0]))
continue
####### RIGHT #####
#### MNX id
tmp['right'] = {}
for spe in re_findall(r'(\(n-1\)|\d+|4n|3n|2n|n|\(n\)|\(N\)|\(2n\)|\(x\)|N|m|q|\(n\-2\)|\d+\.\d+) ([\w\d]+)@\w+', row['Equation'].split('=')[1]):
#1) try to rescue if its one of the values
try:
tmp['right'][rpCache._checkCIDdeprecated(spe[1], deprecatedCID_cid)] = DEFAULT_STOICHIO_RESCUE[spe[0]]
except KeyError:
#2) try to convert to int if its not
try:
tmp['right'][rpCache._checkCIDdeprecated(spe[1], deprecatedCID_cid)] = int(spe[0])
except ValueError:
rpCache.logger.warning('Cannot convert '+str(spe[0]))
continue
####### DIRECTION ######
try:
tmp['direction'] = int(row['Direction'])
except ValueError:
rpCache.logger.error('Cannot convert '+str(row['Direction'])+' to int')
continue
### add the others
tmp['main_left'] = row['Main_left'].split(',')
tmp['main_right'] = row['Main_right'].split(',')
reaction[rpCache._checkRIDdeprecated(row['#Reaction_ID'], deprecatedRID_rid)] = tmp
return reaction
except FileNotFoundError:
rpCache.logger.error('Cannot find file: '+str(rxn_recipes_path))
return False
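# Illustrative sketch of the equation parsing above (hypothetical equation string):
# each side of 'Equation' is scanned for '<stoichiometry> <compound_id>@<compartment>'
# tokens; non-integer stoichiometries are rescued via DEFAULT_STOICHIO_RESCUE before
# falling back to int().
# >>> from re import findall
# >>> eq_left = '1 MNXM4660@MNXD1 + n MNXM1@MNXD1'
# >>> findall(r'(\(n-1\)|\d+|4n|3n|2n|n|\(n\)|\(N\)|\(2n\)|\(x\)|N|m|q|\(n\-2\)|\d+\.\d+) ([\w\d]+)@\w+', eq_left)
# [('1', 'MNXM4660'), ('n', 'MNXM1')]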
######################## Generic functions ###############################
## Convert chemical depiction to others type of depictions
#
# Usage example:
# - convert_depiction(idepic='CCO', otype={'inchi', 'smiles', 'inchikey'})
# - convert_depiction(idepic='InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3', itype='inchi', otype={'inchi', 'smiles', 'inchikey'})
#
# @param self The object pointer
# @param idepic String depiction to be converted, str
# @param itype type of depiction provided as input, str
# @param otype types of depiction to be generated, {"", "", ..}
# @return odepic generated depictions, {"otype1": "odepic1", ..}
@staticmethod
def _convert_depiction(idepic, itype='smiles', otype={'inchikey'}):
# Import (if needed)
if itype == 'smiles':
rdmol = MolFromSmiles(idepic, sanitize=True)
elif itype == 'inchi':
rdmol = MolFromInchi(idepic, sanitize=True)
else:
raise NotImplementedError('"{}" is not a valid input type'.format(itype))
if rdmol is None: # Check import
raise rpCache.DepictionError('Import error from depiction "{}" of type "{}"'.format(idepic, itype))
# Export
odepic = dict()
for item in otype:
if item == 'smiles':
odepic[item] = MolToSmiles(rdmol) # MolToSmiles is tricky, one may want to check the possible options..
elif item == 'inchi':
odepic[item] = MolToInchi(rdmol)
elif item == 'inchikey':
odepic[item] = MolToInchiKey(rdmol)
else:
raise NotImplementedError('"{}" is not a valid output type'.format(otype))
return odepic
## Function to extract the ChEBI cross references from the chemical cross-reference dictionary
#
# Generate a dictionary mapping ChEBI ids to chemical ids (MNX)
# Structure of the return: chebi_cid['88281'] = 'MNXM2323'
#
# @param cid_xref Dictionary of cross references for a given chemical id
# @return The dictionary of identifiers
#TODO: save the self.deprecatedCID_cid to be used in case the rp_paths use an old version of MNX
# def _m_chebi_cid(self, cid_xref):
@staticmethod
def _m_chebi_cid(cid_xref):
chebi_cid = {}
for cid in cid_xref:
if 'chebi' in cid_xref[cid]:
for c in cid_xref[cid]['chebi']:
chebi_cid[c] = cid
return chebi_cid
## Function to build the dictionary to find the chemical id from an InChIKey
#
# @param cid_strc Dictionary of chemical IDs to all the structure information associated with them
# @return Dictionary of InChIKey to chemical ID
@staticmethod
def _m_inchikey_cid(cid_strc):
inchikey_cid = {}
for cid in cid_strc:
inchikey = cid_strc[cid]['inchikey']
# This line is needed to put a value in 'inchikey', otherwise there are some problems in future structures
if not inchikey: inchikey = 'NO_INCHIKEY'
if not inchikey in inchikey_cid:
inchikey_cid[inchikey] = []
inchikey_cid[inchikey].append(cid)
return inchikey_cid
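# Illustrative usage sketch (assumption, not part of the original module): a typical
# workflow instantiates the cache for one attribute and reads the generated mapping.
# >>> cache = rpCache(db='file', attrs=['deprecatedCID_cid'])
# >>> cache.get('deprecatedCID_cid')['MNXM01']
# 'MNXM1'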
|
# coding: utf8
from __future__ import unicode_literals
from ...symbols import ORTH, LEMMA
_exc = {}
# Time
for exc_data in [
{LEMMA: "قبل الميلاد", ORTH: "ق.م"},
{LEMMA: "بعد الميلاد", ORTH: "ب. م"},
{LEMMA: "ميلادي", ORTH: ".م"},
{LEMMA: "هجري", ORTH: ".هـ"},
{LEMMA: "توفي", ORTH: ".ت"},
]:
_exc[exc_data[ORTH]] = [exc_data]
# Scientific abbreviations
for exc_data in [
{LEMMA: "صلى الله عليه وسلم", ORTH: "صلعم"},
{LEMMA: "الشارح", ORTH: "الشـ"},
{LEMMA: "الظاهر", ORTH: "الظـ"},
{LEMMA: "أيضًا", ORTH: "أيضـ"},
{LEMMA: "إلى آخره", ORTH: "إلخ"},
{LEMMA: "انتهى", ORTH: "اهـ"},
{LEMMA: "حدّثنا", ORTH: "ثنا"},
{LEMMA: "حدثني", ORTH: "ثنى"},
{LEMMA: "أنبأنا", ORTH: "أنا"},
{LEMMA: "أخبرنا", ORTH: "نا"},
{LEMMA: "مصدر سابق", ORTH: "م. س"},
{LEMMA: "مصدر نفسه", ORTH: "م. ن"},
]:
_exc[exc_data[ORTH]] = [exc_data]
# Other abbreviations
for exc_data in [
{LEMMA: "دكتور", ORTH: "د."},
{LEMMA: "أستاذ دكتور", ORTH: "أ.د"},
{LEMMA: "أستاذ", ORTH: "أ."},
{LEMMA: "بروفيسور", ORTH: "ب."},
]:
_exc[exc_data[ORTH]] = [exc_data]
for exc_data in [{LEMMA: "تلفون", ORTH: "ت."}, {LEMMA: "صندوق بريد", ORTH: "ص.ب"}]:
_exc[exc_data[ORTH]] = [exc_data]
TOKENIZER_EXCEPTIONS = _exc
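# Illustrative sketch (not part of the original module): each exception maps the
# surface form (ORTH) to a list with a single token analysis; ORTH and LEMMA are
# spaCy symbol constants, shown here symbolically.
# >>> TOKENIZER_EXCEPTIONS["ق.م"]
# [{LEMMA: "قبل الميلاد", ORTH: "ق.م"}]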
|
import filer.fields.file
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("cms", "0003_auto_20140926_2347"),
("filer", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="PageMeta",
fields=[
("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
(
"og_type",
models.CharField(
help_text="Use Article for generic pages.",
max_length=255,
verbose_name="Resource type",
choices=[(b"article", "Article"), (b"website", "Website")],
),
),
(
"og_author_url",
models.CharField(default=b"", max_length=255, verbose_name="Author Facebook URL", blank=True),
),
(
"og_author_fbid",
models.CharField(
default=b"",
help_text="Use Facebook numeric ID",
max_length=16,
verbose_name="Author Facebook ID",
blank=True,
),
),
(
"og_publisher",
models.CharField(default=b"", max_length=255, verbose_name="Website Facebook URL", blank=True),
),
(
"og_app_id",
models.CharField(default=b"", max_length=255, verbose_name="Facebook App ID", blank=True),
),
(
"twitter_author",
models.CharField(
default=b"",
help_text='"@" sign not required.',
max_length=255,
verbose_name="Author Twitter Account",
blank=True,
),
),
(
"twitter_site",
models.CharField(
default=b"",
help_text='"@" sign not required.',
max_length=255,
verbose_name="Website Twitter Account",
blank=True,
),
),
(
"twitter_type",
models.CharField(
max_length=255,
verbose_name="Resource type",
choices=[
(b"summary", "Summary"),
(b"summary_large_image", "Summary large image"),
(b"product", "Product"),
(b"photo", "Photo"),
(b"player", "Player"),
(b"app", "App"),
],
),
),
(
"gplus_author",
models.CharField(
default=b"",
help_text='Use the Google+ Name (together with "+") or the complete path to the page.',
max_length=255,
verbose_name="Author Google+ URL",
blank=True,
),
),
(
"gplus_type",
models.CharField(
help_text="Use Article for generic pages.",
max_length=255,
verbose_name="Resource type",
choices=[
(b"Article", "Article"),
(b"Blog", "Blog"),
(b"Book", "Book"),
(b"Event", "Event"),
(b"LocalBusiness", "LocalBusiness"),
(b"Organization", "Organization"),
(b"Person", "Person"),
(b"Product", "Product"),
(b"Review", "Review"),
],
),
),
("extended_object", models.OneToOneField(editable=False, to="cms.Page", on_delete=models.CASCADE)),
(
"image",
filer.fields.file.FilerFileField(
related_name="djangocms_page_meta_page",
blank=True,
to="filer.File",
help_text="Used if title image is empty.",
null=True,
on_delete=models.CASCADE,
),
),
(
"og_author",
models.ForeignKey(
verbose_name="Author account",
blank=True,
to=settings.AUTH_USER_MODEL,
null=True,
on_delete=models.CASCADE,
),
),
(
"public_extension",
models.OneToOneField(
related_name="draft_extension",
null=True,
editable=False,
to="djangocms_page_meta.PageMeta",
on_delete=models.CASCADE,
),
),
],
options={
"verbose_name": "Page meta info (all languages)",
},
bases=(models.Model,),
),
migrations.CreateModel(
name="TitleMeta",
fields=[
("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
("keywords", models.CharField(default=b"", max_length=400, blank=True)),
("description", models.CharField(default=b"", max_length=400, blank=True)),
(
"og_description",
models.CharField(default=b"", max_length=400, verbose_name="Facebook Description", blank=True),
),
(
"twitter_description",
models.CharField(default=b"", max_length=140, verbose_name="Twitter Description", blank=True),
),
(
"gplus_description",
models.CharField(default=b"", max_length=400, verbose_name="Google+ Description", blank=True),
),
("extended_object", models.OneToOneField(editable=False, to="cms.Title", on_delete=models.CASCADE)),
(
"image",
filer.fields.file.FilerFileField(
related_name="djangocms_page_meta_title",
blank=True,
to="filer.File",
help_text="If empty, page image will be used for all languages.",
null=True,
on_delete=models.CASCADE,
),
),
(
"public_extension",
models.OneToOneField(
related_name="draft_extension",
null=True,
editable=False,
to="djangocms_page_meta.TitleMeta",
on_delete=models.CASCADE,
),
),
],
options={
"verbose_name": "Page meta info (language-dependent)",
},
bases=(models.Model,),
),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayCommerceKidsTokenCreateModel import AlipayCommerceKidsTokenCreateModel
class AlipayCommerceKidsTokenCreateRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayCommerceKidsTokenCreateModel):
self._biz_content = value
else:
self._biz_content = AlipayCommerceKidsTokenCreateModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.commerce.kids.token.create'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
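# Illustrative usage sketch (assumption, not part of the original SDK file): the
# request is parameterised through its setters and serialised via get_params().
# >>> req = AlipayCommerceKidsTokenCreateRequest()
# >>> req.notify_url = 'https://example.com/notify'
# >>> params = req.get_params()
# >>> params[P_METHOD]
# 'alipay.commerce.kids.token.create'
# >>> params['notify_url']
# 'https://example.com/notify'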
|
# Copyright (c) 2018-2020, NVIDIA CORPORATION.
from __future__ import division
import inspect
import itertools
import numbers
import pickle
import sys
import warnings
from collections import OrderedDict, defaultdict
from collections.abc import Iterable, Mapping, Sequence
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
from numba import cuda
from nvtx import annotate
from pandas._config import get_option
from pandas.api.types import is_dict_like
from pandas.io.formats import console
from pandas.io.formats.printing import pprint_thing
import cudf
from cudf import _lib as libcudf
from cudf._lib.null_mask import MaskState, create_null_mask
from cudf.core import column, reshape
from cudf.core.abc import Serializable
from cudf.core.column import as_column, column_empty
from cudf.core.column_accessor import ColumnAccessor
from cudf.core.frame import Frame
from cudf.core.groupby.groupby import DataFrameGroupBy
from cudf.core.index import Index, RangeIndex, as_index
from cudf.core.indexing import _DataFrameIlocIndexer, _DataFrameLocIndexer
from cudf.core.series import Series
from cudf.core.window import Rolling
from cudf.utils import applyutils, docutils, ioutils, queryutils, utils
from cudf.utils.docutils import copy_docstring
from cudf.utils.dtypes import (
cudf_dtype_from_pydata_dtype,
find_common_type,
is_categorical_dtype,
is_column_like,
is_datetime_dtype,
is_list_dtype,
is_list_like,
is_scalar,
is_string_dtype,
is_struct_dtype,
numeric_normalize_types,
)
from cudf.utils.utils import OrderedColumnDict
def _unique_name(existing_names, suffix="_unique_name"):
ret = suffix
i = 1
while ret in existing_names:
ret = "%s_%d" % (suffix, i)
i += 1
return ret
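# Illustrative sketch (not part of the original module): _unique_name appends an
# increasing counter until the suffix no longer collides with an existing name.
# >>> _unique_name({'x', '_unique_name'})
# '_unique_name_1'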
def _reverse_op(fn):
return {
"add": "radd",
"radd": "add",
"sub": "rsub",
"rsub": "sub",
"mul": "rmul",
"rmul": "mul",
"mod": "rmod",
"rmod": "mod",
"pow": "rpow",
"rpow": "pow",
"floordiv": "rfloordiv",
"rfloordiv": "floordiv",
"truediv": "rtruediv",
"rtruediv": "truediv",
"__add__": "__radd__",
"__radd__": "__add__",
"__sub__": "__rsub__",
"__rsub__": "__sub__",
"__mul__": "__rmul__",
"__rmul__": "__mul__",
"__mod__": "__rmod__",
"__rmod__": "__mod__",
"__pow__": "__rpow__",
"__rpow__": "__pow__",
"__floordiv__": "__rfloordiv__",
"__rfloordiv__": "__floordiv__",
"__truediv__": "__rtruediv__",
"__rtruediv__": "__truediv__",
}[fn]
_cupy_nan_methods_map = {
"min": "nanmin",
"max": "nanmax",
"sum": "nansum",
"prod": "nanprod",
"mean": "nanmean",
"std": "nanstd",
"var": "nanvar",
}
class DataFrame(Frame, Serializable):
_internal_names = {"_data", "_index"}
@annotate("DATAFRAME_INIT", color="blue", domain="cudf_python")
def __init__(self, data=None, index=None, columns=None, dtype=None):
"""
A GPU Dataframe object.
Parameters
----------
data : array-like, Iterable, dict, or DataFrame.
Dict can contain Series, arrays, constants, or list-like objects.
index : Index or array-like
Index to use for resulting frame. Will default to
RangeIndex if no indexing information part of input data and
no index provided.
columns : Index or array-like
Column labels to use for resulting frame.
Will default to RangeIndex (0, 1, 2, …, n) if no column
labels are provided.
dtype : dtype, default None
Data type to force. Only a single dtype is allowed.
If None, infer.
Examples
--------
Build dataframe with ``__setitem__``:
>>> import cudf
>>> df = cudf.DataFrame()
>>> df['key'] = [0, 1, 2, 3, 4]
>>> df['val'] = [float(i + 10) for i in range(5)] # insert column
>>> df
key val
0 0 10.0
1 1 11.0
2 2 12.0
3 3 13.0
4 4 14.0
Build DataFrame via dict of columns:
>>> import numpy as np
>>> from datetime import datetime, timedelta
>>> t0 = datetime.strptime('2018-10-07 12:00:00', '%Y-%m-%d %H:%M:%S')
>>> n = 5
>>> df = cudf.DataFrame({
... 'id': np.arange(n),
... 'datetimes': np.array(
... [(t0+ timedelta(seconds=x)) for x in range(n)])
... })
>>> df
id datetimes
0 0 2018-10-07T12:00:00.000
1 1 2018-10-07T12:00:01.000
2 2 2018-10-07T12:00:02.000
3 3 2018-10-07T12:00:03.000
4 4 2018-10-07T12:00:04.000
Build DataFrame via list of rows as tuples:
>>> df = cudf.DataFrame([
... (5, "cats", "jump", np.nan),
... (2, "dogs", "dig", 7.5),
... (3, "cows", "moo", -2.1, "occasionally"),
... ])
>>> df
0 1 2 3 4
0 5 cats jump <NA> <NA>
1 2 dogs dig 7.5 <NA>
2 3 cows moo -2.1 occasionally
Convert from a Pandas DataFrame:
>>> import pandas as pd
>>> pdf = pd.DataFrame({'a': [0, 1, 2, 3],'b': [0.1, 0.2, None, 0.3]})
>>> pdf
a b
0 0 0.1
1 1 0.2
2 2 NaN
3 3 0.3
>>> df = cudf.from_pandas(pdf)
>>> df
a b
0 0 0.1
1 1 0.2
2 2 <NA>
3 3 0.3
"""
super().__init__()
if isinstance(columns, (Series, cudf.Index)):
columns = columns.to_pandas()
if isinstance(data, ColumnAccessor):
if index is None:
index = as_index(range(data.nrows))
else:
index = as_index(index)
self._index = index
if columns is not None:
self._data = data
self._reindex(columns=columns, deep=True, inplace=True)
else:
self._data = data
elif isinstance(data, (DataFrame, pd.DataFrame)):
if isinstance(data, pd.DataFrame):
data = self.from_pandas(data)
if index is not None:
if not data.index.equals(index):
data = data.reindex(index)
index = data._index
else:
index = as_index(index)
else:
index = data._index
self._index = index
if columns is not None:
self._data = data._data
self._reindex(
columns=columns, index=index, deep=False, inplace=True
)
else:
self._data = data._data
self.columns = data.columns
elif data is None:
if index is None:
self._index = RangeIndex(0)
else:
self._index = as_index(index)
if columns is not None:
self._data = ColumnAccessor(
OrderedDict.fromkeys(
columns,
column.column_empty(
len(self), dtype="object", masked=True
),
)
)
elif hasattr(data, "__cuda_array_interface__"):
arr_interface = data.__cuda_array_interface__
# descr is an optional field of the __cuda_array_interface__
if "descr" in arr_interface:
if len(arr_interface["descr"]) == 1:
new_df = self._from_arrays(
data, index=index, columns=columns
)
else:
new_df = self.from_records(
data, index=index, columns=columns
)
else:
new_df = self._from_arrays(data, index=index, columns=columns)
self._data = new_df._data
self.index = new_df._index
self.columns = new_df.columns
elif hasattr(data, "__array_interface__"):
arr_interface = data.__array_interface__
if len(arr_interface["descr"]) == 1:
# not record arrays
new_df = self._from_arrays(data, index=index, columns=columns)
else:
new_df = self.from_records(data, index=index, columns=columns)
self._data = new_df._data
self.index = new_df._index
self.columns = new_df.columns
else:
if is_list_like(data):
if len(data) > 0 and is_scalar(data[0]):
new_df = self._from_columns(
[data], index=index, columns=columns
)
self._data = new_df._data
self.index = new_df._index
self.columns = new_df.columns
elif len(data) > 0 and isinstance(data[0], Series):
self._init_from_series_list(
data=data, columns=columns, index=index
)
else:
self._init_from_list_like(
data, index=index, columns=columns
)
else:
if not is_dict_like(data):
raise TypeError("data must be list or dict-like")
self._init_from_dict_like(data, index=index, columns=columns)
if dtype:
self._data = self.astype(dtype)._data
def _init_from_series_list(self, data, columns, index):
if index is None:
# When `index` is `None`, the final index of
# the resulting dataframe will be the union of
# all the Series names.
final_index = as_index(_get_union_of_series_names(data))
else:
# When an `index` is passed, the final index of
# the resulting dataframe will be whatever
# index is passed, but it will need
# shape validations - explained below
data_length = len(data)
index_length = len(index)
if data_length != index_length:
# If the passed `index` length doesn't match
# length of Series objects in `data`, we must
# check if `data` can be duplicated/expanded
# to match the length of index. For that we
# check if the length of data is a factor
# of the length of index.
#
# 1. If yes, we extend data
# until length of data is equal to length of index.
# 2. If no, we throw an error stating the
# shape of resulting `data` and `index`
# Simple example
# >>> import pandas as pd
# >>> s = pd.Series([1, 2, 3])
# >>> pd.DataFrame([s], index=['a', 'b'])
# 0 1 2
# a 1 2 3
# b 1 2 3
# >>> pd.DataFrame([s], index=['a', 'b', 'c'])
# 0 1 2
# a 1 2 3
# b 1 2 3
# c 1 2 3
if index_length % data_length == 0:
initial_data = data
data = []
for _ in range(int(index_length / data_length)):
data.extend([o for o in initial_data])
else:
raise ValueError(
f"Shape of passed values is "
f"{(data_length, len(data[0]))}, "
f"indices imply {(index_length, len(data[0]))}"
)
final_index = as_index(index)
series_lengths = list(map(lambda x: len(x), data))
data = numeric_normalize_types(*data)
if series_lengths.count(series_lengths[0]) == len(series_lengths):
# Calculating the final dataframe columns by
# getting union of all `index` of the Series objects.
final_columns = _get_union_of_indices([d.index for d in data])
for idx, series in enumerate(data):
if not series.index.is_unique:
raise ValueError(
"Reindexing only valid with uniquely valued Index "
"objects"
)
if not series.index.equals(final_columns):
series = series.reindex(final_columns)
self._data[idx] = column.as_column(series._column)
# Setting `final_columns` to self._index so
# that the resulting `transpose` will have
# columns set to `final_columns`
self._index = final_columns
transpose = self.T
else:
concat_df = cudf.concat(data, axis=1)
if concat_df.columns.dtype == "object":
concat_df.columns = concat_df.columns.astype("str")
transpose = concat_df.T
transpose._index = final_index
self._data = transpose._data
self._index = transpose._index
# If `columns` is passed, the resulting dataframe
# will contain only the
# specified `columns` in the same order.
if columns:
for col_name in columns:
if col_name not in self._data:
self._data[col_name] = column.column_empty(
row_count=len(self), dtype=None, masked=True
)
self._data = self._data.select_by_label(columns)
def _init_from_list_like(self, data, index=None, columns=None):
if index is None:
index = RangeIndex(start=0, stop=len(data))
else:
index = as_index(index)
self._index = as_index(index)
# list-of-dicts case
if len(data) > 0 and isinstance(data[0], dict):
data = DataFrame.from_pandas(pd.DataFrame(data))
self._data = data._data
else:
data = list(itertools.zip_longest(*data))
if columns is not None and len(data) == 0:
data = [
cudf.core.column.column_empty(row_count=0, dtype=None)
for _ in columns
]
for col_name, col in enumerate(data):
self._data[col_name] = column.as_column(col)
if columns:
self.columns = columns
def _init_from_dict_like(self, data, index=None, columns=None):
data = data.copy()
num_rows = 0
if columns is not None:
# remove all entries in `data` that are
# not in `columns`
keys = [key for key in data.keys() if key in columns]
data = {key: data[key] for key in keys}
extra_cols = [col for col in columns if col not in data.keys()]
if keys:
# if keys is non-empty,
# add null columns for all values
# in `columns` that don't exist in `keys`:
data.update({key: None for key in extra_cols})
else:
# if keys is empty,
# it means that none of the actual keys in `data`
# matches with `columns`.
# Hence only assign `data` with `columns` as keys
# and their values as empty columns.
data = {
key: cudf.core.column.column_empty(row_count=0, dtype=None)
for key in extra_cols
}
data, index = self._align_input_series_indices(data, index=index)
if index is None:
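# No index given: infer the row count from the first column. A scalar
# value implies a single row; otherwise the column's length is used.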
if data:
col_name = next(iter(data))
if is_scalar(data[col_name]):
num_rows = num_rows or 1
else:
data[col_name] = column.as_column(data[col_name])
num_rows = len(data[col_name])
self._index = RangeIndex(0, num_rows)
else:
self._index = as_index(index)
if len(data):
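# The column labels form a MultiIndex only if every key is a tuple;
# start with True and AND in each key's type as columns are inserted.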
self._data.multiindex = True
for (i, col_name) in enumerate(data):
self._data.multiindex = self._data.multiindex and isinstance(
col_name, tuple
)
self.insert(i, col_name, data[col_name])
if columns is not None:
self.columns = columns
@classmethod
def _from_table(cls, table, index=None):
if index is None:
if table._index is not None:
index = Index._from_table(table._index)
else:
index = RangeIndex(table._num_rows)
out = cls.__new__(cls)
out._data = table._data
out._index = index
return out
@staticmethod
def _align_input_series_indices(data, index):
data = data.copy()
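# Align any Series values in `data` on a common index: reindex each one
# to the explicit `index` if given, otherwise align them to each other
# and adopt the resulting index for the frame.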
input_series = [
Series(val)
for val in data.values()
if isinstance(val, (pd.Series, Series))
]
if input_series:
if index is not None:
aligned_input_series = [
sr._align_to_index(index, how="right", sort=False)
for sr in input_series
]
else:
aligned_input_series = cudf.core.series._align_indices(
input_series
)
index = aligned_input_series[0].index
for name, val in data.items():
if isinstance(val, (pd.Series, Series)):
data[name] = aligned_input_series.pop(0)
return data, index
@property
def _constructor(self):
return DataFrame
@property
def _constructor_sliced(self):
return Series
@property
def _constructor_expanddim(self):
raise NotImplementedError(
"_constructor_expanddim not supported for DataFrames!"
)
def serialize(self):
header = {}
frames = []
header["type-serialized"] = pickle.dumps(type(self))
header["index"], index_frames = self._index.serialize()
header["index_frame_count"] = len(index_frames)
frames.extend(index_frames)
# Use the column directly to avoid duplicating the index
# need to pickle column names to handle numpy integer columns
header["column_names"] = pickle.dumps(tuple(self._data.names))
column_header, column_frames = column.serialize_columns(self._columns)
header["columns"] = column_header
frames.extend(column_frames)
return header, frames
@classmethod
def deserialize(cls, header, frames):
# Reconstruct the index
index_frames = frames[: header["index_frame_count"]]
idx_typ = pickle.loads(header["index"]["type-serialized"])
index = idx_typ.deserialize(header["index"], index_frames)
# Reconstruct the columns
column_frames = frames[header["index_frame_count"] :]
column_names = pickle.loads(header["column_names"])
columns = column.deserialize_columns(header["columns"], column_frames)
return cls(dict(zip(column_names, columns)), index=index)
@property
def dtypes(self):
"""Return the dtypes in this object."""
return pd.Series(
[x.dtype for x in self._data.columns], index=self._data.names
)
@property
def shape(self):
"""Returns a tuple representing the dimensionality of the DataFrame.
"""
return self._num_rows, self._num_columns
@property
def ndim(self):
"""Dimension of the data. DataFrame ndim is always 2.
"""
return 2
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
o.update(
c for c in self.columns if isinstance(c, str) and c.isidentifier()
)
return list(o)
def __setattr__(self, key, col):
# if an attribute already exists, set it.
try:
object.__getattribute__(self, key)
object.__setattr__(self, key, col)
return
except AttributeError:
pass
# if a column already exists, set it.
if key not in self._internal_names:
try:
self[key] # __getitem__ to verify key exists
self[key] = col
return
except KeyError:
pass
object.__setattr__(self, key, col)
def __getattr__(self, key):
if key in self._internal_names:
return object.__getattribute__(self, key)
else:
if key in self:
return self[key]
raise AttributeError("'DataFrame' object has no attribute %r" % key)
@annotate("DATAFRAME_GETITEM", color="blue", domain="cudf_python")
def __getitem__(self, arg):
"""
If *arg* is a ``str`` or ``int`` type, return the column Series.
If *arg* is a ``slice``, return a new DataFrame with all columns
sliced to the specified range.
If *arg* is an ``array`` containing column names, return a new
DataFrame with the corresponding columns.
If *arg* is a ``dtype.bool array``, return the rows marked True
Examples
--------
>>> df = DataFrame([('a', list(range(20))),
... ('b', list(range(20))),
... ('c', list(range(20)))])
>>> df[:4] # get first 4 rows of all columns
a b c
0 0 0 0
1 1 1 1
2 2 2 2
3 3 3 3
>>> df[-5:] # get last 5 rows of all columns
a b c
15 15 15 15
16 16 16 16
17 17 17 17
18 18 18 18
19 19 19 19
>>> df[['a', 'c']] # get columns a and c
a c
0 0 0
1 1 1
2 2 2
3 3 3
4 4 4
5 5 5
6 6 6
7 7 7
8 8 8
9 9 9
>>> df[[True, False, True, False]] # mask the entire dataframe,
# returning the rows specified in the boolean mask
"""
if is_scalar(arg) or isinstance(arg, tuple):
return self._get_columns_by_label(arg, downcast=True)
elif isinstance(arg, slice):
return self._slice(arg)
elif isinstance(
arg,
(
list,
cupy.ndarray,
np.ndarray,
pd.Series,
Series,
Index,
pd.Index,
),
):
mask = arg
if isinstance(mask, list):
mask = pd.Series(mask)
if mask.dtype == "bool":
return self._apply_boolean_mask(mask)
else:
return self._get_columns_by_label(mask)
elif isinstance(arg, DataFrame):
return self.where(arg)
else:
raise TypeError(
f"__getitem__ on type {type(arg)} is not supported"
)
@annotate("DATAFRAME_SETITEM", color="blue", domain="cudf_python")
def __setitem__(self, arg, value):
"""Add/set column by *arg or DataFrame*
"""
if isinstance(arg, DataFrame):
# not handling set_item where arg = df & value = df
if isinstance(value, DataFrame):
raise TypeError(
f"__setitem__ with arg = {type(value)} and "
f"value = {type(arg)} is not supported"
)
else:
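# `arg` is a boolean DataFrame: use each of its columns as a scatter
# mask and write `value` into the matching column at the True positions.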
for col_name in self._data:
scatter_map = arg[col_name]
if is_scalar(value):
self._data[col_name][scatter_map] = value
else:
self._data[col_name][scatter_map] = column.as_column(
value
)[scatter_map]
elif is_scalar(arg) or isinstance(arg, tuple):
if isinstance(value, DataFrame):
_setitem_with_dataframe(
input_df=self,
replace_df=value,
input_cols=[arg],
mask=None,
)
else:
if arg in self._data:
if len(self) == 0:
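# Assigning a column to an empty frame: adopt the value's index (or a
# RangeIndex of its length) and resize every other column to match,
# filled with nulls.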
if isinstance(value, (pd.Series, Series)):
self._index = as_index(value.index)
elif len(value) > 0:
self._index = RangeIndex(start=0, stop=len(value))
value = column.as_column(value)
new_data = self._data.__class__()
for key in self._data:
if key == arg:
new_data[key] = value
else:
new_data[key] = column.column_empty_like(
self._data[key],
masked=True,
newsize=len(value),
)
self._data = new_data
return
elif isinstance(value, (pd.Series, Series)):
value = Series(value)._align_to_index(
self._index,
how="right",
sort=False,
allow_non_unique=True,
)
if is_scalar(value):
self._data[arg][:] = value
else:
value = as_column(value)
self._data[arg] = value
else:
# Discrepancy with pandas: pandas raises a KeyError here,
# whereas cudf inserts the value as a new column.
self.insert(len(self._data), arg, value)
elif isinstance(
arg, (list, np.ndarray, pd.Series, Series, Index, pd.Index)
):
mask = arg
if isinstance(mask, list):
mask = np.array(mask)
if mask.dtype == "bool":
mask = column.as_column(arg)
if isinstance(value, DataFrame):
_setitem_with_dataframe(
input_df=self,
replace_df=value,
input_cols=None,
mask=mask,
)
else:
if not is_scalar(value):
value = column.as_column(value)[mask]
for col_name in self._data:
self._data[col_name][mask] = value
else:
if isinstance(value, DataFrame):
_setitem_with_dataframe(
input_df=self,
replace_df=value,
input_cols=arg,
mask=None,
)
else:
for col in arg:
if is_scalar(value):
self._data[col] = column.full(
size=len(self), fill_value=value
)
else:
self._data[col] = column.as_column(value)
else:
raise TypeError(
f"__setitem__ on type {type(arg)} is not supported"
)
def __delitem__(self, name):
"""
Drop the given column by *name*.
"""
self._drop_column(name)
def __sizeof__(self):
columns = sum(col.__sizeof__() for col in self._data.columns)
index = self._index.__sizeof__()
return columns + index
def memory_usage(self, index=True, deep=False):
"""
Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
index in returned Series. If ``index=True``, the memory usage of
the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
Series
A Series whose index is the original column names and whose values
is the memory usage of each column in bytes.
Examples
--------
>>> dtypes = ['int64', 'float64', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000).astype(t))
... for t in dtypes])
>>> df = cudf.DataFrame(data)
>>> df.head()
int64 float64 object bool
0 1 1.0 1.0 True
1 1 1.0 1.0 True
2 1 1.0 1.0 True
3 1 1.0 1.0 True
4 1 1.0 1.0 True
>>> df.memory_usage(index=False)
int64 40000
float64 40000
object 40000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5048
"""
ind = list(self.columns)
sizes = [col._memory_usage(deep=deep) for col in self._data.columns]
if index:
ind.append("Index")
ind = cudf.Index(ind, dtype="str")
sizes.append(self.index.memory_usage(deep=deep))
return Series(sizes, index=ind)
def __len__(self):
"""
Returns the number of rows
"""
return len(self.index)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
import cudf
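# Route numpy ufuncs that have a same-named top-level cudf function to
# that function; anything else defers back to numpy via NotImplemented.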
if method == "__call__" and hasattr(cudf, ufunc.__name__):
func = getattr(cudf, ufunc.__name__)
return func(self)
else:
return NotImplemented
def __array_function__(self, func, types, args, kwargs):
cudf_df_module = DataFrame
cudf_series_module = Series
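# NEP-18 __array_function__ dispatch: walk the numpy function's module
# path under the DataFrame namespace, check that every argument type is
# one we handle, and call the cudf equivalent unless it is literally the
# same object as the numpy function.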
for submodule in func.__module__.split(".")[1:]:
# point cudf to the correct submodule
if hasattr(cudf_df_module, submodule):
cudf_df_module = getattr(cudf_df_module, submodule)
else:
return NotImplemented
fname = func.__name__
handled_types = [cudf_df_module, cudf_series_module]
for t in types:
if t not in handled_types:
return NotImplemented
if hasattr(cudf_df_module, fname):
cudf_func = getattr(cudf_df_module, fname)
# Handle case if cudf_func is same as numpy function
if cudf_func is func:
return NotImplemented
else:
return cudf_func(*args, **kwargs)
else:
return NotImplemented
@property
def values(self):
"""
Return a CuPy representation of the DataFrame.
Only the values in the DataFrame will be returned; the axes labels will
be removed.
Returns
-------
out: cupy.ndarray
The values of the DataFrame.
"""
return cupy.asarray(self.as_gpu_matrix())
def __array__(self, dtype=None):
raise TypeError(
"Implicit conversion to a host NumPy array via __array__ is not "
"allowed, To explicitly construct a GPU matrix, consider using "
".as_gpu_matrix()\nTo explicitly construct a host "
"matrix, consider using .as_matrix()"
)
def __arrow_array__(self, type=None):
raise TypeError(
"Implicit conversion to a host PyArrow Table via __arrow_array__ "
"is not allowed, To explicitly construct a PyArrow Table, "
"consider using .to_arrow()"
)
def _get_numeric_data(self):
""" Return a dataframe with only numeric data types """
columns = [
c
for c, dt in self.dtypes.items()
if dt != object and not is_categorical_dtype(dt)
]
return self[columns]
def assign(self, **kwargs):
"""
Assign columns to DataFrame from keyword arguments.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame()
>>> df = df.assign(a=[0, 1, 2], b=[3, 4, 5])
>>> df
a b
0 0 3
1 1 4
2 2 5
"""
new = self.copy()
for k, v in kwargs.items():
new[k] = v
return new
def head(self, n=5):
"""
Returns the first n rows as a new DataFrame
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame()
>>> df['key'] = [0, 1, 2, 3, 4]
>>> df['val'] = [float(i + 10) for i in range(5)] # insert column
>>> df.head(2)
key val
0 0 10.0
1 1 11.0
"""
return self.iloc[:n]
def tail(self, n=5):
"""
Returns the last n rows as a new DataFrame
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame()
>>> df['key'] = [0, 1, 2, 3, 4]
>>> df['val'] = [float(i + 10) for i in range(5)] # insert column
>>> df.tail(2)
key val
3 3 13.0
4 4 14.0
"""
if n == 0:
return self.iloc[0:0]
return self.iloc[-n:]
def to_string(self):
"""
Convert to string
cuDF uses Pandas internals for efficient string formatting.
Set formatting options using pandas string formatting options and
cuDF objects will print identically to Pandas objects.
cuDF supports `null/None` as a value in any column type, which
is transparently supported during this output process.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame()
>>> df['key'] = [0, 1, 2]
>>> df['val'] = [float(i + 10) for i in range(3)]
>>> df.to_string()
' key val\\n0 0 10.0\\n1 1 11.0\\n2 2 12.0'
"""
return self.__repr__()
def __str__(self):
return self.to_string()
def astype(self, dtype, copy=False, errors="raise", **kwargs):
"""
Cast the DataFrame to the given dtype
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire DataFrame object to
the same type. Alternatively, use ``{col: dtype, ...}``, where col
is a column label and dtype is a numpy.dtype or Python type
to cast one or more of the DataFrame's columns to
column-specific types.
copy : bool, default False
Return a deep copy when ``copy=True``. Note that ``copy=False``
is the default, so changes to the returned values may propagate
to other cudf objects.
errors : {'raise', 'ignore', 'warn'}, default 'raise'
Control raising of exceptions on invalid data for provided dtype.
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original
object.
- ``warn`` : prints the last exception as a warning and
returns the original object.
**kwargs : extra arguments to pass on to the constructor
Returns
-------
casted : DataFrame
"""
result = DataFrame(index=self.index)
if is_dict_like(dtype):
current_cols = self._data.names
if len(set(dtype.keys()) - set(current_cols)) > 0:
raise KeyError(
"Only a column name can be used for the "
"key in a dtype mappings argument."
)
for col_name in current_cols:
if col_name in dtype:
result._data[col_name] = self._data[col_name].astype(
dtype=dtype[col_name],
errors=errors,
copy=copy,
**kwargs,
)
else:
result._data[col_name] = (
self._data[col_name].copy(deep=True)
if copy
else self._data[col_name]
)
else:
for col in self._data:
result._data[col] = self._data[col].astype(
dtype=dtype, **kwargs
)
return result
def _repr_pandas025_formatting(self, ncols, nrows, dtype=None):
"""
Pandas > 0.25 introduced new conditional formatting for some
datatypes and column/row configurations. This adjusts the row/column
limits so the repr matches the expected Pandas repr of the same content.
Examples
--------
>>> gdf.__repr__()
0 ... 19
0 46 ... 48
.. .. ... ..
19 40 ... 29
[20 rows x 20 columns]
>>> ncols, nrows = _repr_pandas025_formatting(2, 2, dtype="category")
>>> pd.options.display.max_rows = nrows
>>> pd.options.display.max_columns = ncols
>>> gdf.__repr__()
0 ... 19
0 46 ... 48
.. .. ... ..
19 40 ... 29
[20 rows x 20 columns]
"""
ncols = 1 if ncols in [0, 2] and dtype == "datetime64[ns]" else ncols
ncols = (
1
if ncols == 0
and nrows == 1
and dtype in ["int8", "str", "category"]
else ncols
)
ncols = (
1
if nrows == 1
and dtype in ["int8", "int16", "int64", "str", "category"]
else ncols
)
ncols = 0 if ncols == 2 else ncols
ncols = 19 if ncols in [20, 21] else ncols
return ncols, nrows
def _clean_renderable_dataframe(self, output):
"""
This method takes in a partial/preprocessed dataframe
and returns a correctly formatted representation of it with
the correct dimensions (rows x columns).
"""
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
max_colwidth = get_option("display.max_colwidth")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
output = output.to_pandas().to_string(
max_rows=max_rows,
min_rows=min_rows,
max_cols=max_cols,
line_width=width,
max_colwidth=max_colwidth,
show_dimensions=show_dimensions,
)
lines = output.split("\n")
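# Pandas computes its "[n rows x m columns]" footer from the truncated
# preview; replace it with the dimensions of the full frame.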
if lines[-1].startswith("["):
lines = lines[:-1]
lines.append(
"[%d rows x %d columns]" % (len(self), len(self._data.names))
)
return "\n".join(lines)
def _clean_nulls_from_dataframe(self, df):
"""
This function converts all ``null`` values to ``<NA>`` for
representation as a string in `__repr__`.
Since we utilize Pandas `__repr__` at all places in our code
for formatting purposes, we convert columns to `str` dtype for
filling with `<NA>` values.
"""
for col in df._data:
if is_list_dtype(df._data[col]) or is_struct_dtype(df._data[col]):
# TODO we need to handle this
pass
elif df._data[col].has_nulls:
df[col] = df._data[col].astype("str").fillna(cudf._NA_REP)
else:
df[col] = df._data[col]
return df
def _get_renderable_dataframe(self):
"""
Takes the row and column limits from pandas display settings (or
estimates them from the size of the frame), pulls the corner
quadrants accordingly (with an adjustment for a MultiIndex), and
produces a compact, representative dataframe for printing.
"""
max_rows = pd.options.display.max_rows
nrows = np.max([len(self) if max_rows is None else max_rows, 1])
if pd.options.display.max_rows == 0:
nrows = len(self)
ncols = (
pd.options.display.max_columns
if pd.options.display.max_columns
else pd.options.display.width / 2
)
if len(self) <= nrows and len(self._data.names) <= ncols:
output = self.copy(deep=False)
elif self.empty and len(self.index) > 0:
max_seq_items = pd.options.display.max_seq_items
# In case of an empty DataFrame with an index, Pandas prints
# first `pd.options.display.max_seq_items` index values
# followed by ... To obtain ... at the end of index list,
# adding 1 extra value.
# If `pd.options.display.max_seq_items` is None,
# entire sequence/Index is to be printed.
# Note : Pandas truncates the dimensions at the end of
# the resulting dataframe when `display.show_dimensions`
# is set to truncate. Hence to display the dimensions we
# need to extract maximum of `max_seq_items` and `nrows`
# and have 1 extra value for ... to show up in the output
# string.
if max_seq_items is not None:
output = self.head(max(max_seq_items, nrows) + 1)
else:
output = self.copy(deep=False)
else:
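# Frame is too large to render in full: work out how many rows and
# columns to take from each corner, build the four quadrants with
# head/tail and iloc, and stitch them back together below.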
left_cols = len(self._data.names)
right_cols = 0
upper_rows = len(self)
lower_rows = 0
if len(self) > nrows and nrows > 0:
upper_rows = int(nrows / 2.0) + 1
lower_rows = upper_rows + (nrows % 2)
if len(self._data.names) > ncols:
right_cols = len(self._data.names) - int(ncols / 2.0)
# adjust right columns for output if multiindex.
right_cols = (
right_cols - 1
if isinstance(self.index, cudf.MultiIndex)
else right_cols
)
left_cols = int(ncols / 2.0) + 1
if right_cols > 0:
# Pick ncols - left_cols number of columns
# from the right side/from the end.
right_cols = -(int(ncols) - left_cols + 1)
else:
# If right_cols is 0 or negative, it means
# self has fewer columns than ncols.
# Hence assign len(self._data.names) which
# will result in empty `*_right` quadrants.
# This is because `*_left` quadrants will
# contain all columns.
right_cols = len(self._data.names)
upper_left = self.head(upper_rows).iloc[:, :left_cols]
upper_right = self.head(upper_rows).iloc[:, right_cols:]
lower_left = self.tail(lower_rows).iloc[:, :left_cols]
lower_right = self.tail(lower_rows).iloc[:, right_cols:]
upper = cudf.concat([upper_left, upper_right], axis=1)
lower = cudf.concat([lower_left, lower_right], axis=1)
output = cudf.concat([upper, lower])
output = self._clean_nulls_from_dataframe(output)
output._index = output._index._clean_nulls_from_index()
return output
def __repr__(self):
output = self._get_renderable_dataframe()
return self._clean_renderable_dataframe(output)
def _repr_html_(self):
lines = (
self._get_renderable_dataframe()
.to_pandas()
._repr_html_()
.split("\n")
)
if lines[-2].startswith("<p>"):
lines = lines[:-2]
lines.append(
"<p>%d rows × %d columns</p>"
% (len(self), len(self._data.names))
)
lines.append("</div>")
return "\n".join(lines)
def _repr_latex_(self):
return self._get_renderable_dataframe().to_pandas()._repr_latex_()
# unary, binary, rbinary, orderedcompare, unorderedcompare
def _apply_op(self, fn, other=None, fill_value=None):
result = DataFrame(index=self.index)
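# Dispatch on the type of `other`: None -> unary op per column,
# Sequence -> positional element per column, DataFrame -> align columns
# and indices (missing columns become null/fill_value), Series -> treat
# its index as column labels, scalar/0-d array -> broadcast.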
def op(lhs, rhs):
if fill_value is None:
return getattr(lhs, fn)(rhs)
else:
return getattr(lhs, fn)(rhs, fill_value)
if other is None:
for col in self._data:
result[col] = getattr(self[col], fn)()
return result
elif isinstance(other, Sequence):
for k, col in enumerate(self._data):
result[col] = getattr(self[col], fn)(other[k])
elif isinstance(other, DataFrame):
if fn in cudf.utils.utils._EQUALITY_OPS:
if not self.index.equals(other.index):
raise ValueError(
"Can only compare identically-labeled "
"DataFrame objects"
)
lhs, rhs = _align_indices(self, other)
result.index = lhs.index
max_num_rows = max(lhs.shape[0], rhs.shape[0])
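# Columns present in only one of the two frames: with no fill_value the
# result column is all nulls; otherwise the op is applied against
# fill_value.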
def fallback(col, fn):
if fill_value is None:
return Series.from_masked_array(
data=column_empty(max_num_rows, dtype="float64"),
mask=create_null_mask(
max_num_rows, state=MaskState.ALL_NULL
),
).set_index(col.index)
else:
return getattr(col, fn)(fill_value)
for col in lhs._data:
if col not in rhs._data:
result[col] = fallback(lhs[col], fn)
else:
result[col] = op(lhs[col], rhs[col])
for col in rhs._data:
if col not in lhs._data:
result[col] = fallback(rhs[col], _reverse_op(fn))
elif isinstance(other, Series):
other_cols = other.to_pandas().to_dict()
other_cols_keys = list(other_cols.keys())
result_cols = list(self.columns)
df_cols = list(result_cols)
for new_col in other_cols.keys():
if new_col not in result_cols:
result_cols.append(new_col)
for col in result_cols:
if col in df_cols and col in other_cols_keys:
l_opr = self[col]
r_opr = other_cols[col]
else:
if col not in df_cols:
r_opr = other_cols[col]
l_opr = Series(
column_empty(
len(self), masked=True, dtype=other.dtype
)
)
if col not in other_cols_keys:
r_opr = None
l_opr = self[col]
result[col] = op(l_opr, r_opr)
elif isinstance(other, (numbers.Number, cudf.Scalar)) or (
isinstance(other, np.ndarray) and other.ndim == 0
):
for col in self._data:
result[col] = op(self[col], other)
else:
raise NotImplementedError(
"DataFrame operations with " + str(type(other)) + " not "
"supported at this time."
)
return result
def add(self, other, axis="columns", level=None, fill_value=None):
"""
Get Addition of dataframe and other, element-wise (binary
operator `add`).
Equivalent to ``dataframe + other``, but with support to substitute a
fill_value for missing data in one of the inputs. With reverse
version, `radd`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`.
Parameters
----------
other : scalar, sequence, Series, or DataFrame
Any single or multiple element data structure, or list-like object.
fill_value : float or None, default None
Fill existing missing (NaN) values, and any new element needed
for successful DataFrame alignment, with this value before
computation. If data in both corresponding DataFrame locations
is missing the result will be missing.
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]},
... index=['circle', 'triangle', 'rectangle'])
>>> df + 1
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.add(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
"""
if axis not in (1, "columns"):
raise NotImplementedError("Only axis=1 supported at this time.")
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._apply_op("add", other, fill_value)
def __add__(self, other):
return self._apply_op("__add__", other)
def radd(self, other, axis=1, level=None, fill_value=None):
"""
Get Addition of dataframe and other, element-wise (binary
operator `radd`).
Equivalent to ``other + dataframe``, but with support to substitute a
fill_value for missing data in one of the inputs. With reverse
version, `add`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`.
Parameters
----------
other : scalar, sequence, Series, or DataFrame
Any single or multiple element data structure, or list-like object.
fill_value : float or None, default None
Fill existing missing (NaN) values, and any new element needed
for successful DataFrame alignment, with this value before
computation. If data in both corresponding DataFrame locations
is missing the result will be missing.
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]},
... index=['circle', 'triangle', 'rectangle'])
>>> df + 1
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.radd(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
"""
if axis not in (1, "columns"):
raise NotImplementedError("Only axis=1 supported at this time.")
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._apply_op("radd", other, fill_value)
def __radd__(self, other):
return self._apply_op("__radd__", other)
def sub(self, other, axis="columns", level=None, fill_value=None):
"""
Get Subtraction of dataframe and other, element-wise (binary
operator `sub`).
Equivalent to ``dataframe - other``, but with support to substitute a
fill_value for missing data in one of the inputs. With reverse
version, `rsub`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`.
Parameters
----------
other : scalar, sequence, Series, or DataFrame
Any single or multiple element data structure, or list-like object.
fill_value : float or None, default None
Fill existing missing (NaN) values, and any new element needed
for successful DataFrame alignment, with this value before
computation. If data in both corresponding DataFrame locations
is missing the result will be missing.
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]},
... index=['circle', 'triangle', 'rectangle'])
>>> df.sub(1)
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
>>> df.sub([1, 2])
angles degrees
circle -1 358
triangle 2 178
rectangle 3 358
"""
if axis not in (1, "columns"):
raise NotImplementedError("Only axis=1 supported at this time.")
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._apply_op("sub", other, fill_value)
def __sub__(self, other):
return self._apply_op("__sub__", other)
def rsub(self, other, axis="columns", level=None, fill_value=None):
"""
Get Subtraction of dataframe and other, element-wise (binary
operator `rsub`).
Equivalent to ``other - dataframe``, but with support to substitute a
fill_value for missing data in one of the inputs. With reverse
version, `sub`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`.
Parameters
----------
other : scalar, sequence, Series, or DataFrame
Any single or multiple element data structure, or list-like object.
fill_value : float or None, default None
Fill existing missing (NaN) values, and any new element needed
for successful DataFrame alignment, with this value before
computation. If data in both corresponding DataFrame locations
is missing the result will be missing.
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]},
... index=['circle', 'triangle', 'rectangle'])
>>> df
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
>>> df.rsub(1)
angles degrees
circle 1 -359
triangle -2 -179
rectangle -3 -359
>>> df.rsub([1, 2])
angles degrees
circle 1 -358
triangle -2 -178
rectangle -3 -358
"""
if axis not in (1, "columns"):
raise NotImplementedError("Only axis=1 supported at this time.")
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._apply_op("rsub", other, fill_value)
def __rsub__(self, other):
return self._apply_op("__rsub__", other)
def mul(self, other, axis="columns", level=None, fill_value=None):
"""
Get Multiplication of dataframe and other, element-wise (binary
operator `mul`).
Equivalent to ``dataframe * other``, but with support to substitute a
fill_value for missing data in one of the inputs. With reverse
version, `rmul`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`.
Parameters
----------
other : scalar, sequence, Series, or DataFrame
Any single or multiple element data structure, or list-like object.
fill_value : float or None, default None
Fill existing missing (NaN) values, and any new element needed
for successful DataFrame alignment, with this value before
computation. If data in both corresponding DataFrame locations
is missing the result will be missing.
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]},
... index=['circle', 'triangle', 'rectangle'])
>>> other = cudf.DataFrame({'angles': [0, 3, 4]},
... index=['circle', 'triangle', 'rectangle'])
>>> df * other
angles degrees
circle 0 <NA>
triangle 9 <NA>
rectangle 16 <NA>
>>> df.mul(other, fill_value=0)
angles degrees
circle 0 0
triangle 9 0
rectangle 16 0
"""
if axis not in (1, "columns"):
raise NotImplementedError("Only axis=1 supported at this time.")
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._apply_op("mul", other, fill_value)
def __mul__(self, other):
return self._apply_op("__mul__", other)
def rmul(self, other, axis="columns", level=None, fill_value=None):
"""
Get Multiplication of dataframe and other, element-wise (binary
operator `rmul`).
Equivalent to ``other * dataframe``, but with support to substitute a
fill_value for missing data in one of the inputs. With reverse
version, `mul`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`.
Parameters
----------
other : scalar, sequence, Series, or DataFrame
Any single or multiple element data structure, or list-like object.
fill_value : float or None, default None
Fill existing missing (NaN) values, and any new element needed
for successful DataFrame alignment, with this value before
computation. If data in both corresponding DataFrame locations
is missing the result will be missing.
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]},
... index=['circle', 'triangle', 'rectangle'])
>>> other = cudf.DataFrame({'angles': [0, 3, 4]},
... index=['circle', 'triangle', 'rectangle'])
>>> other * df
angles degrees
circle 0 <NA>
triangle 9 <NA>
rectangle 16 <NA>
>>> df.rmul(other, fill_value=0)
angles degrees
circle 0 0
triangle 9 0
rectangle 16 0
"""
if axis not in (1, "columns"):
raise NotImplementedError("Only axis=1 supported at this time.")
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._apply_op("rmul", other, fill_value)
def __rmul__(self, other):
return self._apply_op("__rmul__", other)
def mod(self, other, axis="columns", level=None, fill_value=None):
"""
Get Modulo division of dataframe and other, element-wise (binary
operator `mod`).
Equivalent to ``dataframe % other``, but with support to substitute a
fill_value for missing data in one of the inputs. With reverse
version, `rmod`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`.
Parameters
----------
other : scalar, sequence, Series, or DataFrame
Any single or multiple element data structure, or list-like object.
fill_value : float or None, default None
Fill existing missing (NaN) values, and any new element needed
for successful DataFrame alignment, with this value before
computation. If data in both corresponding DataFrame locations
is missing the result will be missing.
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]},
... index=['circle', 'triangle', 'rectangle'])
>>> df % 100
angles degrees
circle 0 60
triangle 3 80
rectangle 4 60
>>> df.mod(100)
angles degrees
circle 0 60
triangle 3 80
rectangle 4 60
"""
if axis not in (1, "columns"):
raise NotImplementedError("Only axis=1 supported at this time.")
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._apply_op("mod", other, fill_value)
def __mod__(self, other):
return self._apply_op("__mod__", other)
def rmod(self, other, axis="columns", level=None, fill_value=None):
"""
Get Modulo division of dataframe and other, element-wise (binary
operator `rmod`).
Equivalent to ``other % dataframe``, but with support to substitute a
fill_value for missing data in one of the inputs. With reverse
version, `mod`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`.
Parameters
----------
other : scalar, sequence, Series, or DataFrame
Any single or multiple element data structure, or list-like object.
fill_value : float or None, default None
Fill existing missing (NaN) values, and any new element needed
for successful DataFrame alignment, with this value before
computation. If data in both corresponding DataFrame locations
is missing the result will be missing.
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'angles': [1, 3, 4],
... 'degrees': [360, 180, 360]},
... index=['circle', 'triangle', 'rectangle'])
>>> 100 % df
angles degrees
circle 0 100
triangle 1 100
rectangle 0 100
>>> df.rmod(100)
angles degrees
circle 0 100
triangle 1 100
rectangle 0 100
"""
if axis not in (1, "columns"):
raise NotImplementedError("Only axis=1 supported at this time.")
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._apply_op("rmod", other, fill_value)
def __rmod__(self, other):
return self._apply_op("__rmod__", other)
def pow(self, other, axis="columns", level=None, fill_value=None):
"""
Get Exponential power of dataframe and other, element-wise (binary
operator `pow`).
Equivalent to ``dataframe ** other``, but with support to substitute a
fill_value for missing data in one of the inputs. With reverse
version, `rpow`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`.
Parameters
----------
other : scalar, sequence, Series, or DataFrame
Any single or multiple element data structure, or list-like object.
fill_value : float or None, default None
Fill existing missing (NaN) values, and any new element needed
for successful DataFrame alignment, with this value before
computation. If data in both corresponding DataFrame locations
is missing the result will be missing.
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'angles': [1, 3, 4],
... 'degrees': [360, 180, 360]},
... index=['circle', 'triangle', 'rectangle'])
>>> df ** 2
angles degrees
circle 1 129600
triangle 9 32400
rectangle 16 129600
>>> df.pow(2)
angles degrees
circle 1 129600
triangle 9 32400
rectangle 16 129600
"""
if axis not in (1, "columns"):
raise NotImplementedError("Only axis=1 supported at this time.")
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._apply_op("pow", other, fill_value)
def __pow__(self, other):
return self._apply_op("__pow__", other)
def rpow(self, other, axis="columns", level=None, fill_value=None):
"""
Get Exponential power of dataframe and other, element-wise (binary
operator `pow`).
Equivalent to ``other ** dataframe``, but with support to substitute a
fill_value for missing data in one of the inputs. With reverse
version, `pow`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`.
Parameters
----------
other : scalar, sequence, Series, or DataFrame
Any single or multiple element data structure, or list-like object.
fill_value : float or None, default None
Fill existing missing (NaN) values, and any new element needed
for successful DataFrame alignment, with this value before
computation. If data in both corresponding DataFrame locations
is missing the result will be missing.
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'angles': [1, 3, 4],
... 'degrees': [360, 180, 360]},
... index=['circle', 'triangle', 'rectangle'])
>>> 1 ** df
angles degrees
circle 1 1
triangle 1 1
rectangle 1 1
>>> df.rpow(1)
angles degrees
circle 1 1
triangle 1 1
rectangle 1 1
"""
if axis not in (1, "columns"):
raise NotImplementedError("Only axis=1 supported at this time.")
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._apply_op("rpow", other, fill_value)
def __rpow__(self, other):
return self._apply_op("__pow__", other)
def floordiv(self, other, axis="columns", level=None, fill_value=None):
"""
Get Integer division of dataframe and other, element-wise (binary
operator `floordiv`).
Equivalent to ``dataframe // other``, but with support to substitute a
fill_value for missing data in one of the inputs. With reverse
version, `rfloordiv`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`.
Parameters
----------
other : scalar, sequence, Series, or DataFrame
Any single or multiple element data structure, or list-like object.
fill_value : float or None, default None
Fill existing missing (NaN) values, and any new element needed
for successful DataFrame alignment, with this value before
computation. If data in both corresponding DataFrame locations
is missing the result will be missing.
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'angles': [1, 3, 4],
... 'degrees': [360, 180, 360]},
... index=['circle', 'triangle', 'rectangle'])
>>> df.floordiv(2)
angles degrees
circle 0 180
triangle 1 90
rectangle 2 180
>>> df // 2
angles degrees
circle 0 180
triangle 1 90
rectangle 2 180
"""
if axis not in (1, "columns"):
raise NotImplementedError("Only axis=1 supported at this time.")
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._apply_op("floordiv", other, fill_value)
def __floordiv__(self, other):
return self._apply_op("__floordiv__", other)
def rfloordiv(self, other, axis="columns", level=None, fill_value=None):
"""
Get Integer division of dataframe and other, element-wise (binary
operator `rfloordiv`).
Equivalent to ``other // dataframe``, but with support to substitute
a fill_value for missing data in one of the inputs. With reverse
version, `floordiv`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`.
Parameters
----------
other : scalar, sequence, Series, or DataFrame
Any single or multiple element data structure, or list-like object.
fill_value : float or None, default None
Fill existing missing (NaN) values, and any new element needed
for successful DataFrame alignment, with this value before
computation. If data in both corresponding DataFrame locations
is missing the result will be missing.
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'col1': [10, 11, 23],
... 'col2': [101, 122, 321]})
>>> df
col1 col2
0 10 101
1 11 122
2 23 321
>>> df.rfloordiv(df)
col1 col2
0 1 1
1 1 1
2 1 1
>>> df.rfloordiv(200)
col1 col2
0 20 1
1 18 1
2 8 0
>>> df.rfloordiv(100)
col1 col2
0 10 0
1 9 0
2 4 0
"""
if axis not in (1, "columns"):
raise NotImplementedError("Only axis=1 supported at this time.")
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._apply_op("rfloordiv", other, fill_value)
def __rfloordiv__(self, other):
return self._apply_op("__rfloordiv__", other)
def truediv(self, other, axis="columns", level=None, fill_value=None):
"""
Get Floating division of dataframe and other, element-wise (binary
operator `truediv`).
Equivalent to ``dataframe / other``, but with support to substitute a
fill_value for missing data in one of the inputs. With reverse
version, `rtruediv`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`.
Parameters
----------
other : scalar, sequence, Series, or DataFrame
Any single or multiple element data structure, or list-like object.
fill_value : float or None, default None
Fill existing missing (NaN) values, and any new element needed
for successful DataFrame alignment, with this value before
computation. If data in both corresponding DataFrame locations
is missing the result will be missing.
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]},
... index=['circle', 'triangle', 'rectangle'])
>>> df.truediv(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.div(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df / 10
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
"""
if axis not in (1, "columns"):
raise NotImplementedError("Only axis=1 supported at this time.")
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._apply_op("truediv", other, fill_value)
# Alias for truediv
div = truediv
def __truediv__(self, other):
return self._apply_op("__truediv__", other)
def rtruediv(self, other, axis="columns", level=None, fill_value=None):
"""
Get Floating division of dataframe and other, element-wise (binary
operator `rtruediv`).
Equivalent to ``other / dataframe``, but with support to substitute a
fill_value for missing data in one of the inputs. With reverse
version, `truediv`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`.
Parameters
----------
other : scalar, sequence, Series, or DataFrame
Any single or multiple element data structure, or list-like object.
fill_value : float or None, default None
Fill existing missing (NaN) values, and any new element needed
for successful DataFrame alignment, with this value before
computation. If data in both corresponding DataFrame locations
is missing the result will be missing.
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]},
... index=['circle', 'triangle', 'rectangle'])
>>> df
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
>>> df.rtruediv(10)
angles degrees
circle inf 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
>>> df.rdiv(10)
angles degrees
circle inf 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
>>> 10 / df
angles degrees
circle inf 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
"""
if axis not in (1, "columns"):
raise NotImplementedError("Only axis=1 supported at this time.")
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._apply_op("rtruediv", other, fill_value)
# Alias for rtruediv
rdiv = rtruediv
def __rtruediv__(self, other):
return self._apply_op("__rtruediv__", other)
__div__ = __truediv__
def __and__(self, other):
return self._apply_op("__and__", other)
def __or__(self, other):
return self._apply_op("__or__", other)
def __xor__(self, other):
return self._apply_op("__xor__", other)
def __eq__(self, other):
return self._apply_op("__eq__", other)
def __ne__(self, other):
return self._apply_op("__ne__", other)
def __lt__(self, other):
return self._apply_op("__lt__", other)
def __le__(self, other):
return self._apply_op("__le__", other)
def __gt__(self, other):
return self._apply_op("__gt__", other)
def __ge__(self, other):
return self._apply_op("__ge__", other)
def __invert__(self):
return self._apply_op("__invert__")
def __neg__(self):
return self._apply_op("__neg__")
def __abs__(self):
return self._apply_op("__abs__")
def __iter__(self):
return iter(self.columns)
def iteritems(self):
""" Iterate over column names and series pairs """
for k in self:
yield (k, self[k])
@property
@annotate("DATAFRAME_LOC", color="blue", domain="cudf_python")
def loc(self):
"""
Selecting rows and columns by label or boolean mask.
Examples
--------
DataFrame with string index.
>>> df
a b
a 0 5
b 1 6
c 2 7
d 3 8
e 4 9
Select a single row by label.
>>> df.loc['a']
a 0
b 5
Name: a, dtype: int64
Select multiple rows and a single column.
>>> df.loc[['a', 'c', 'e'], 'b']
a 5
c 7
e 9
Name: b, dtype: int64
Selection by boolean mask.
>>> df.loc[df.a > 2]
a b
d 3 8
e 4 9
Setting values using loc.
>>> df.loc[['a', 'c', 'e'], 'a'] = 0
>>> df
a b
a 0 5
b 1 6
c 0 7
d 3 8
e 0 9
See also
--------
DataFrame.iloc
Notes
-----
One notable difference from Pandas is when DataFrame is of
mixed types and result is expected to be a Series in case of Pandas.
cuDF will return a DataFrame as it doesn't support mixed types
under Series yet.
Mixed dtype single row output as a dataframe (pandas results in Series)
>>> import cudf
>>> df = cudf.DataFrame({"a":[1, 2, 3], "b":["a", "b", "c"]})
>>> df.loc[0]
a b
0 1 a
"""
return _DataFrameLocIndexer(self)
@property
def iloc(self):
"""
Selecting rows and columns by position.
Examples
--------
>>> df = cudf.DataFrame({'a': range(20),
... 'b': range(20),
... 'c': range(20)})
Select a single row using an integer index.
>>> df.iloc[1]
a 1
b 1
c 1
Name: 1, dtype: int64
Select multiple rows using a list of integers.
>>> df.iloc[[0, 2, 9, 18]]
a b c
0 0 0 0
2 2 2 2
9 9 9 9
18 18 18 18
Select rows using a slice.
>>> df.iloc[3:10:2]
a b c
3 3 3 3
5 5 5 5
7 7 7 7
9 9 9 9
Select both rows and columns.
>>> df.iloc[[1, 3, 5, 7], 2]
1 1
3 3
5 5
7 7
Name: c, dtype: int64
Setting values using iloc.
>>> df.iloc[:4] = 0
>>> df
a b c
0 0 0 0
1 0 0 0
2 0 0 0
3 0 0 0
4 4 4 4
5 5 5 5
6 6 6 6
7 7 7 7
8 8 8 8
9 9 9 9
[10 more rows]
See also
--------
DataFrame.loc
Notes
-----
One notable difference from Pandas is when DataFrame is of
mixed types and result is expected to be a Series in case of Pandas.
cuDF will return a DataFrame as it doesn't support mixed types
under Series yet.
Mixed dtype single row output as a dataframe (pandas results in Series)
>>> import cudf
>>> df = cudf.DataFrame({"a":[1, 2, 3], "b":["a", "b", "c"]})
>>> df.iloc[0]
a b
0 1 a
"""
return _DataFrameIlocIndexer(self)
@property
def iat(self):
"""
Alias for ``DataFrame.iloc``; provided for compatibility with Pandas.
"""
return self.iloc
@property
def at(self):
"""
Alias for ``DataFrame.loc``; provided for compatibility with Pandas.
"""
return self.loc
@property
@annotate("DATAFRAME_COLUMNS_GETTER", color="yellow", domain="cudf_python")
def columns(self):
"""Returns a tuple of columns
"""
return self._data.to_pandas_index()
@columns.setter
@annotate("DATAFRAME_COLUMNS_SETTER", color="yellow", domain="cudf_python")
def columns(self, columns):
if isinstance(columns, (cudf.MultiIndex, cudf.Index)):
columns = columns.to_pandas()
if columns is None:
columns = pd.Index(range(len(self._data.columns)))
is_multiindex = isinstance(columns, pd.MultiIndex)
if isinstance(
columns, (Series, cudf.Index, cudf.core.column.ColumnBase)
):
columns = pd.Index(columns.to_array(), tupleize_cols=is_multiindex)
elif not isinstance(columns, pd.Index):
columns = pd.Index(columns, tupleize_cols=is_multiindex)
if not len(columns) == len(self._data.names):
raise ValueError(
f"Length mismatch: expected {len(self._data.names)} elements ,"
f"got {len(columns)} elements"
)
data = dict(zip(columns, self._data.columns))
if len(columns) != len(data):
raise ValueError("Duplicate column names are not allowed")
self._data = ColumnAccessor(
data, multiindex=is_multiindex, level_names=columns.names,
)
def _rename_columns(self, new_names):
old_cols = iter(self._data.names)
l_old_cols = len(self._data)
l_new_cols = len(new_names)
if l_new_cols != l_old_cols:
msg = (
f"Length of new column names: {l_new_cols} does not "
"match length of previous column names: {l_old_cols}"
)
raise ValueError(msg)
mapper = dict(zip(old_cols, new_names))
self.rename(mapper=mapper, inplace=True, axis=1)
@property
def index(self):
"""Returns the index of the DataFrame
"""
return self._index
@index.setter
def index(self, value):
old_length = (
self._num_rows if self._index is None else len(self._index)
)
if isinstance(value, cudf.core.multiindex.MultiIndex):
if len(self._data) > 0 and len(value) != old_length:
msg = (
f"Length mismatch: Expected axis has {old_length} "
f"elements, new values have {len(value)} elements"
)
raise ValueError(msg)
self._index = value
return
new_length = len(value)
if len(self._data) > 0 and new_length != old_length:
msg = (
f"Length mismatch: Expected axis has {old_length} elements, "
f"new values have {new_length} elements"
)
raise ValueError(msg)
# try to build an index from generic _index
idx = as_index(value)
self._index = idx
def reindex(
self, labels=None, axis=0, index=None, columns=None, copy=True
):
"""
Return a new DataFrame whose axes conform to a new index
``DataFrame.reindex`` supports two calling conventions:
- ``(index=index_labels, columns=column_names)``
- ``(labels, axis={0 or 'index', 1 or 'columns'})``
Parameters
----------
labels : Index, Series-convertible, optional, default None
axis : {0 or 'index', 1 or 'columns'}, optional, default 0
index : Index, Series-convertible, optional, default None
Shorthand for ``df.reindex(labels=index_labels, axis=0)``
columns : array-like, optional, default None
Shorthand for ``df.reindex(labels=column_names, axis=1)``
copy : boolean, optional, default True
Returns
-------
A DataFrame whose axes conform to the new index(es)
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame()
>>> df['key'] = [0, 1, 2, 3, 4]
>>> df['val'] = [float(i + 10) for i in range(5)]
>>> df_new = df.reindex(index=[0, 3, 4, 5],
... columns=['key', 'val', 'sum'])
>>> df
key val
0 0 10.0
1 1 11.0
2 2 12.0
3 3 13.0
4 4 14.0
>>> df_new
key val sum
0 0 10.0 NaN
3 3 13.0 NaN
4 4 14.0 NaN
5 -1 NaN NaN
"""
if labels is None and index is None and columns is None:
return self.copy(deep=copy)
df = self
cols = columns
dtypes = OrderedDict(df.dtypes)
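# Resolve the two calling conventions: a bare `labels` argument is
# interpreted as index labels for axis=0/'index' and as column labels
# for axis=1/'columns'.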
idx = labels if index is None and axis in (0, "index") else index
cols = labels if cols is None and axis in (1, "columns") else cols
df = df if cols is None else df[list(set(df.columns) & set(cols))]
result = df._reindex(
columns=cols, dtypes=dtypes, deep=copy, index=idx, inplace=False
)
return result
def _set_index(
self, index, to_drop=None, inplace=False, verify_integrity=False,
):
"""Helper for `.set_index`
Parameters
----------
index : Index
The new index to set.
to_drop : list optional, default None
A list of labels indicating columns to drop.
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object).
verify_integrity : boolean, default False
Check for duplicates in the new index.
"""
if not isinstance(index, Index):
raise ValueError("Parameter index should be type `Index`.")
df = self if inplace else self.copy(deep=True)
if verify_integrity and not index.is_unique:
raise ValueError(f"Values in Index are not unique: {index}")
if to_drop:
df.drop(columns=to_drop, inplace=True)
df.index = index
return df if not inplace else None
def set_index(
self,
keys,
drop=True,
append=False,
inplace=False,
verify_integrity=False,
):
"""Return a new DataFrame with a new index
Parameters
----------
keys : Index, Series-convertible, label-like, or list
Index : the new index.
Series-convertible : values for the new index.
Label-like : Label of column to be used as index.
List : List of items from above.
drop : boolean, default True
Whether to drop the corresponding column when a column label
is passed as the index argument.
append : boolean, default False
Whether to append columns to the existing index,
resulting in a MultiIndex.
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object).
verify_integrity : boolean, default False
Check for duplicates in the new index.
Examples
--------
>>> df = cudf.DataFrame({
... "a": [1, 2, 3, 4, 5],
... "b": ["a", "b", "c", "d","e"],
... "c": [1.0, 2.0, 3.0, 4.0, 5.0]
... })
>>> df
a b c
0 1 a 1.0
1 2 b 2.0
2 3 c 3.0
3 4 d 4.0
4 5 e 5.0
Set the index to become the ‘b’ column:
>>> df.set_index('b')
a c
b
a 1 1.0
b 2 2.0
c 3 3.0
d 4 4.0
e 5 5.0
Create a MultiIndex using columns ‘a’ and ‘b’:
>>> df.set_index(["a", "b"])
c
a b
1 a 1.0
2 b 2.0
3 c 3.0
4 d 4.0
5 e 5.0
Set new Index instance as index:
>>> df.set_index(cudf.RangeIndex(10, 15))
a b c
10 1 a 1.0
11 2 b 2.0
12 3 c 3.0
13 4 d 4.0
14 5 e 5.0
Setting `append=True` will combine current index with column `a`:
>>> df.set_index("a", append=True)
b c
a
0 1 a 1.0
1 2 b 2.0
2 3 c 3.0
3 4 d 4.0
4 5 e 5.0
`set_index` supports `inplace` parameter too:
>>> df.set_index("a", inplace=True)
>>> df
b c
a
1 a 1.0
2 b 2.0
3 c 3.0
4 d 4.0
5 e 5.0
"""
if not isinstance(keys, list):
keys = [keys]
# Preliminary type check
col_not_found = []
columns_to_add = []
names = []
to_drop = []
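# Classify each key: existing column labels are pulled from the frame
# (and optionally dropped), while anything else is coerced into one or
# more index columns (a MultiIndex contributes one column per level).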
for i, col in enumerate(keys):
# Is column label
if is_scalar(col) or isinstance(col, tuple):
if col in self.columns:
columns_to_add.append(self[col])
names.append(col)
if drop:
to_drop.append(col)
else:
col_not_found.append(col)
else:
# Try coerce into column
if not is_column_like(col):
try:
col = as_column(col)
except TypeError:
msg = f"{col} cannot be converted to column-like."
raise TypeError(msg)
if isinstance(col, (cudf.MultiIndex, pd.MultiIndex)):
col = (
cudf.from_pandas(col)
if isinstance(col, pd.MultiIndex)
else col
)
cols = [col._data[x] for x in col._data]
columns_to_add.extend(cols)
names.extend(col.names)
else:
if isinstance(col, (pd.RangeIndex, cudf.RangeIndex)):
# Corner case: a RangeIndex does not need to be converted to a column
columns_to_add.append(col)
else:
# For pandas obj, convert to gpu obj
columns_to_add.append(as_column(col))
if isinstance(
col, (cudf.Series, cudf.Index, pd.Series, pd.Index)
):
names.append(col.name)
else:
names.append(None)
if col_not_found:
raise KeyError(f"None of {col_not_found} are in the columns")
if append:
idx_cols = [self.index._data[x] for x in self.index._data]
if isinstance(self.index, cudf.MultiIndex):
idx_names = self.index.names
else:
idx_names = [self.index.name]
columns_to_add = idx_cols + columns_to_add
names = idx_names + names
if len(columns_to_add) == 0:
raise ValueError("No valid columns to be added to index.")
elif len(columns_to_add) == 1:
idx = cudf.Index(columns_to_add[0], name=names[0])
else:
idf = cudf.DataFrame()
for i, col in enumerate(columns_to_add):
idf[i] = col
idx = cudf.MultiIndex.from_frame(idf, names=names)
return self._set_index(
index=idx,
to_drop=to_drop,
inplace=inplace,
verify_integrity=verify_integrity,
)
def reset_index(
self, level=None, drop=False, inplace=False, col_level=0, col_fill=""
):
"""
Reset the index.
Reset the index of the DataFrame, and use the default one instead.
Parameters
----------
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
Returns
-------
DataFrame or None
DataFrame with the new index or None if ``inplace=True``.
Examples
--------
>>> df = cudf.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal <NA>
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal <NA>
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal <NA>
"""
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
if col_level != 0:
raise NotImplementedError(
"col_level parameter is not supported yet."
)
if col_fill != "":
raise NotImplementedError(
"col_fill parameter is not supported yet."
)
if inplace:
result = self
else:
result = self.copy()
if all(name is None for name in self.index.names):
if isinstance(self.index, cudf.MultiIndex):
names = tuple(
f"level_{i}" for i, _ in enumerate(self.index.names)
)
else:
names = ("index",)
else:
names = self.index.names
if not drop:
index_columns = self.index._data.columns
for name, index_column in zip(
reversed(names), reversed(index_columns)
):
result.insert(0, name, index_column)
result.index = RangeIndex(len(self))
if inplace:
return
else:
return result
def take(self, positions, keep_index=True):
"""
Return a new DataFrame containing the rows specified by *positions*
Parameters
----------
positions : array-like
Integer or boolean array-like specifying the rows of the output.
If integer, each element represents the integer index of a row.
If boolean, *positions* must be of the same length as *self*,
and represents a boolean mask.
Returns
-------
out : DataFrame
New DataFrame
Examples
--------
>>> a = cudf.DataFrame({'a': [1.0, 2.0, 3.0],
... 'b': cudf.Series(['a', 'b', 'c'])})
>>> a.take([0, 2, 2])
a b
0 1.0 a
2 3.0 c
2 3.0 c
>>> a.take([True, False, True])
a b
0 1.0 a
2 3.0 c
"""
positions = as_column(positions)
if pd.api.types.is_bool_dtype(positions):
return self._apply_boolean_mask(positions)
out = self._gather(positions, keep_index=keep_index)
out.columns = self.columns
return out
@annotate("DATAFRAME_COPY", color="cyan", domain="cudf_python")
def copy(self, deep=True):
"""
Returns a copy of this dataframe
Parameters
----------
deep: bool
Make a full copy of Series columns and Index at the GPU level, or
create a new allocation with references.
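Examples
--------
A minimal illustrative sketch; the copy is independent of the original
frame (repr output omitted, only the column list is checked):
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3]})
>>> df2 = df.copy()
>>> df2['b'] = [4, 5, 6]
>>> list(df.columns)
['a']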
"""
out = DataFrame(data=self._data.copy(deep=deep))
out.index = self.index.copy(deep=deep)
return out
def __copy__(self):
return self.copy(deep=True)
def __deepcopy__(self, memo=None):
"""
Parameters
----------
memo, default None
Standard signature. Unused
"""
if memo is None:
memo = {}
return self.copy(deep=True)
@annotate("INSERT", color="green", domain="cudf_python")
def insert(self, loc, name, value):
""" Add a column to DataFrame at the index specified by loc.
Parameters
----------
loc : int
location to insert by index; cannot be greater than the number of columns + 1
name : number or string
name or label of column to be inserted
value : Series or array-like
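Examples
--------
A short sketch of positional insertion (the repr is illustrative and
marked to be skipped by doctest):
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2], 'c': [5, 6]})
>>> df.insert(1, 'b', [3, 4])
>>> df  # doctest: +SKIP
   a  b  c
0  1  3  5
1  2  4  6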
"""
num_cols = len(self._data)
if name in self._data:
raise NameError(f"duplicated column name {name}")
if loc < 0:
loc = num_cols + loc + 1
if not (0 <= loc <= num_cols):
raise ValueError(
f"insert location must be within range "
f"{-(num_cols + 1) * (num_cols > 0)}, "
f"{num_cols * (num_cols > 0)}"
)
if is_scalar(value):
value = utils.scalar_broadcast_to(value, len(self))
if len(self) == 0:
if isinstance(value, (pd.Series, Series)):
self._index = as_index(value.index)
elif len(value) > 0:
self._index = RangeIndex(start=0, stop=len(value))
new_data = self._data.__class__()
if num_cols != 0:
for col_name in self._data:
new_data[col_name] = column.column_empty_like(
self._data[col_name],
masked=True,
newsize=len(value),
)
self._data = new_data
elif isinstance(value, (pd.Series, Series)):
value = Series(value)._align_to_index(
self._index, how="right", sort=False
)
value = column.as_column(value)
self._data.insert(name, value, loc=loc)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
):
"""
Drop specified labels from rows or columns.
Remove rows or columns by specifying label names and corresponding
axis, or by specifying directly index or column names. When using a
multi-index, labels on different levels can be removed by specifying
the level.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
index : single label or list-like
Alternative to specifying axis (``labels, axis=0``
is equivalent to ``index=labels``).
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
level : int or level name, optional
For MultiIndex, level from which the labels will be removed.
inplace : bool, default False
If False, return a copy. Otherwise, do operation
inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are
dropped.
Returns
-------
DataFrame
DataFrame without the removed index or column labels.
Raises
------
KeyError
If any of the labels is not found in the selected axis.
See Also
--------
DataFrame.loc : Label-location based indexer for selection by label.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
DataFrame.drop_duplicates : Return DataFrame with duplicate rows
removed, optionally only considering certain columns.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({"A": [1, 2, 3, 4],
... "B": [5, 6, 7, 8],
... "C": [10, 11, 12, 13],
... "D": [20, 30, 40, 50]})
>>> df
A B C D
0 1 5 10 20
1 2 6 11 30
2 3 7 12 40
3 4 8 13 50
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 1 20
1 2 30
2 3 40
3 4 50
>>> df.drop(columns=['B', 'C'])
A D
0 1 20
1 2 30
2 3 40
3 4 50
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 3 7 12 40
3 4 8 13 50
Drop columns and/or rows of MultiIndex DataFrame
>>> midx = cudf.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> df = cudf.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
... [1, 0.8], [0.3, 0.2]])
>>> df
big small
lama speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon speed 320.0 250.0
weight 1.0 0.8
length 0.3 0.2
>>> df.drop(index='cow', columns='small')
big
lama speed 45.0
weight 200.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
>>> df.drop(index='length', level=1)
big small
lama speed 45.0 30.0
weight 200.0 100.0
cow speed 30.0 20.0
weight 250.0 150.0
falcon speed 320.0 250.0
weight 1.0 0.8
"""
if labels is not None:
if index is not None or columns is not None:
raise ValueError(
"Cannot specify both 'labels' and 'index'/'columns'"
)
target = labels
elif index is not None:
target = index
axis = 0
elif columns is not None:
target = columns
axis = 1
else:
raise ValueError(
"Need to specify at least one of 'labels', "
"'index' or 'columns'"
)
if inplace:
outdf = self
else:
outdf = self.copy()
if axis in (1, "columns"):
target = _get_host_unique(target)
_drop_columns(outdf, target, errors)
elif axis in (0, "index"):
if not isinstance(target, (cudf.Series, cudf.Index)):
target = column.as_column(target)
if isinstance(self._index, cudf.MultiIndex):
if level is None:
level = 0
levels_index = outdf.index.get_level_values(level)
if errors == "raise" and not target.isin(levels_index).all():
raise KeyError("One or more values not found in axis")
# TODO : Could use anti-join as a future optimization
sliced_df = outdf.take(~levels_index.isin(target))
sliced_df._index.names = self._index.names
else:
if errors == "raise" and not target.isin(outdf.index).all():
raise KeyError("One or more values not found in axis")
sliced_df = outdf.join(
cudf.DataFrame(index=target), how="leftanti"
)
if columns is not None:
columns = _get_host_unique(columns)
_drop_columns(sliced_df, columns, errors)
outdf._data = sliced_df._data
outdf._index = sliced_df._index
if not inplace:
return outdf
def _drop_column(self, name):
"""Drop a column by *name*
"""
if name not in self._data:
raise KeyError(f"column '{name}' does not exist")
del self._data[name]
def drop_duplicates(
self, subset=None, keep="first", inplace=False, ignore_index=False
):
"""
Return DataFrame with duplicate rows removed, optionally only
considering a certain subset of columns.
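Examples
--------
A minimal sketch; by default the first occurrence of each duplicated
row is kept (repr shown for illustration):
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 1, 2], 'b': [3, 3, 4]})
>>> df.drop_duplicates()  # doctest: +SKIP
   a  b
0  1  3
2  2  4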
"""
outdf = super().drop_duplicates(
subset=subset, keep=keep, ignore_index=ignore_index
)
return self._mimic_inplace(outdf, inplace=inplace)
def pop(self, item):
"""Return a column and drop it from the DataFrame.
"""
popped = self[item]
del self[item]
return popped
def rename(
self,
mapper=None,
index=None,
columns=None,
axis=0,
copy=True,
inplace=False,
level=None,
errors="ignore",
):
"""Alter column and index labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don’t throw an
error.
``DataFrame.rename`` supports two calling conventions:
- ``(index=index_mapper, columns=columns_mapper, ...)``
- ``(mapper, axis={0/'index' or 1/'column'}, ...)``
We highly recommend using keyword arguments to clarify your intent.
Parameters
----------
mapper : dict-like or function, default None
optional dict-like or functions transformations to apply to
the index/column values depending on selected ``axis``.
index : dict-like, default None
Optional dict-like transformations to apply to the index axis'
values. Does not support functions for axis 0 yet.
columns : dict-like or function, default None
optional dict-like or functions transformations to apply to
the columns axis' values.
axis : int, default 0
Axis to rename with mapper.
0 or 'index' for index
1 or 'columns' for columns
copy : boolean, default True
Also copy underlying data
inplace : boolean, default False
If False, return a new DataFrame. If True, modify this DataFrame in
place (columns are assigned without copy).
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified level.
errors : {'raise', 'ignore', 'warn'}, default 'ignore'
*Only 'ignore' supported*
Control raising of exceptions on invalid data for provided dtype.
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original
object.
- ``warn`` : prints last exceptions as warnings and
return original object.
Returns
-------
DataFrame
Notes
-----
Difference from pandas:
* Not supporting: level
Rename will not overwrite column names. If a list with duplicates is
passed, column names will be postfixed with a number.
"""
if errors != "ignore":
raise NotImplementedError(
"Only errors='ignore' is currently supported"
)
if level:
raise NotImplementedError(
"Only level=False is currently supported"
)
if mapper is None and index is None and columns is None:
return self.copy(deep=copy)
index = mapper if index is None and axis in (0, "index") else index
columns = (
mapper if columns is None and axis in (1, "columns") else columns
)
if index:
if (
any(type(item) == str for item in index.values())
and type(self.index) != cudf.core.index.StringIndex
):
raise NotImplementedError(
"Implicit conversion of index to "
"mixed type is not yet supported."
)
out = DataFrame(
index=self.index.replace(
to_replace=list(index.keys()),
replacement=list(index.values()),
)
)
else:
out = DataFrame(index=self.index)
if columns:
postfix = 1
if isinstance(columns, Mapping):
# It is possible for DataFrames with a MultiIndex columns
# object to have columns with the same name. The following
# use of _cols.items and ("_1", "_2"... allows the use of
# rename in this case
for key, col in self._data.items():
if key in columns:
if columns[key] in out._data:
out_column = columns[key] + "_" + str(postfix)
postfix += 1
else:
out_column = columns[key]
out[out_column] = col
else:
out[key] = col
elif callable(columns):
for key, col in self._data.items():
out[columns(key)] = col
else:
out._data = self._data.copy(deep=copy)
if inplace:
self._data = out._data
else:
return out.copy(deep=copy)
def nans_to_nulls(self):
"""
Convert nans (if any) to nulls.
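Examples
--------
A minimal sketch, building the column with ``nan_as_null=False`` so
that it actually contains NaN values (repr shown for illustration):
>>> import cudf
>>> import numpy as np
>>> df = cudf.DataFrame()
>>> df['a'] = cudf.Series([1.0, np.nan, 3.0], nan_as_null=False)
>>> df.nans_to_nulls()  # doctest: +SKIP
      a
0   1.0
1  <NA>
2   3.0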
"""
df = self.copy()
for col in df.columns:
df[col] = df[col].nans_to_nulls()
return df
def as_gpu_matrix(self, columns=None, order="F"):
"""Convert to a matrix in device memory.
Parameters
----------
columns : sequence of str
List of column names to be extracted. The order is preserved.
If None is specified, all columns are used.
order : 'F' or 'C'
Optional argument to determine whether to return a column major
(Fortran) matrix or a row major (C) matrix.
Returns
-------
A (nrow x ncol) numba device ndarray
"""
if columns is None:
columns = self._data.names
cols = [self._data[k] for k in columns]
ncol = len(cols)
nrow = len(self)
if ncol < 1:
# This is the case for empty dataframe - construct empty cupy array
matrix = cupy.empty(
shape=(0, 0), dtype=np.dtype("float64"), order=order
)
return cuda.as_cuda_array(matrix)
if any(
(is_categorical_dtype(c) or np.issubdtype(c, np.dtype("object")))
for c in cols
):
raise TypeError("non-numeric data not yet supported")
dtype = find_common_type([col.dtype for col in cols])
for k, c in self._data.items():
if c.has_nulls:
raise ValueError(
f"column '{k}' has null values. "
f"hint: use .fillna() to replace null values"
)
cupy_dtype = dtype
if np.issubdtype(cupy_dtype, np.datetime64):
cupy_dtype = np.dtype("int64")
if order not in ("F", "C"):
raise ValueError(
"order parameter should be 'C' for row major or 'F' for"
"column major GPU matrix"
)
matrix = cupy.empty(shape=(nrow, ncol), dtype=cupy_dtype, order=order)
for colidx, inpcol in enumerate(cols):
dense = inpcol.astype(cupy_dtype)
matrix[:, colidx] = cupy.asarray(dense)
return cuda.as_cuda_array(matrix).view(dtype)
def as_matrix(self, columns=None):
"""Convert to a matrix in host memory.
Parameters
----------
columns : sequence of str
List of column names to be extracted. The order is preserved.
If None is specified, all columns are used.
Returns
-------
A (nrow x ncol) numpy ndarray in "F" order.
"""
return self.as_gpu_matrix(columns=columns).copy_to_host()
def one_hot_encoding(
self, column, prefix, cats, prefix_sep="_", dtype="float64"
):
"""
Expand a column with one-hot-encoding.
Parameters
----------
column : str
the source column with binary encoding for the data.
prefix : str
the new column name prefix.
cats : sequence of ints
the sequence of categories as integers.
prefix_sep : str
the separator between the prefix and the category.
dtype :
the dtype for the outputs; defaults to float64.
Returns
-------
a new dataframe with new columns appended for each category.
Examples
--------
>>> import pandas as pd
>>> import cudf
>>> pet_owner = [1, 2, 3, 4, 5]
>>> pet_type = ['fish', 'dog', 'fish', 'bird', 'fish']
>>> df = pd.DataFrame({'pet_owner': pet_owner, 'pet_type': pet_type})
>>> df.pet_type = df.pet_type.astype('category')
Create a column with numerically encoded category values
>>> df['pet_codes'] = df.pet_type.cat.codes
>>> gdf = cudf.from_pandas(df)
Create the list of category codes to use in the encoding
>>> codes = gdf.pet_codes.unique()
>>> gdf.one_hot_encoding('pet_codes', 'pet_dummy', codes).head()
pet_owner pet_type pet_codes pet_dummy_0 pet_dummy_1 pet_dummy_2
0 1 fish 2 0.0 0.0 1.0
1 2 dog 1 0.0 1.0 0.0
2 3 fish 2 0.0 0.0 1.0
3 4 bird 0 1.0 0.0 0.0
4 5 fish 2 0.0 0.0 1.0
"""
if hasattr(cats, "to_arrow"):
cats = cats.to_arrow().to_pylist()
else:
cats = pd.Series(cats, dtype="object")
newnames = [
prefix_sep.join([prefix, "null" if cat is None else str(cat)])
for cat in cats
]
newcols = self[column].one_hot_encoding(cats=cats, dtype=dtype)
outdf = self.copy()
for name, col in zip(newnames, newcols):
outdf.insert(len(outdf._data), name, col)
return outdf
def label_encoding(
self, column, prefix, cats, prefix_sep="_", dtype=None, na_sentinel=-1
):
"""Encode labels in a column with label encoding.
Parameters
----------
column : str
the source column with binary encoding for the data.
prefix : str
the new column name prefix.
cats : sequence of ints
the sequence of categories as integers.
prefix_sep : str
the separator between the prefix and the category.
dtype :
the dtype for the outputs; see Series.label_encoding
na_sentinel : number
Value to indicate missing category.
Returns
-------
a new dataframe with a new column appended for the coded values.
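Examples
--------
An illustrative sketch; each value is mapped to its position in
``cats`` and the encoded column is named ``<prefix>_labels`` (repr
shown for illustration):
>>> import cudf
>>> df = cudf.DataFrame({'code': [10, 20, 10]})
>>> df.label_encoding(column='code', prefix='code',
...                   cats=[10, 20])  # doctest: +SKIP
   code  code_labels
0    10            0
1    20            1
2    10            0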
"""
newname = prefix_sep.join([prefix, "labels"])
newcol = self[column].label_encoding(
cats=cats, dtype=dtype, na_sentinel=na_sentinel
)
outdf = self.copy()
outdf.insert(len(outdf._data), newname, newcol)
return outdf
@annotate("ARGSORT", color="yellow", domain="cudf_python")
def argsort(self, ascending=True, na_position="last"):
"""
Sort by the values.
Parameters
----------
ascending : bool or list of bool, default True
If True, sort values in ascending order, otherwise descending.
na_position : {‘first’ or ‘last’}, default ‘last’
Argument ‘first’ puts NaNs at the beginning, ‘last’ puts NaNs
at the end.
Returns
-------
out_column_inds : cuDF Column of indices sorted based on input
Notes
-----
Difference from pandas:
- Support axis='index' only.
- Not supporting: inplace, kind
- Ascending can be a list of bools to control per column
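Examples
--------
A minimal sketch; the returned indices can be passed to ``take``
(repr shown for illustration):
>>> import cudf
>>> df = cudf.DataFrame({'a': [3, 1, 2]})
>>> inds = df.argsort()
>>> df.take(inds)  # doctest: +SKIP
   a
1  1
2  2
0  3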
"""
return self._get_sorted_inds(
ascending=ascending, na_position=na_position
)
@annotate("SORT_INDEX", color="red", domain="cudf_python")
def sort_index(
self,
axis=0,
level=None,
ascending=True,
inplace=False,
kind=None,
na_position="last",
sort_remaining=True,
ignore_index=False,
):
"""Sort object by labels (along an axis).
Parameters
----------
axis : {0 or ‘index’, 1 or ‘columns’}, default 0
The axis along which to sort. The value 0 identifies the rows,
and 1 identifies the columns.
level : int or level name or list of ints or list of level names
If not None, sort on values in specified index level(s).
This is only useful in the case of MultiIndex.
ascending : bool, default True
Sort ascending vs. descending.
inplace : bool, default False
If True, perform operation in-place.
kind : sorting method such as `quick sort` and others.
Not yet supported.
na_position : {‘first’, ‘last’}, default ‘last’
Puts NaNs at the beginning if first; last puts NaNs at the end.
sort_remaining : bool, default True
Not yet supported
ignore_index : bool, default False
If True, the index will be replaced with a RangeIndex.
Returns
-------
DataFrame or None
Examples
--------
>>> df = cudf.DataFrame(
... {"b":[3, 2, 1], "a":[2, 1, 3]}, index=[1, 3, 2])
>>> df.sort_index(axis=0)
b a
1 3 2
2 1 3
3 2 1
>>> df.sort_index(axis=1)
a b
1 2 3
3 1 2
2 3 1
"""
if kind is not None:
raise NotImplementedError("kind is not yet supported")
if not sort_remaining:
raise NotImplementedError(
"sort_remaining == False is not yet supported"
)
if axis in (0, "index"):
if level is not None and isinstance(self.index, cudf.MultiIndex):
# Pandas currently doesn't handle
# in case of MultiIndex
if ascending is True:
na_position = "first"
else:
na_position = "last"
if is_list_like(level):
labels = [
self.index._get_level_label(lvl) for lvl in level
]
else:
labels = [self.index._get_level_label(level)]
inds = self.index._source_data[labels].argsort(
ascending=ascending, na_position=na_position
)
else:
inds = self.index.argsort(
ascending=ascending, na_position=na_position
)
outdf = self.take(inds)
else:
labels = sorted(self._data.names, reverse=not ascending)
outdf = self[labels]
if ignore_index is True:
outdf = outdf.reset_index(drop=True)
return self._mimic_inplace(outdf, inplace=inplace)
def sort_values(
self,
by,
axis=0,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
ignore_index=False,
):
"""
Sort by the values row-wise.
Parameters
----------
by : str or list of str
Name or list of names to sort by.
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify a list for multiple sort
orders. If this is a list of bools, it must match the length of
*by*.
na_position : {‘first’, ‘last’}, default ‘last’
'first' puts nulls at the beginning, 'last' puts nulls at the end
ignore_index : bool, default False
If True, the resulting index is reset to the default RangeIndex.
Returns
-------
sorted_obj : cuDF DataFrame
Notes
-----
Difference from pandas:
* Support axis='index' only.
* Not supporting: inplace, kind
Examples
--------
>>> import cudf
>>> a = ('a', [0, 1, 2])
>>> b = ('b', [-3, 2, 0])
>>> df = cudf.DataFrame([a, b])
>>> df.sort_values('b')
a b
0 0 -3
2 2 0
1 1 2
"""
if inplace:
raise NotImplementedError("`inplace` not currently implemented.")
if kind != "quicksort":
raise NotImplementedError("`kind` not currently implemented.")
if axis != 0:
raise NotImplementedError("`axis` not currently implemented.")
# argsort the `by` column
return self.take(
self[by].argsort(ascending=ascending, na_position=na_position),
keep_index=not ignore_index,
)
def agg(self, aggs, axis=None):
"""
Aggregate using one or more operations over the specified axis.
Parameters
----------
aggs : Iterable (set, list, string, tuple or dict)
Function to use for aggregating data. Accepted types are:
* string name, e.g. ``"sum"``
* list of functions, e.g. ``["sum", "min", "max"]``
* dict of axis labels specified operations per column,
e.g. ``{"a": "sum"}``
axis : not yet supported
Returns
-------
Aggregation Result : ``Series`` or ``DataFrame``
When ``DataFrame.agg`` is called with single agg,
``Series`` is returned.
When ``DataFrame.agg`` is called with several aggs,
``DataFrame`` is returned.
Notes
-----
Difference from pandas:
* Not supporting: ``axis``, ``*args``, ``**kwargs``
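Examples
--------
A sketch of the string and list forms (reprs are illustrative and
marked to be skipped by doctest):
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
>>> df.agg('sum')  # doctest: +SKIP
a     6
b    15
dtype: int64
>>> df.agg(['sum', 'min'])  # doctest: +SKIP
     a   b
sum  6  15
min  1   4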
"""
# TODO: Remove the typecasting below once issue #6846 is fixed
# link <https://github.com/rapidsai/cudf/issues/6846>
dtypes = [self[col].dtype for col in self._column_names]
common_dtype = cudf.utils.dtypes.find_common_type(dtypes)
df_normalized = self.astype(common_dtype)
if any(is_string_dtype(dt) for dt in dtypes):
raise NotImplementedError(
"DataFrame.agg() is not supported for "
"frames containing string columns"
)
if axis is not None:
raise NotImplementedError("axis not implemented yet")
if isinstance(aggs, Iterable) and not isinstance(aggs, (str, dict)):
result = cudf.DataFrame()
# TODO : Allow simultaneous pass for multi-aggregation as
# a future optimization
for agg in aggs:
result[agg] = getattr(df_normalized, agg)()
return result.T.sort_index(axis=1, ascending=True)
elif isinstance(aggs, str):
if not hasattr(df_normalized, aggs):
raise AttributeError(
f"{aggs} is not a valid function for "
f"'DataFrame' object"
)
result = cudf.DataFrame()
result[aggs] = getattr(df_normalized, aggs)()
result = result.iloc[:, 0]
result.name = None
return result
elif isinstance(aggs, dict):
cols = aggs.keys()
if any([callable(val) for val in aggs.values()]):
raise NotImplementedError(
"callable parameter is not implemented yet"
)
elif all([isinstance(val, str) for val in aggs.values()]):
result = cudf.Series(index=cols)
for key, value in aggs.items():
col = df_normalized[key]
if not hasattr(col, value):
raise AttributeError(
f"{value} is not a valid function for "
f"'Series' object"
)
result[key] = getattr(col, value)()
elif all([isinstance(val, Iterable) for val in aggs.values()]):
idxs = set()
for val in aggs.values():
if isinstance(val, Iterable):
idxs.update(val)
elif isinstance(val, str):
idxs.add(val)
idxs = sorted(list(idxs))
for agg in idxs:
if callable(agg):
raise NotImplementedError(
"callable parameter is not implemented yet"
)
result = cudf.DataFrame(index=idxs, columns=cols)
for key in aggs.keys():
col = df_normalized[key]
col_empty = column_empty(
len(idxs), dtype=col.dtype, masked=True
)
ans = cudf.Series(data=col_empty, index=idxs)
if isinstance(aggs.get(key), Iterable):
# TODO : Allow simultaneous pass for multi-aggregation
# as a future optimization
for agg in aggs.get(key):
if not hasattr(col, agg):
raise AttributeError(
f"{agg} is not a valid function for "
f"'Series' object"
)
ans[agg] = getattr(col, agg)()
elif isinstance(aggs.get(key), str):
if not hasattr(col, aggs.get(key)):
raise AttributeError(
f"{aggs.get(key)} is not a valid function for "
f"'Series' object"
)
ans[aggs.get(key)] = getattr(col, aggs.get(key))()
result[key] = ans
else:
raise ValueError("values of dict must be a string or list")
return result
elif callable(aggs):
raise NotImplementedError(
"callable parameter is not implemented yet"
)
else:
raise ValueError("argument must be a string, list or dict")
def nlargest(self, n, columns, keep="first"):
"""Get the rows of the DataFrame sorted by the n largest value of *columns*
Notes
-----
Difference from pandas:
- Only a single column is supported in *columns*
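Examples
--------
A minimal sketch (repr shown for illustration):
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 5, 3], 'b': [10, 20, 30]})
>>> df.nlargest(2, 'a')  # doctest: +SKIP
   a   b
1  5  20
2  3  30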
"""
return self._n_largest_or_smallest("nlargest", n, columns, keep)
def nsmallest(self, n, columns, keep="first"):
"""Get the rows of the DataFrame sorted by the n smallest value of *columns*
Notes
-----
Difference from pandas:
- Only a single column is supported in *columns*
"""
return self._n_largest_or_smallest("nsmallest", n, columns, keep)
def _n_largest_or_smallest(self, method, n, columns, keep):
# Get column to operate on
if not isinstance(columns, str):
[column] = columns
else:
column = columns
col = self[column].reset_index(drop=True)
# Operate
sorted_series = getattr(col, method)(n=n, keep=keep)
df = DataFrame()
new_positions = sorted_series.index.gpu_values
for k in self._data.names:
if k == column:
df[k] = sorted_series
else:
df[k] = self[k].reset_index(drop=True).take(new_positions)
return df.set_index(self.index.take(new_positions))
def transpose(self):
"""Transpose index and columns.
Returns
-------
a new (ncol x nrow) dataframe. self is (nrow x ncol)
Notes
-----
Difference from pandas:
Not supporting *copy* because default and only behavior is copy=True
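Examples
--------
A minimal sketch; row labels become column labels and vice versa
(repr shown for illustration):
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df.transpose()  # doctest: +SKIP
   0  1
a  1  2
b  3  4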
"""
# Never transpose a MultiIndex - remove the existing columns and
# replace with a RangeIndex. Afterward, reassign.
columns = self.index.copy(deep=False)
index = self.columns.copy(deep=False)
if self._num_columns == 0 or self._num_rows == 0:
return DataFrame(index=index, columns=columns)
# Cython renames the columns to the range [0...ncols]
result = self.__class__._from_table(libcudf.transpose.transpose(self))
# Set the old column names as the new index
result._index = as_index(index)
# Set the old index as the new column names
result.columns = columns
return result
@property
def T(self):
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows
as columns and vice-versa. The property T is an accessor to
the method transpose().
Returns
-------
out : DataFrame
The transposed DataFrame.
"""
return self.transpose()
def melt(self, **kwargs):
"""Unpivots a DataFrame from wide format to long format,
optionally leaving identifier variables set.
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
default: None
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot.
default: all columns that are not set as `id_vars`.
var_name : scalar
Name to use for the `variable` column.
default: frame.columns.name or 'variable'
value_name : str
Name to use for the `value` column.
default: 'value'
Returns
-------
out : DataFrame
Melted result
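Examples
--------
A minimal sketch of unpivoting two value columns (repr shown for
illustration):
>>> import cudf
>>> df = cudf.DataFrame({'id': [1, 2], 'x': [3, 4], 'y': [5, 6]})
>>> df.melt(id_vars=['id'], value_vars=['x', 'y'])  # doctest: +SKIP
   id variable  value
0   1        x      3
1   2        x      4
2   1        y      5
3   2        y      6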
"""
from cudf.core.reshape import melt
return melt(self, **kwargs)
@annotate("JOIN", color="blue", domain="cudf_python")
def merge(
self,
right,
on=None,
left_on=None,
right_on=None,
left_index=False,
right_index=False,
how="inner",
sort=False,
lsuffix=None,
rsuffix=None,
method="hash",
indicator=False,
suffixes=("_x", "_y"),
):
"""Merge GPU DataFrame objects by performing a database-style join
operation by columns or indexes.
Parameters
----------
right : DataFrame
on : label or list; defaults to None
Column or index level names to join on. These must be found in
both DataFrames.
If on is None and not merging on indexes then
this defaults to the intersection of the columns
in both DataFrames.
how : {‘left’, ‘outer’, ‘inner’}, default ‘inner’
Type of merge to be performed.
- left : use only keys from left frame, similar to a SQL left
outer join.
- right : not supported.
- outer : use union of keys from both frames, similar to a SQL
full outer join.
- inner: use intersection of keys from both frames, similar to
a SQL inner join.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame.
Can also be an array or list of arrays of the length of the
left DataFrame. These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame.
Can also be an array or list of arrays of the length of the
right DataFrame. These arrays are treated as if they are columns.
left_index : bool, default False
Use the index from the left DataFrame as the join key(s).
right_index : bool, default False
Use the index from the right DataFrame as the join key.
sort : bool, default False
Sort the resulting dataframe by the columns that were merged on,
starting from the left.
suffixes: Tuple[str, str], defaults to ('_x', '_y')
Suffixes applied to overlapping column names on the left and right
sides
method : {‘hash’, ‘sort’}, default ‘hash’
The implementation method to be used for the operation.
Returns
-------
merged : DataFrame
Notes
-----
**DataFrame merges in cuDF result in non-deterministic row ordering.**
Examples
--------
>>> import cudf
>>> df_a = cudf.DataFrame()
>>> df_a['key'] = [0, 1, 2, 3, 4]
>>> df_a['vals_a'] = [float(i + 10) for i in range(5)]
>>> df_b = cudf.DataFrame()
>>> df_b['key'] = [1, 2, 4]
>>> df_b['vals_b'] = [float(i+10) for i in range(3)]
>>> df_merged = df_a.merge(df_b, on=['key'], how='left')
>>> df_merged.sort_values('key') # doctest: +SKIP
key vals_a vals_b
3 0 10.0
0 1 11.0 10.0
1 2 12.0 11.0
4 3 13.0
2 4 14.0 12.0
"""
if indicator:
raise NotImplementedError(
"Only indicator=False is currently supported"
)
if lsuffix or rsuffix:
raise ValueError(
"The lsuffix and rsuffix keywords have been replaced with the "
"``suffixes=`` keyword. "
"Please provide the following instead: \n\n"
" suffixes=('%s', '%s')"
% (lsuffix or "_x", rsuffix or "_y")
)
else:
lsuffix, rsuffix = suffixes
lhs = self.copy(deep=False)
rhs = right.copy(deep=False)
# Compute merge
gdf_result = super(DataFrame, lhs)._merge(
rhs,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
how=how,
sort=sort,
lsuffix=lsuffix,
rsuffix=rsuffix,
method=method,
indicator=indicator,
suffixes=suffixes,
)
return gdf_result
@annotate("JOIN", color="blue", domain="cudf_python")
def join(
self,
other,
on=None,
how="left",
lsuffix="",
rsuffix="",
sort=False,
method="hash",
):
"""Join columns with other DataFrame on index or on a key column.
Parameters
----------
other : DataFrame
how : str
Only accepts "left", "right", "inner", "outer"
lsuffix, rsuffix : str
The suffixes to add to the left (*lsuffix*) and right (*rsuffix*)
column names when avoiding conflicts.
sort : bool
Set to True to ensure sorted ordering.
Returns
-------
joined : DataFrame
Notes
-----
Difference from pandas:
- *other* must be a single DataFrame for now.
- *on* is not supported yet due to lack of multi-index support.
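Examples
--------
A minimal index-aligned join; row order of the result is not
guaranteed, so the output is sorted for display (repr illustrative):
>>> import cudf
>>> left = cudf.DataFrame({'a': [1, 2]}, index=['x', 'y'])
>>> right = cudf.DataFrame({'b': [3, 4]}, index=['y', 'z'])
>>> left.join(right, how='inner').sort_index()  # doctest: +SKIP
   a  b
y  2  3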
"""
lhs = self
rhs = other
df = lhs.merge(
rhs,
left_index=True,
right_index=True,
how=how,
suffixes=(lsuffix, rsuffix),
method=method,
sort=sort,
)
df.index.name = (
None if lhs.index.name != rhs.index.name else lhs.index.name
)
return df
@copy_docstring(DataFrameGroupBy.__init__)
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False,
dropna=True,
):
if axis not in (0, "index"):
raise NotImplementedError("axis parameter is not yet implemented")
if group_keys is not True:
raise NotImplementedError(
"The group_keys keyword is not yet implemented"
)
if squeeze is not False:
raise NotImplementedError(
"squeeze parameter is not yet implemented"
)
if observed is not False:
raise NotImplementedError(
"observed parameter is not yet implemented"
)
if by is None and level is None:
raise TypeError(
"groupby() requires either by or level to be specified."
)
return DataFrameGroupBy(
self,
by=by,
level=level,
as_index=as_index,
dropna=dropna,
sort=sort,
)
@copy_docstring(Rolling)
def rolling(
self, window, min_periods=None, center=False, axis=0, win_type=None
):
return Rolling(
self,
window,
min_periods=min_periods,
center=center,
axis=axis,
win_type=win_type,
)
def query(self, expr, local_dict=None):
"""
Query with a boolean expression using Numba to compile a GPU kernel.
See pandas.DataFrame.query.
Parameters
----------
expr : str
A boolean expression. Names in expression refer to columns.
`index` can be used instead of index name, but this is not
supported for MultiIndex.
Names starting with `@` refer to Python variables.
An output value will be `null` if any of the input values are
`null` regardless of expression.
local_dict : dict
Containing the local variable to be used in query.
Returns
-------
filtered : DataFrame
Examples
--------
>>> import cudf
>>> a = ('a', [1, 2, 2])
>>> b = ('b', [3, 4, 5])
>>> df = cudf.DataFrame([a, b])
>>> expr = "(a == 2 and b == 4) or (b == 3)"
>>> df.query(expr)
a b
0 1 3
1 2 4
DateTime conditionals:
>>> import numpy as np
>>> import datetime
>>> df = cudf.DataFrame()
>>> data = np.array(['2018-10-07', '2018-10-08'], dtype='datetime64')
>>> df['datetimes'] = data
>>> search_date = datetime.datetime.strptime('2018-10-08', '%Y-%m-%d')
>>> df.query('datetimes==@search_date')
datetimes
1 2018-10-08T00:00:00.000
Using local_dict:
>>> import numpy as np
>>> import datetime
>>> df = cudf.DataFrame()
>>> data = np.array(['2018-10-07', '2018-10-08'], dtype='datetime64')
>>> df['datetimes'] = data
>>> search_date2 = datetime.datetime.strptime('2018-10-08', '%Y-%m-%d')
>>> df.query('datetimes==@search_date',
... local_dict={'search_date':search_date2})
datetimes
1 2018-10-08T00:00:00.000
"""
# can't use `annotate` decorator here as we inspect the calling
# environment.
with annotate("QUERY", color="purple", domain="cudf_python"):
if local_dict is None:
local_dict = {}
if self.empty:
return self.copy()
if not isinstance(local_dict, dict):
raise TypeError(
f"local_dict type: expected dict but found "
f"{type(local_dict)}"
)
# Get calling environment
callframe = inspect.currentframe().f_back
callenv = {
"locals": callframe.f_locals,
"globals": callframe.f_globals,
"local_dict": local_dict,
}
# Run query
boolmask = queryutils.query_execute(self, expr, callenv)
return self._apply_boolean_mask(boolmask)
@applyutils.doc_apply()
def apply_rows(
self,
func,
incols,
outcols,
kwargs,
pessimistic_nulls=True,
cache_key=None,
):
"""
Apply a row-wise user defined function.
Parameters
----------
{params}
Examples
--------
The user function should loop over the columns and set the output for
each row. Loop execution order is arbitrary, so each iteration of
the loop **MUST** be independent of each other.
When ``func`` is invoked, the array args corresponding to the
input/output are strided so as to improve GPU parallelism.
The loop in the function resembles serial code, but executes
concurrently in multiple threads.
>>> import cudf
>>> import numpy as np
>>> df = cudf.DataFrame()
>>> nelem = 3
>>> df['in1'] = np.arange(nelem)
>>> df['in2'] = np.arange(nelem)
>>> df['in3'] = np.arange(nelem)
Define input columns for the kernel
>>> in1 = df['in1']
>>> in2 = df['in2']
>>> in3 = df['in3']
>>> def kernel(in1, in2, in3, out1, out2, kwarg1, kwarg2):
... for i, (x, y, z) in enumerate(zip(in1, in2, in3)):
... out1[i] = kwarg2 * x - kwarg1 * y
... out2[i] = y - kwarg1 * z
Call ``.apply_rows`` with the name of the input columns, the name and
dtype of the output columns, and, optionally, a dict of extra
arguments.
>>> df.apply_rows(kernel,
... incols=['in1', 'in2', 'in3'],
... outcols=dict(out1=np.float64, out2=np.float64),
... kwargs=dict(kwarg1=3, kwarg2=4))
in1 in2 in3 out1 out2
0 0 0 0 0.0 0.0
1 1 1 1 1.0 -2.0
2 2 2 2 2.0 -4.0
"""
for col in incols:
current_col_dtype = self._data[col].dtype
if is_string_dtype(current_col_dtype) or is_categorical_dtype(
current_col_dtype
):
raise TypeError(
"User defined functions are currently not "
"supported on Series with dtypes `str` and `category`."
)
return applyutils.apply_rows(
self,
func,
incols,
outcols,
kwargs,
pessimistic_nulls,
cache_key=cache_key,
)
@applyutils.doc_applychunks()
def apply_chunks(
self,
func,
incols,
outcols,
kwargs=None,
pessimistic_nulls=True,
chunks=None,
blkct=None,
tpb=None,
):
"""
Transform user-specified chunks using the user-provided function.
Parameters
----------
{params}
{params_chunks}
Examples
--------
For ``tpb > 1``, ``func`` is executed by ``tpb`` number of threads
concurrently. To access the thread id and count,
use ``numba.cuda.threadIdx.x`` and ``numba.cuda.blockDim.x``,
respectively (See `numba CUDA kernel documentation`_).
.. _numba CUDA kernel documentation:\
http://numba.pydata.org/numba-doc/latest/cuda/kernels.html
In the example below, the *kernel* is invoked concurrently on each
specified chunk. The *kernel* computes the corresponding output
for the chunk.
By looping over the range
``range(cuda.threadIdx.x, in1.size, cuda.blockDim.x)``, the *kernel*
function can be used with any *tpb* in an efficient manner.
>>> from numba import cuda
>>> @cuda.jit
... def kernel(in1, in2, in3, out1):
... for i in range(cuda.threadIdx.x, in1.size, cuda.blockDim.x):
... x = in1[i]
... y = in2[i]
... z = in3[i]
... out1[i] = x * y + z
See also
--------
DataFrame.apply_rows
"""
if kwargs is None:
kwargs = {}
if chunks is None:
raise ValueError("*chunks* must be defined")
return applyutils.apply_chunks(
self,
func,
incols,
outcols,
kwargs,
pessimistic_nulls,
chunks,
tpb=tpb,
)
def hash_columns(self, columns=None):
"""Hash the given *columns* and return a new device array
Parameters
----------
columns : sequence of str; optional
Sequence of column names. If columns is *None* (unspecified),
all columns in the frame are used.
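Examples
--------
A rough sketch; identical rows hash to identical values (the hash
values themselves are an implementation detail):
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 1], 'b': [10, 20, 10]})
>>> hashes = df.hash_columns(['a', 'b'])
>>> bool(hashes[0] == hashes[2])
True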
"""
if columns is None:
table_to_hash = self
else:
cols = [self[k]._column for k in columns]
table_to_hash = Frame(data=OrderedColumnDict(zip(columns, cols)))
return Series(table_to_hash._hash()).values
def partition_by_hash(self, columns, nparts, keep_index=True):
"""Partition the dataframe by the hashed value of data in *columns*.
Parameters
----------
columns : sequence of str
The names of the columns to be hashed.
Must have at least one name.
nparts : int
Number of output partitions
keep_index : boolean
Whether to keep the index or drop it
Returns
-------
partitioned: list of DataFrame
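Examples
--------
A rough sketch; which partition a key lands in is an implementation
detail, but equal keys always map to the same partition:
>>> import cudf
>>> df = cudf.DataFrame({'key': [0, 1, 0, 2], 'val': [1, 2, 3, 4]})
>>> parts = df.partition_by_hash(['key'], nparts=2)
>>> len(parts)
2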
"""
idx = (
0
if (self._index is None or keep_index is False)
else self._index._num_columns
)
key_indices = [self._data.names.index(k) + idx for k in columns]
outdf, offsets = self._hash_partition(key_indices, nparts, keep_index)
# Slice into partition
return [outdf[s:e] for s, e in zip(offsets, offsets[1:] + [None])]
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method=None,
):
"""
Replace values given in *to_replace* with *replacement*.
Parameters
----------
to_replace : numeric, str, list-like or dict
Value(s) to replace.
* numeric or str:
- values equal to *to_replace* will be replaced
with *replacement*
* list of numeric or str:
- If *replacement* is also list-like,
*to_replace* and *replacement* must be of same length.
* dict:
- Dicts can be used to replace different values in different
columns. For example, `{'a': 1, 'z': 2}` specifies that the
value 1 in column `a` and the value 2 in column `z` should be
replaced with replacement*.
value : numeric, str, list-like, or dict
Value(s) to replace `to_replace` with. If a dict is provided, then
its keys must match the keys in *to_replace*, and corresponding
values must be compatible (e.g., if they are lists, then they must
match in length).
inplace : bool, default False
If True, in place.
Returns
-------
result : DataFrame
DataFrame after replacement.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame()
>>> df['id']= [0, 1, 2, -1, 4, -1, 6]
>>> df['id']= df['id'].replace(-1, None)
>>> df
id
0 0
1 1
2 2
3 <NA>
4 4
5 <NA>
6 6
Notes
-----
Parameters that are currently not supported are: `limit`, `regex`,
`method`
"""
if limit is not None:
raise NotImplementedError("limit parameter is not implemented yet")
if regex:
raise NotImplementedError("regex parameter is not implemented yet")
if method not in ("pad", None):
raise NotImplementedError(
"method parameter is not implemented yet"
)
outdf = super().replace(to_replace=to_replace, replacement=value)
return self._mimic_inplace(outdf, inplace=inplace)
def info(
self,
verbose=None,
buf=None,
max_cols=None,
memory_usage=None,
null_counts=None,
):
"""
Print a concise summary of a DataFrame.
This method prints information about a DataFrame including
the index dtype and column dtypes, non-null values and memory usage.
Parameters
----------
verbose : bool, optional
Whether to print the full summary. By default, the setting in
``pandas.options.display.max_info_columns`` is followed.
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
max_cols : int, optional
When to switch from the verbose to the truncated output. If the
DataFrame has more than `max_cols` columns, the truncated output
is used. By default, the setting in
``pandas.options.display.max_info_columns`` is used.
memory_usage : bool, str, optional
Specifies whether total memory usage of the DataFrame
elements (including the index) should be displayed. By default,
this follows the ``pandas.options.display.memory_usage`` setting.
True always shows memory usage. False never shows memory usage.
A value of 'deep' is equivalent to "True with deep introspection".
Memory usage is shown in human-readable units (base-2
representation). Without deep introspection a memory estimation is
made based on column dtype and number of rows, assuming values
consume the same memory amount for corresponding dtypes. With deep
memory introspection, a real memory usage calculation is performed
at the cost of computational resources.
null_counts : bool, optional
Whether to show the non-null counts. By default, this is shown
only if the frame is smaller than
``pandas.options.display.max_info_rows`` and
``pandas.options.display.max_info_columns``. A value of True always
shows the counts, and False never shows the counts.
Returns
-------
None
This method prints a summary of a DataFrame and returns None.
See Also
--------
DataFrame.describe: Generate descriptive statistics of DataFrame
columns.
DataFrame.memory_usage: Memory usage of DataFrame columns.
Examples
--------
>>> import cudf
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
>>> df = cudf.DataFrame({"int_col": int_values,
... "text_col": text_values,
... "float_col": float_values})
>>> df
int_col text_col float_col
0 1 alpha 0.00
1 2 beta 0.25
2 3 gamma 0.50
3 4 delta 0.75
4 5 epsilon 1.00
Prints information of all columns:
>>> df.info(verbose=True)
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
Prints a summary of columns count and its dtypes but not per column
information:
>>> df.info(verbose=False)
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Columns: 3 entries, int_col to float_col
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
Pipe output of DataFrame.info to buffer instead of sys.stdout,
get buffer content and writes to a text file:
>>> import io
>>> buffer = io.StringIO()
>>> df.info(buf=buffer)
>>> s = buffer.getvalue()
>>> with open("df_info.txt", "w",
... encoding="utf-8") as f:
... f.write(s)
...
369
The `memory_usage` parameter allows deep introspection mode, specially
useful for big DataFrames and fine-tune memory optimization:
>>> import numpy as np
>>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
>>> df = cudf.DataFrame({
... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
... })
>>> df.info(memory_usage='deep')
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 column_1 1000000 non-null object
1 column_2 1000000 non-null object
2 column_3 1000000 non-null object
dtypes: object(3)
memory usage: 14.3 MB
"""
if buf is None:
buf = sys.stdout
lines = [str(type(self))]
index_name = type(self._index).__name__
if len(self._index) > 0:
entries_summary = f", {self._index[0]} to {self._index[-1]}"
else:
entries_summary = ""
index_summary = (
f"{index_name}: {len(self._index)} entries{entries_summary}"
)
lines.append(index_summary)
if len(self.columns) == 0:
lines.append(f"Empty {type(self).__name__}")
cudf.utils.ioutils.buffer_write_lines(buf, lines)
return
cols = self.columns
col_count = len(self.columns)
if max_cols is None:
max_cols = pd.options.display.max_info_columns
max_rows = pd.options.display.max_info_rows
if null_counts is None:
show_counts = (col_count <= max_cols) and (len(self) < max_rows)
else:
show_counts = null_counts
exceeds_info_cols = col_count > max_cols
def _put_str(s, space):
return str(s)[:space].ljust(space)
def _verbose_repr():
lines.append(f"Data columns (total {len(self.columns)} columns):")
id_head = " # "
column_head = "Column"
col_space = 2
max_col = max(len(pprint_thing(k)) for k in cols)
len_column = len(pprint_thing(column_head))
space = max(max_col, len_column) + col_space
max_id = len(pprint_thing(col_count))
len_id = len(pprint_thing(id_head))
space_num = max(max_id, len_id) + col_space
counts = None
header = _put_str(id_head, space_num) + _put_str(
column_head, space
)
if show_counts:
counts = self.count().to_pandas().tolist()
if len(cols) != len(counts):
raise AssertionError(
f"Columns must equal "
f"counts ({len(cols)} != {len(counts)})"
)
count_header = "Non-Null Count"
len_count = len(count_header)
non_null = " non-null"
max_count = max(len(pprint_thing(k)) for k in counts) + len(
non_null
)
space_count = max(len_count, max_count) + col_space
count_temp = "{count}" + non_null
else:
count_header = ""
space_count = len(count_header)
len_count = space_count
count_temp = "{count}"
dtype_header = "Dtype"
len_dtype = len(dtype_header)
max_dtypes = max(len(pprint_thing(k)) for k in self.dtypes)
space_dtype = max(len_dtype, max_dtypes)
header += (
_put_str(count_header, space_count)
+ _put_str(dtype_header, space_dtype).rstrip()
)
lines.append(header)
lines.append(
_put_str("-" * len_id, space_num)
+ _put_str("-" * len_column, space)
+ _put_str("-" * len_count, space_count)
+ _put_str("-" * len_dtype, space_dtype).rstrip()
)
for i, col in enumerate(self.columns):
dtype = self.dtypes.iloc[i]
col = pprint_thing(col)
line_no = _put_str(" {num}".format(num=i), space_num)
count = ""
if show_counts:
count = counts[i]
lines.append(
line_no
+ _put_str(col, space)
+ _put_str(count_temp.format(count=count), space_count)
+ _put_str(dtype, space_dtype).rstrip()
)
def _non_verbose_repr():
if len(self.columns) > 0:
entries_summary = f", {self.columns[0]} to {self.columns[-1]}"
else:
entries_summary = ""
columns_summary = (
f"Columns: {len(self.columns)} entries{entries_summary}"
)
lines.append(columns_summary)
def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
for x in ["bytes", "KB", "MB", "GB", "TB"]:
if num < 1024.0:
return f"{num:3.1f}{size_qualifier} {x}"
num /= 1024.0
return f"{num:3.1f}{size_qualifier} PB"
if verbose:
_verbose_repr()
elif verbose is False: # specifically set to False, not nesc None
_non_verbose_repr()
else:
if exceeds_info_cols:
_non_verbose_repr()
else:
_verbose_repr()
dtype_counts = defaultdict(int)
for col in self._data:
dtype_counts[self._data[col].dtype.name] += 1
dtypes = [f"{k[0]}({k[1]:d})" for k in sorted(dtype_counts.items())]
lines.append(f"dtypes: {', '.join(dtypes)}")
if memory_usage is None:
memory_usage = pd.options.display.memory_usage
if memory_usage:
# append memory usage of df to display
size_qualifier = ""
if memory_usage == "deep":
deep = True
else:
deep = False
if "object" in dtype_counts or self.index.dtype == "object":
size_qualifier = "+"
mem_usage = self.memory_usage(index=True, deep=deep).sum()
lines.append(
f"memory usage: {_sizeof_fmt(mem_usage, size_qualifier)}\n"
)
cudf.utils.ioutils.buffer_write_lines(buf, lines)
@docutils.doc_describe()
def describe(
self,
percentiles=None,
include=None,
exclude=None,
datetime_is_numeric=False,
):
"""{docstring}"""
if not include and not exclude:
default_include = [np.number]
if datetime_is_numeric:
default_include.append("datetime")
data_to_describe = self.select_dtypes(include=default_include)
if len(data_to_describe.columns) == 0:
data_to_describe = self
elif include == "all":
if exclude is not None:
raise ValueError("exclude must be None when include is 'all'")
data_to_describe = self
else:
data_to_describe = self.select_dtypes(
include=include, exclude=exclude
)
if data_to_describe.empty:
raise ValueError("No data of included types.")
describe_series_list = [
data_to_describe[col].describe(percentiles=percentiles)
for col in data_to_describe.columns
]
if len(describe_series_list) == 1:
return describe_series_list[0].to_frame()
else:
ldesc_indexes = sorted(
(x.index for x in describe_series_list), key=len
)
names = OrderedDict.fromkeys(
[
name
for idxnames in ldesc_indexes
for name in idxnames.to_pandas()
],
None,
)
return cudf.concat(
[
series.reindex(names, copy=False)
for series in describe_series_list
],
axis=1,
sort=False,
)
def to_pandas(self, nullable=False, **kwargs):
"""
Convert to a Pandas DataFrame.
Parameters
----------
nullable : Boolean, Default False
If ``nullable`` is ``True``, the resulting columns
in the dataframe will have a corresponding
nullable Pandas dtype. If ``nullable`` is ``False``,
the resulting columns will either convert null
values to ``np.nan`` or ``None`` depending on the dtype.
Returns
-------
out : Pandas DataFrame
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [0, 1, 2], 'b': [-3, 2, 0]})
>>> pdf = df.to_pandas()
>>> pdf
a b
0 0 -3
1 1 2
2 2 0
>>> type(pdf)
<class 'pandas.core.frame.DataFrame'>
``nullable`` parameter can be used to control
whether dtype can be Pandas Nullable or not:
>>> df = cudf.DataFrame({'a': [0, None, 2], 'b': [True, False, None]})
>>> df
a b
0 0 True
1 <NA> False
2 2 <NA>
>>> pdf = df.to_pandas(nullable=True)
>>> pdf
a b
0 0 True
1 <NA> False
2 2 <NA>
>>> pdf.dtypes
a Int64
b boolean
dtype: object
>>> pdf = df.to_pandas(nullable=False)
>>> pdf
a b
0 0.0 True
1 NaN False
2 2.0 None
>>> pdf.dtypes
a float64
b object
dtype: object
"""
out_data = {}
out_index = self.index.to_pandas()
if not isinstance(self.columns, pd.Index):
out_columns = self.columns.to_pandas()
else:
out_columns = self.columns
for i, col_key in enumerate(self._data):
out_data[i] = self._data[col_key].to_pandas(
index=out_index, nullable=nullable
)
if isinstance(self.columns, Index):
out_columns = self.columns.to_pandas()
if isinstance(self.columns, cudf.core.multiindex.MultiIndex):
if self.columns.names is not None:
out_columns.names = self.columns.names
else:
out_columns.name = self.columns.name
out_df = pd.DataFrame(out_data, index=out_index)
out_df.columns = out_columns
return out_df
@classmethod
def from_pandas(cls, dataframe, nan_as_null=None):
"""
Convert from a Pandas DataFrame.
Parameters
----------
dataframe : Pandas DataFrame object
A pandas DataFrame object which has to be converted
to cuDF DataFrame.
nan_as_null : bool, Default True
If ``True``, converts ``np.nan`` values to ``null`` values.
If ``False``, leaves ``np.nan`` values as is.
Raises
------
TypeError for invalid input type.
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> data = [[0,1], [1,2], [3,4]]
>>> pdf = pd.DataFrame(data, columns=['a', 'b'], dtype=int)
>>> cudf.from_pandas(pdf)
a b
0 0 1
1 1 2
2 3 4
"""
if not isinstance(dataframe, pd.DataFrame):
raise TypeError("not a pandas.DataFrame")
if not dataframe.columns.is_unique:
raise ValueError("Duplicate column names are not allowed")
df = cls()
# Set columns
for col_name, col_value in dataframe.iteritems():
# necessary because multi-index can return multiple
# columns for a single key
if len(col_value.shape) == 1:
df[col_name] = column.as_column(
col_value.array, nan_as_null=nan_as_null
)
else:
vals = col_value.values.T
if vals.shape[0] == 1:
df[col_name] = column.as_column(
vals.flatten(), nan_as_null=nan_as_null
)
else:
if isinstance(col_name, tuple):
col_name = str(col_name)
for idx in range(len(vals.shape)):
df[col_name] = column.as_column(
vals[idx], nan_as_null=nan_as_null
)
# Set columns only if it is a MultiIndex
if isinstance(dataframe.columns, pd.MultiIndex):
df.columns = dataframe.columns
# Set index
index = cudf.from_pandas(dataframe.index, nan_as_null=nan_as_null)
result = df.set_index(index)
return result
@classmethod
def from_arrow(cls, table):
"""
Convert from PyArrow Table to DataFrame.
Parameters
----------
table : PyArrow Table Object
PyArrow Table Object which has to be converted to cudf DataFrame.
Raises
------
TypeError for invalid input type.
Returns
-------
cudf DataFrame
Notes
-----
- Does not support automatically setting index column(s) similar
to how ``to_pandas`` works for PyArrow Tables.
Examples
--------
>>> import cudf
>>> import pyarrow as pa
>>> data = pa.table({"a":[1, 2, 3], "b":[4, 5, 6]})
>>> cudf.DataFrame.from_arrow(data)
a b
0 1 4
1 2 5
2 3 6
"""
index_col = None
if isinstance(table, pa.Table) and isinstance(
table.schema.pandas_metadata, dict
):
index_col = table.schema.pandas_metadata["index_columns"]
out = super().from_arrow(table)
if index_col:
if isinstance(index_col[0], dict):
out = out.set_index(
cudf.RangeIndex(
index_col[0]["start"],
index_col[0]["stop"],
name=index_col[0]["name"],
)
)
else:
out = out.set_index(index_col[0])
return out
def to_arrow(self, preserve_index=True):
"""
Convert to a PyArrow Table.
Parameters
----------
preserve_index : bool, default True
whether index column and its meta data needs to be saved or not
Returns
-------
PyArrow Table
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame(
... {"a":[1, 2, 3], "b":[4, 5, 6]}, index=[1, 2, 3])
>>> df.to_arrow()
pyarrow.Table
a: int64
b: int64
index: int64
>>> df.to_arrow(preserve_index=False)
pyarrow.Table
a: int64
b: int64
"""
data = self.copy(deep=False)
index_descr = []
if preserve_index:
if isinstance(self.index, cudf.RangeIndex):
descr = {
"kind": "range",
"name": self.index.name,
"start": self.index._start,
"stop": self.index._stop,
"step": 1,
}
else:
if isinstance(self.index, cudf.MultiIndex):
gen_names = tuple(
f"level_{i}"
for i, _ in enumerate(self.index._data.names)
)
else:
gen_names = (
self.index.names
if self.index.name is not None
else ("index",)
)
for gen_name, col_name in zip(
gen_names, self.index._data.names
):
data.insert(
data.shape[1], gen_name, self.index._data[col_name]
)
descr = gen_names[0]
index_descr.append(descr)
out = super(DataFrame, data).to_arrow()
metadata = pa.pandas_compat.construct_metadata(
self,
out.schema.names,
[self.index],
index_descr,
preserve_index,
types=out.schema.types,
)
return out.replace_schema_metadata(metadata)
def to_records(self, index=True):
"""Convert to a numpy recarray
Parameters
----------
index : bool
Whether to include the index in the output.
Returns
-------
numpy recarray
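Examples
--------
A minimal sketch; only the resulting field names are checked:
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
>>> rec = df.to_records()
>>> rec.dtype.names
('index', 'a', 'b')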
"""
members = [("index", self.index.dtype)] if index else []
members += [(col, self[col].dtype) for col in self._data.names]
dtype = np.dtype(members)
ret = np.recarray(len(self), dtype=dtype)
if index:
ret["index"] = self.index.to_array()
for col in self._data.names:
ret[col] = self[col].to_array()
return ret
@classmethod
def from_records(cls, data, index=None, columns=None, nan_as_null=False):
"""
Convert structured or record ndarray to DataFrame.
Parameters
----------
data : numpy structured dtype or recarray of ndim=2
index : str, array-like
The name of the index column in *data*.
If None, the default index is used.
columns : list of str
List of column names to include.
Returns
-------
DataFrame
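Examples
--------
A minimal sketch with a structured NumPy array (repr shown for
illustration):
>>> import numpy as np
>>> import cudf
>>> rec = np.array([(1, 2.0), (3, 4.0)],
...                dtype=[('a', 'i8'), ('b', 'f8')])
>>> cudf.DataFrame.from_records(rec)  # doctest: +SKIP
   a    b
0  1  2.0
1  3  4.0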
"""
if data.ndim != 1 and data.ndim != 2:
raise ValueError(
f"records dimension expected 1 or 2 but found {data.ndim}"
)
num_cols = len(data[0])
if columns is None and data.dtype.names is None:
names = [i for i in range(num_cols)]
elif data.dtype.names is not None:
names = data.dtype.names
else:
if len(columns) != num_cols:
raise ValueError(
f"columns length expected {num_cols} "
f"but found {len(columns)}"
)
names = columns
df = DataFrame()
if data.ndim == 2:
for i, k in enumerate(names):
df._data[k] = column.as_column(
data[:, i], nan_as_null=nan_as_null
)
elif data.ndim == 1:
for k in names:
df._data[k] = column.as_column(
data[k], nan_as_null=nan_as_null
)
if index is None:
df._index = RangeIndex(start=0, stop=len(data))
elif is_scalar(index):
df._index = RangeIndex(start=0, stop=len(data))
df = df.set_index(index)
else:
df._index = as_index(index)
return df
@classmethod
def _from_arrays(cls, data, index=None, columns=None, nan_as_null=False):
"""Convert a numpy/cupy array to DataFrame.
Parameters
----------
data : numpy/cupy array of ndim 1 or 2,
dimensions greater than 2 are not supported yet.
index : Index or array-like
Index to use for resulting frame. Will default to
RangeIndex if no indexing information part of input data and
no index provided.
columns : list of str
List of column names to include.
Returns
-------
DataFrame
"""
data = cupy.asarray(data)
if data.ndim != 1 and data.ndim != 2:
raise ValueError(
f"records dimension expected 1 or 2 but found: {data.ndim}"
)
if data.ndim == 2:
num_cols = len(data[0])
else:
# Since we validate ndim to be either 1 or 2 above,
# this case can be assumed to be ndim == 1.
num_cols = 1
if columns is None:
names = [i for i in range(num_cols)]
else:
if len(columns) != num_cols:
raise ValueError(
f"columns length expected {num_cols} but "
f"found {len(columns)}"
)
names = columns
df = cls()
if data.ndim == 2:
for i, k in enumerate(names):
df._data[k] = column.as_column(
data[:, i], nan_as_null=nan_as_null
)
elif data.ndim == 1:
df._data[names[0]] = column.as_column(
data, nan_as_null=nan_as_null
)
if index is None:
df._index = RangeIndex(start=0, stop=len(data))
else:
df._index = as_index(index)
return df
@classmethod
def _from_columns(cls, cols, index=None, columns=None):
"""
Construct a DataFrame from a list of Columns
"""
if columns is not None:
data = dict(zip(columns, cols))
else:
data = dict(enumerate(cols))
return cls(data=data, index=index,)
def quantile(
self,
q=0.5,
axis=0,
numeric_only=True,
interpolation="linear",
columns=None,
exact=True,
):
"""
Return values at the given quantile.
Parameters
----------
q : float or array-like
0 <= q <= 1, the quantile(s) to compute
axis : int
axis is a NON-FUNCTIONAL parameter
numeric_only : bool, default True
If False, the quantile of datetime and timedelta data will be
computed as well.
interpolation : {`linear`, `lower`, `higher`, `midpoint`, `nearest`}
This parameter specifies the interpolation method to use,
when the desired quantile lies between two data points i and j.
Default ``linear``.
columns : list of str
List of column names to include.
exact : boolean
Whether to use approximate or exact quantile algorithm.
Returns
-------
Series or DataFrame
If q is an array or numeric_only is set to False, a DataFrame
will be returned where index is q, the columns are the columns
of self, and the values are the quantile.
If q is a float, a Series will be returned where the index is
the columns of self and the values are the quantiles.
Notes
-----
One notable difference from pandas: when the DataFrame holds
non-numeric types, pandas may return a Series, whereas cuDF returns a
DataFrame because cuDF does not support mixed types within a Series.
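Examples
--------
Illustrative usage (output formatting may vary slightly):
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.quantile(0.5)
a    2.5
b    8.5
Name: 0.5, dtype: float64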
"""
if axis not in (0, None):
raise NotImplementedError("axis is not implemented yet")
if numeric_only:
data_df = self.select_dtypes(
include=[np.number], exclude=["datetime64", "timedelta64"]
)
else:
data_df = self
if columns is None:
columns = data_df._data.names
result = DataFrame()
for k in data_df._data.names:
if k in columns:
res = data_df[k].quantile(
q,
interpolation=interpolation,
exact=exact,
quant_index=False,
)
if (
not isinstance(
res, (numbers.Number, pd.Timestamp, pd.Timedelta)
)
and len(res) == 0
):
res = column.column_empty_like(
q, dtype=data_df[k].dtype, masked=True, newsize=len(q)
)
result[k] = column.as_column(res)
if isinstance(q, numbers.Number) and numeric_only:
result = result.fillna(np.nan)
result = result.iloc[0]
result.index = as_index(data_df.columns)
result.name = q
return result
else:
q = list(map(float, [q] if isinstance(q, numbers.Number) else q))
result.index = q
return result
def quantiles(self, q=0.5, interpolation="nearest"):
"""
Return values at the given quantile.
Parameters
----------
q : float or array-like
0 <= q <= 1, the quantile(s) to compute
interpolation : {`lower`, `higher`, `nearest`}
This parameter specifies the interpolation method to use,
when the desired quantile lies between two data points i and j.
Default 'nearest'.
Returns
-------
DataFrame
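Examples
--------
Illustrative usage; exact values depend on the underlying libcudf
quantile implementation, so output is not shown:
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> quartiles = df.quantiles([0.25, 0.5, 0.75])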
"""
if isinstance(q, numbers.Number):
q_is_number = True
q = [float(q)]
elif pd.api.types.is_list_like(q):
q_is_number = False
else:
msg = "`q` must be either a single element or list"
raise TypeError(msg)
result = self._quantiles(q, interpolation.upper())
if q_is_number:
result = result.transpose()
return Series(
data=result._columns[0], index=result.index, name=q[0]
)
else:
result.index = as_index(q)
return result
def isin(self, values):
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dict
The result will only be true at a location if all
the labels match. If values is a Series, that’s the index.
If values is a dict, the keys must be the column names,
which must match. If values is a DataFrame, then both the
index and column labels must match.
Returns
-------
DataFrame:
DataFrame of booleans showing whether each element in
the DataFrame is contained in values.
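Examples
--------
A small sketch mirroring the pandas behaviour described above (output
formatting may differ slightly):
>>> import cudf
>>> df = cudf.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
...                     index=['falcon', 'dog'])
>>> df.isin([0, 2])
        num_legs  num_wings
falcon      True       True
dog        False       True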
"""
if isinstance(values, dict):
result_df = DataFrame()
for col in self._data.names:
if col in values:
val = values[col]
result_df[col] = self._data[col].isin(val)
else:
result_df[col] = column.full(
size=len(self), fill_value=False, dtype="bool"
)
result_df.index = self.index
return result_df
elif isinstance(values, Series):
values = values.reindex(self.index)
result = DataFrame()
for col in self._data.names:
if isinstance(
self[col]._column, cudf.core.column.CategoricalColumn
) and isinstance(
values._column, cudf.core.column.CategoricalColumn
):
res = self._data[col] == values._column
result[col] = res
elif (
isinstance(
self[col]._column, cudf.core.column.CategoricalColumn
)
or np.issubdtype(self[col].dtype, np.dtype("object"))
) or (
isinstance(
values._column, cudf.core.column.CategoricalColumn
)
or np.issubdtype(values.dtype, np.dtype("object"))
):
result[col] = utils.scalar_broadcast_to(False, len(self))
else:
result[col] = self._data[col] == values._column
result.index = self.index
return result
elif isinstance(values, DataFrame):
values = values.reindex(self.index)
result = DataFrame()
for col in self._data.names:
if col in values.columns:
result[col] = self._data[col] == values[col]._column
else:
result[col] = utils.scalar_broadcast_to(False, len(self))
result.index = self.index
return result
else:
if not is_list_like(values):
raise TypeError(
f"only list-like or dict-like objects are "
f"allowed to be passed to DataFrame.isin(), "
f"you passed a "
f"'{type(values).__name__}'"
)
result_df = DataFrame()
for col in self._data.names:
result_df[col] = self._data[col].isin(values)
result_df.index = self.index
return result_df
#
# Stats
#
def _prepare_for_rowwise_op(self, method, skipna):
"""Prepare a DataFrame for CuPy-based row-wise operations.
"""
if method not in _cupy_nan_methods_map and any(
col.nullable for col in self._columns
):
msg = (
f"Row-wise operations to calculate '{method}' is not "
f"currently support columns with null values. "
f"Consider removing them with .dropna() "
f"or using .fillna()."
)
raise ValueError(msg)
is_pure_dt = all(is_datetime_dtype(dt) for dt in self.dtypes)
if not is_pure_dt:
filtered = self.select_dtypes(include=[np.number, np.bool])
else:
filtered = self.copy(deep=False)
common_dtype = find_common_type(filtered.dtypes)
if filtered._num_columns < self._num_columns:
msg = (
"Row-wise operations currently only support int, float "
"and bool dtypes. Non numeric columns are ignored."
)
warnings.warn(msg)
if not skipna and any(col.nullable for col in filtered._columns):
mask = cudf.DataFrame(
{
name: filtered._data[name]._get_mask_as_column()
if filtered._data[name].nullable
else column.full(len(filtered._data[name]), True)
for name in filtered._data.names
}
)
mask = mask.all(axis=1)
else:
mask = None
coerced = filtered.astype(common_dtype, copy=False)
if is_pure_dt:
# Further convert into cupy friendly types
coerced = coerced.astype("int64", copy=False)
return coerced, mask, common_dtype
def count(self, axis=0, level=None, numeric_only=False, **kwargs):
"""
Count ``non-NA`` cells for each column or row.
The values ``None``, ``NaN``, ``NaT`` are considered ``NA``.
Returns
-------
Series
For each column/row the number of non-NA/null entries.
Notes
-----
Parameters currently not supported are `axis`, `level`, `numeric_only`.
Examples
--------
>>> import cudf
>>> import numpy as np
>>> df = cudf.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]})
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
"""
if axis not in (0, "index", None):
raise NotImplementedError("Only axis=0 is currently supported.")
return self._apply_support_method(
"count",
axis=axis,
level=level,
numeric_only=numeric_only,
**kwargs,
)
def min(
self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs,
):
"""
Return the minimum of the values in the DataFrame.
Parameters
----------
axis: {index (0), columns(1)}
Axis for the function to be applied on.
skipna: bool, default True
Exclude NA/null values when computing the result.
level: int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a Series.
numeric_only: bool, default None
Include only float, int, boolean columns. If None, will attempt to
use everything, then use only numeric data.
Returns
-------
Series
Notes
-----
Parameters currently not supported are `level`, `numeric_only`.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.min()
a 1
b 7
dtype: int64
"""
return self._apply_support_method(
"min",
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs,
)
def max(
self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs,
):
"""
Return the maximum of the values in the DataFrame.
Parameters
----------
axis: {index (0), columns(1)}
Axis for the function to be applied on.
skipna: bool, default True
Exclude NA/null values when computing the result.
level: int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a Series.
numeric_only: bool, default None
Include only float, int, boolean columns. If None, will attempt to
use everything, then use only numeric data.
Returns
-------
Series
Notes
-----
Parameters currently not supported are `level`, `numeric_only`.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.max()
a 4
b 10
dtype: int64
"""
return self._apply_support_method(
"max",
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs,
)
def sum(
self,
axis=None,
skipna=None,
dtype=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs,
):
"""
Return sum of the values in the DataFrame.
Parameters
----------
axis: {index (0), columns(1)}
Axis for the function to be applied on.
skipna: bool, default True
Exclude NA/null values when computing the result.
dtype: data type
Data type to cast the result to.
min_count: int, default 0
The required number of valid values to perform the operation.
If fewer than min_count non-NA values are present the result
will be NA.
The default being 0. This means the sum of an all-NA or empty
Series is 0, and the product of an all-NA or empty Series is 1.
Returns
-------
Series
Notes
-----
Parameters currently not supported are `level`, `numeric_only`.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.sum()
a 10
b 34
dtype: int64
"""
return self._apply_support_method(
"sum",
axis=axis,
skipna=skipna,
dtype=dtype,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs,
)
def product(
self,
axis=None,
skipna=None,
dtype=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs,
):
"""
Return product of the values in the DataFrame.
Parameters
----------
axis: {index (0), columns(1)}
Axis for the function to be applied on.
skipna: bool, default True
Exclude NA/null values when computing the result.
dtype: data type
Data type to cast the result to.
min_count: int, default 0
The required number of valid values to perform the operation.
If fewer than min_count non-NA values are present the result
will be NA.
The default being 0. This means the sum of an all-NA or empty
Series is 0, and the product of an all-NA or empty Series is 1.
Returns
-------
Series
Notes
-----
Parameters currently not supported are `level`, `numeric_only`.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.product()
a 24
b 5040
dtype: int64
"""
return self._apply_support_method(
"prod",
axis=axis,
skipna=skipna,
dtype=dtype,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs,
)
def prod(
self,
axis=None,
skipna=None,
dtype=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs,
):
"""
Return product of the values in the DataFrame.
Parameters
----------
axis: {index (0), columns(1)}
Axis for the function to be applied on.
skipna: bool, default True
Exclude NA/null values when computing the result.
dtype: data type
Data type to cast the result to.
min_count: int, default 0
The required number of valid values to perform the operation.
If fewer than min_count non-NA values are present the result
will be NA.
The default being 0. This means the sum of an all-NA or empty
Series is 0, and the product of an all-NA or empty Series is 1.
Returns
-------
Series
Notes
-----
Parameters currently not supported are `level`, `numeric_only`.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.prod()
a 24
b 5040
dtype: int64
"""
return self.product(
axis=axis,
skipna=skipna,
dtype=dtype,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs,
)
def cummin(self, axis=None, skipna=True, *args, **kwargs):
"""
Return cumulative minimum of the DataFrame.
Parameters
----------
skipna: bool, default True
Exclude NA/null values. If an entire row/column is NA,
the result will be NA.
Returns
-------
DataFrame
Notes
-----
Parameter currently not supported is `axis`.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.cummin()
a b
0 1 7
1 1 7
2 1 7
3 1 7
"""
if axis not in (0, "index", None):
raise NotImplementedError("Only axis=0 is currently supported.")
return self._apply_support_method(
"cummin", axis=axis, skipna=skipna, *args, **kwargs
)
def cummax(self, axis=None, skipna=True, *args, **kwargs):
"""
Return cumulative maximum of the DataFrame.
Parameters
----------
skipna: bool, default True
Exclude NA/null values. If an entire row/column is NA,
the result will be NA.
Returns
-------
DataFrame
Notes
-----
Parameter currently not supported is `axis`.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.cummax()
a b
0 1 7
1 2 8
2 3 9
3 4 10
"""
if axis not in (0, "index", None):
raise NotImplementedError("Only axis=0 is currently supported.")
return self._apply_support_method(
"cummax", axis=axis, skipna=skipna, *args, **kwargs
)
def cumsum(self, axis=None, skipna=True, *args, **kwargs):
"""
Return cumulative sum of the DataFrame.
Parameters
----------
skipna: bool, default True
Exclude NA/null values. If an entire row/column is NA,
the result will be NA.
Returns
-------
DataFrame
Notes
-----
Parameter currently not supported is `axis`.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.cumsum()
a b
0 1 7
1 3 15
2 6 24
3 10 34
"""
if axis not in (0, "index", None):
raise NotImplementedError("Only axis=0 is currently supported.")
return self._apply_support_method(
"cumsum", axis=axis, skipna=skipna, *args, **kwargs
)
def cumprod(self, axis=None, skipna=True, *args, **kwargs):
"""
Return cumulative product of the DataFrame.
Parameters
----------
skipna: bool, default True
Exclude NA/null values. If an entire row/column is NA,
the result will be NA.
Returns
-------
DataFrame
Notes
-----
Parameter currently not supported is `axis`.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.cumprod()
a b
0 1 7
1 2 56
2 6 504
3 24 5040
"""
if axis not in (0, "index", None):
raise NotImplementedError("Only axis=0 is currently supported.")
return self._apply_support_method(
"cumprod", axis=axis, skipna=skipna, *args, **kwargs
)
def mean(
self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs
):
"""
Return the mean of the values for the requested axis.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}
Axis for the function to be applied on.
skipna : bool, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a Series.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to
use everything, then use only numeric data. Not implemented for
Series.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
mean : Series or DataFrame (if level specified)
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.mean()
a 2.5
b 8.5
dtype: float64
"""
return self._apply_support_method(
"mean",
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs,
)
def mode(self, axis=0, numeric_only=False, dropna=True):
"""
Get the mode(s) of each element along the selected axis.
The mode of a set of values is the value that appears most often.
It can be multiple values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to iterate over while searching for the mode:
- 0 or 'index' : get mode of each column
- 1 or 'columns' : get mode of each row.
numeric_only : bool, default False
If True, only apply to numeric columns.
dropna : bool, default True
Don't consider counts of NA/NaN/NaT.
Returns
-------
DataFrame
The modes of each column or row.
See Also
--------
cudf.core.series.Series.mode : Return the highest frequency value
in a Series.
cudf.core.series.Series.value_counts : Return the counts of values
in a Series.
Notes
-----
``axis`` parameter is currently not supported.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({
... "species": ["bird", "mammal", "arthropod", "bird"],
... "legs": [2, 4, 8, 2],
... "wings": [2.0, None, 0.0, None]
... })
>>> df
species legs wings
0 bird 2 2.0
1 mammal 4 <NA>
2 arthropod 8 0.0
3 bird 2 <NA>
By default, missing values are not considered, and the mode of wings
are both 0 and 2. The second row of species and legs contains ``NA``,
because they have only one mode, but the DataFrame has two rows.
>>> df.mode()
species legs wings
0 bird 2 0.0
1 <NA> <NA> 2.0
Setting ``dropna=False``, ``NA`` values are considered and they can be
the mode (like for wings).
>>> df.mode(dropna=False)
species legs wings
0 bird 2 <NA>
Setting ``numeric_only=True``, only the mode of numeric columns is
computed, and columns of other types are ignored.
>>> df.mode(numeric_only=True)
legs wings
0 2 0.0
1 <NA> 2.0
"""
if axis not in (0, "index"):
raise NotImplementedError("Only axis=0 is currently supported")
if numeric_only:
data_df = self.select_dtypes(
include=[np.number], exclude=["datetime64", "timedelta64"]
)
else:
data_df = self
mode_results = [
data_df[col].mode(dropna=dropna) for col in data_df._data
]
if len(mode_results) == 0:
df = DataFrame(index=self.index)
return df
df = cudf.concat(mode_results, axis=1)
if isinstance(df, Series):
df = df.to_frame()
df.columns = data_df.columns
return df
def std(
self,
axis=None,
skipna=None,
level=None,
ddof=1,
numeric_only=None,
**kwargs,
):
"""
Return sample standard deviation of the DataFrame.
Normalized by N-1 by default. This can be changed using
the `ddof` argument
Parameters
----------
axis: {index (0), columns(1)}
Axis for the function to be applied on.
skipna: bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
ddof: int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is N - ddof, where N represents the number of elements.
Returns
-------
Series
Notes
-----
Parameters currently not supported are `level` and
`numeric_only`
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.std()
a 1.290994
b 1.290994
dtype: float64
"""
return self._apply_support_method(
"std",
axis=axis,
skipna=skipna,
level=level,
ddof=ddof,
numeric_only=numeric_only,
**kwargs,
)
def var(
self,
axis=None,
skipna=None,
level=None,
ddof=1,
numeric_only=None,
**kwargs,
):
"""
Return unbiased variance of the DataFrame.
Normalized by N-1 by default. This can be changed using the
ddof argument
Parameters
----------
axis: {index (0), columns(1)}
Axis for the function to be applied on.
skipna: bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
ddof: int, default 1
Delta Degrees of Freedom. The divisor used in calculations is
N - ddof, where N represents the number of elements.
Returns
-------
Series
Notes
-----
Parameters currently not supported are `level` and
`numeric_only`
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.var()
a 1.666667
b 1.666667
dtype: float64
"""
return self._apply_support_method(
"var",
axis=axis,
skipna=skipna,
level=level,
ddof=ddof,
numeric_only=numeric_only,
**kwargs,
)
def kurtosis(
self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs
):
"""
Return Fisher's unbiased kurtosis of a sample.
Kurtosis obtained using Fisher’s definition of
kurtosis (kurtosis of normal == 0.0). Normalized by N-1.
Parameters
----------
skipna: bool, default True
Exclude NA/null values when computing the result.
Returns
-------
Series
Notes
-----
Parameters currently not supported are `axis`, `level` and
`numeric_only`
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.kurt()
a -1.2
b -1.2
dtype: float64
"""
if axis not in (0, "index", None):
raise NotImplementedError("Only axis=0 is currently supported.")
if numeric_only not in (None, True):
msg = "Kurtosis only supports int, float, and bool dtypes."
raise NotImplementedError(msg)
self = self.select_dtypes(include=[np.number, np.bool])
return self._apply_support_method(
"kurtosis",
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs,
)
# Alias for kurtosis.
kurt = kurtosis
def skew(
self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs
):
"""
Return unbiased Fisher-Pearson skew of a sample.
Parameters
----------
skipna: bool, default True
Exclude NA/null values when computing the result.
Returns
-------
Series
Notes
-----
Parameters currently not supported are `axis`, `level` and
`numeric_only`
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [3, 2, 3, 4], 'b': [7, 8, 10, 10]})
>>> df.skew()
a 0.00000
b -0.37037
dtype: float64
"""
if axis not in (0, "index", None):
raise NotImplementedError("Only axis=0 is currently supported.")
if numeric_only not in (None, True):
msg = "Skew only supports int, float, and bool dtypes."
raise NotImplementedError(msg)
self = self.select_dtypes(include=[np.number, np.bool])
return self._apply_support_method(
"skew",
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs,
)
def all(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
"""
Return whether all elements are True in DataFrame.
Parameters
----------
skipna: bool, default True
Exclude NA/null values. If the entire row/column is NA and
skipna is True, then the result will be True, as for an
empty row/column.
If skipna is False, then NA are treated as True, because
these are not equal to zero.
Returns
-------
Series
Notes
-----
Parameters currently not supported are `axis`, `bool_only`, `level`.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [3, 2, 3, 4], 'b': [7, 0, 10, 10]})
>>> df.all()
a True
b False
dtype: bool
"""
if bool_only:
return self.select_dtypes(include="bool")._apply_support_method(
"all",
axis=axis,
bool_only=bool_only,
skipna=skipna,
level=level,
**kwargs,
)
return self._apply_support_method(
"all",
axis=axis,
bool_only=bool_only,
skipna=skipna,
level=level,
**kwargs,
)
def any(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
"""
Return whether any elements is True in DataFrame.
Parameters
----------
skipna: bool, default True
Exclude NA/null values. If the entire row/column is NA and
skipna is True, then the result will be False, as for an
empty row/column.
If skipna is False, then NA are treated as True, because
these are not equal to zero.
Returns
-------
Series
Notes
-----
Parameters currently not supported are `axis`, `bool_only`, `level`.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [3, 2, 3, 4], 'b': [7, 0, 10, 10]})
>>> df.any()
a True
b True
dtype: bool
"""
if bool_only:
return self.select_dtypes(include="bool")._apply_support_method(
"any",
axis=axis,
bool_only=bool_only,
skipna=skipna,
level=level,
**kwargs,
)
return self._apply_support_method(
"any",
axis=axis,
bool_only=bool_only,
skipna=skipna,
level=level,
**kwargs,
)
def _apply_support_method(self, method, axis=0, *args, **kwargs):
assert axis in (None, 0, 1)
if axis in (None, 0):
result = [
getattr(self[col], method)(*args, **kwargs)
for col in self._data.names
]
if isinstance(result[0], Series):
support_result = result
result = DataFrame(index=support_result[0].index)
for idx, col in enumerate(self._data.names):
result[col] = support_result[idx]
else:
result = Series(result)
result = result.set_index(self._data.names)
return result
elif axis == 1:
# for dask metadata compatibility
skipna = kwargs.pop("skipna", None)
if method not in _cupy_nan_methods_map and skipna not in (
None,
True,
1,
):
raise NotImplementedError(
f"Row-wise operation to calculate '{method}'"
f" currently do not support `skipna=False`."
)
level = kwargs.pop("level", None)
if level not in (None,):
raise NotImplementedError(
"Row-wise operations currently do not support `level`."
)
numeric_only = kwargs.pop("numeric_only", None)
if numeric_only not in (None, True):
raise NotImplementedError(
"Row-wise operations currently do not "
"support `numeric_only=False`."
)
min_count = kwargs.pop("min_count", None)
if min_count not in (None, 0):
raise NotImplementedError(
"Row-wise operations currently do not "
"support `min_count`."
)
bool_only = kwargs.pop("bool_only", None)
if bool_only not in (None, True):
raise NotImplementedError(
"Row-wise operations currently do not "
"support `bool_only`."
)
prepared, mask, common_dtype = self._prepare_for_rowwise_op(
method, skipna
)
for col in prepared._data.names:
if prepared._data[col].nullable:
prepared._data[col] = (
prepared._data[col]
.astype(
cudf.utils.dtypes.get_min_float_dtype(
prepared._data[col]
)
if not is_datetime_dtype(common_dtype)
else np.dtype("float64")
)
.fillna(np.nan)
)
arr = cupy.asarray(prepared.as_gpu_matrix())
if skipna is not False and method in _cupy_nan_methods_map:
method = _cupy_nan_methods_map[method]
result = getattr(cupy, method)(arr, axis=1, **kwargs)
if result.ndim == 1:
type_coerced_methods = {
"count",
"min",
"max",
"sum",
"prod",
"cummin",
"cummax",
"cumsum",
"cumprod",
}
result_dtype = (
common_dtype
if method in type_coerced_methods
or is_datetime_dtype(common_dtype)
else None
)
result = column.as_column(result, dtype=result_dtype)
if mask is not None:
result = result.set_mask(
cudf._lib.transform.bools_to_mask(mask._column)
)
return Series(result, index=self.index, dtype=result_dtype,)
else:
result_df = DataFrame(result).set_index(self.index)
result_df.columns = prepared.columns
return result_df
def _columns_view(self, columns):
"""
Return a subset of the DataFrame's columns as a view.
"""
result_columns = OrderedDict({})
for col in columns:
result_columns[col] = self._data[col]
return DataFrame(result_columns, index=self.index)
def select_dtypes(self, include=None, exclude=None):
"""Return a subset of the DataFrame’s columns based on the column dtypes.
Parameters
----------
include : str or list
which columns to include based on dtypes
exclude : str or list
which columns to exclude based on dtypes
Returns
-------
DataFrame
The subset of the frame including the dtypes
in ``include`` and excluding the dtypes in ``exclude``.
Raises
------
ValueError
- If both of ``include`` and ``exclude`` are empty
- If ``include`` and ``exclude`` have overlapping elements
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0
"""
# code modified from:
# https://github.com/pandas-dev/pandas/blob/master/pandas/core/frame.py#L3196
if not isinstance(include, (list, tuple)):
include = (include,) if include is not None else ()
if not isinstance(exclude, (list, tuple)):
exclude = (exclude,) if exclude is not None else ()
df = DataFrame(index=self.index)
# cudf_dtype_from_pydata_dtype can distinguish between
# np.float and np.number
selection = tuple(map(frozenset, (include, exclude)))
if not any(selection):
raise ValueError(
"at least one of include or exclude must be nonempty"
)
include, exclude = map(
lambda x: frozenset(map(cudf_dtype_from_pydata_dtype, x)),
selection,
)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError(
f"include and exclude overlap on {(include & exclude)}"
)
# include all subtypes
include_subtypes = set()
for dtype in self.dtypes:
for i_dtype in include:
# category handling
if is_categorical_dtype(i_dtype):
include_subtypes.add(i_dtype)
elif issubclass(dtype.type, i_dtype):
include_subtypes.add(dtype.type)
# exclude all subtypes
exclude_subtypes = set()
for dtype in self.dtypes:
for e_dtype in exclude:
# category handling
if is_categorical_dtype(e_dtype):
exclude_subtypes.add(e_dtype)
elif issubclass(dtype.type, e_dtype):
exclude_subtypes.add(dtype.type)
include_all = set(
[cudf_dtype_from_pydata_dtype(d) for d in self.dtypes]
)
if include:
inclusion = include_all & include_subtypes
elif exclude:
inclusion = include_all
else:
inclusion = set()
# remove all exclude types
inclusion = inclusion - exclude_subtypes
for k, col in self._data.items():
infered_type = cudf_dtype_from_pydata_dtype(col.dtype)
if infered_type in inclusion:
df.insert(len(df._data), k, col)
return df
@ioutils.doc_to_parquet()
def to_parquet(self, path, *args, **kwargs):
"""{docstring}"""
from cudf.io import parquet as pq
return pq.to_parquet(self, path, *args, **kwargs)
@ioutils.doc_to_feather()
def to_feather(self, path, *args, **kwargs):
"""{docstring}"""
from cudf.io import feather as feather
feather.to_feather(self, path, *args, **kwargs)
@ioutils.doc_to_json()
def to_json(self, path_or_buf=None, *args, **kwargs):
"""{docstring}"""
from cudf.io import json as json
return json.to_json(self, path_or_buf=path_or_buf, *args, **kwargs)
@ioutils.doc_to_hdf()
def to_hdf(self, path_or_buf, key, *args, **kwargs):
"""{docstring}"""
from cudf.io import hdf as hdf
hdf.to_hdf(path_or_buf, key, self, *args, **kwargs)
@ioutils.doc_to_dlpack()
def to_dlpack(self):
"""{docstring}"""
from cudf.io import dlpack as dlpack
return dlpack.to_dlpack(self)
@ioutils.doc_dataframe_to_csv()
def to_csv(
self,
path_or_buf=None,
sep=",",
na_rep="",
columns=None,
header=True,
index=True,
line_terminator="\n",
chunksize=None,
):
"""{docstring}"""
from cudf.io import csv as csv
return csv.to_csv(
self,
path_or_buf=path_or_buf,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
index=index,
line_terminator=line_terminator,
chunksize=chunksize,
)
@ioutils.doc_to_orc()
def to_orc(self, fname, compression=None, *args, **kwargs):
"""{docstring}"""
from cudf.io import orc as orc
orc.to_orc(self, fname, compression, *args, **kwargs)
def stack(self, level=-1, dropna=True):
"""Stack the prescribed level(s) from columns to index
Return a reshaped Series
Parameters
----------
dropna : bool, default True
Whether to drop rows in the resulting Series with missing values.
Returns
-------
The stacked cudf.Series
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a':[0,1,3], 'b':[1,2,4]})
>>> df.stack()
0 a 0
b 1
1 a 1
b 2
2 a 3
b 4
dtype: int64
"""
assert level in (None, -1)
repeated_index = self.index.repeat(self.shape[1])
name_index = Frame({0: self._column_names}).tile(self.shape[0])
new_index = list(repeated_index._columns) + [name_index._columns[0]]
new_index = cudf.core.multiindex.MultiIndex.from_frame(
DataFrame(dict(zip(range(0, len(new_index)), new_index)))
)
# Collect datatypes and cast columns as that type
common_type = np.result_type(*self.dtypes)
homogenized = DataFrame(
{
c: (
self._data[c].astype(common_type)
if not np.issubdtype(self._data[c].dtype, common_type)
else self._data[c]
)
for c in self._data
}
)
data_col = libcudf.reshape.interleave_columns(homogenized)
result = Series(data=data_col, index=new_index)
if dropna:
return result.dropna()
else:
return result
def cov(self, **kwargs):
"""Compute the covariance matrix of a DataFrame.
Parameters
----------
**kwargs
Keyword arguments to be passed to cupy.cov
Returns
-------
cov : DataFrame
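Examples
--------
Illustrative usage; values assume the cupy.cov default of ``ddof=1``:
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.cov()
          a         b
a  1.666667  1.666667
b  1.666667  1.666667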
"""
cov = cupy.cov(self.values, rowvar=False)
df = DataFrame(cupy.asfortranarray(cov)).set_index(self.columns)
df.columns = self.columns
return df
def corr(self):
"""Compute the correlation matrix of a DataFrame.
"""
corr = cupy.corrcoef(self.values, rowvar=False)
df = DataFrame(cupy.asfortranarray(corr)).set_index(self.columns)
df.columns = self.columns
return df
def to_dict(self, orient="dict", into=dict):
raise TypeError(
"cuDF does not support conversion to host memory "
"via `to_dict()` method. Consider using "
"`.to_pandas().to_dict()` to construct a Python dictionary."
)
def keys(self):
"""
Get the columns.
This is index for Series, columns for DataFrame.
Returns
-------
Index
Columns of DataFrame.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'one' : [1, 2, 3], 'five' : ['a', 'b', 'c']})
>>> df
one five
0 1 a
1 2 b
2 3 c
>>> df.keys()
Index(['one', 'five'], dtype='object')
>>> df = cudf.DataFrame(columns=[0, 1, 2, 3])
>>> df
Empty DataFrame
Columns: [0, 1, 2, 3]
Index: []
>>> df.keys()
Int64Index([0, 1, 2, 3], dtype='int64')
"""
return self.columns
def itertuples(self, index=True, name="Pandas"):
raise TypeError(
"cuDF does not support iteration of DataFrame "
"via itertuples. Consider using "
"`.to_pandas().itertuples()` "
"if you wish to iterate over namedtuples."
)
def iterrows(self):
raise TypeError(
"cuDF does not support iteration of DataFrame "
"via iterrows. Consider using "
"`.to_pandas().iterrows()` "
"if you wish to iterate over each row."
)
def append(
self, other, ignore_index=False, verify_integrity=False, sort=False
):
"""
Append rows of `other` to the end of caller, returning a new object.
Columns in `other` that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : bool, default False
If True, do not use the index labels.
sort : bool, default False
Sort columns ordering if the columns of
`self` and `other` are not aligned.
verify_integrity : bool, default False
This Parameter is currently not supported.
Returns
-------
DataFrame
See Also
--------
cudf.core.reshape.concat : General function to concatenate DataFrame or
Series objects.
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a cudf DataFrame can be more
computationally intensive than a single concatenate. A better
solution is to append those rows to a list and then concatenate
the list with the original DataFrame all at once.
`verify_integrity` parameter is not supported yet.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = cudf.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df2
A B
0 5 6
1 7 8
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
The following, while not recommended methods for generating DataFrames,
show two ways to generate a DataFrame from multiple data sources.
Less efficient:
>>> df = cudf.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient than above:
>>> cudf.concat([cudf.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if verify_integrity not in (None, False):
raise NotImplementedError(
"verify_integrity parameter is not supported yet."
)
if isinstance(other, dict):
if not ignore_index:
raise TypeError("Can only append a dict if ignore_index=True")
other = DataFrame(other)
result = cudf.concat(
[self, other], ignore_index=ignore_index, sort=sort
)
return result
elif isinstance(other, Series):
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True "
"or if the Series has a name"
)
current_cols = self.columns
combined_columns = other.index.to_pandas()
if len(current_cols):
if cudf.utils.dtypes.is_mixed_with_object_dtype(
current_cols, combined_columns
):
raise TypeError(
"cudf does not support mixed types, please type-cast "
"the column index of dataframe and index of series "
"to same dtypes."
)
combined_columns = current_cols.union(
combined_columns, sort=False
)
if sort:
combined_columns = combined_columns.sort_values()
other = other.reindex(combined_columns, copy=False).to_frame().T
if not current_cols.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list):
if not other:
pass
elif not isinstance(other[0], cudf.DataFrame):
other = cudf.DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.reindex(columns=self.columns)
if is_list_like(other):
to_concat = [self, *other]
else:
to_concat = [self, other]
return cudf.concat(to_concat, ignore_index=ignore_index, sort=sort)
@copy_docstring(reshape.pivot)
def pivot(self, index, columns, values=None):
return cudf.core.reshape.pivot(
self, index=index, columns=columns, values=values
)
@copy_docstring(reshape.unstack)
def unstack(self, level=-1, fill_value=None):
return cudf.core.reshape.unstack(
self, level=level, fill_value=fill_value
)
def equals(self, other):
if not isinstance(other, DataFrame):
return False
for self_name, other_name in zip(self._data.names, other._data.names):
if self_name != other_name:
return False
return super().equals(other)
_accessors = set()
def from_pandas(obj, nan_as_null=None):
"""
Convert certain Pandas objects into the cudf equivalent.
Supports DataFrame, Series, Index, or MultiIndex.
Returns
-------
DataFrame/Series/Index/MultiIndex
Return type depends on the passed input.
Raises
------
TypeError for invalid input type.
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> data = [[0, 1], [1, 2], [3, 4]]
>>> pdf = pd.DataFrame(data, columns=['a', 'b'], dtype=int)
>>> pdf
a b
0 0 1
1 1 2
2 3 4
>>> gdf = cudf.from_pandas(pdf)
>>> gdf
a b
0 0 1
1 1 2
2 3 4
>>> type(gdf)
<class 'cudf.core.dataframe.DataFrame'>
>>> type(pdf)
<class 'pandas.core.frame.DataFrame'>
Converting a Pandas Series to cuDF Series:
>>> psr = pd.Series(['a', 'b', 'c', 'd'], name='apple')
>>> psr
0 a
1 b
2 c
3 d
Name: apple, dtype: object
>>> gsr = cudf.from_pandas(psr)
>>> gsr
0 a
1 b
2 c
3 d
Name: apple, dtype: object
>>> type(gsr)
<class 'cudf.core.series.Series'>
>>> type(psr)
<class 'pandas.core.series.Series'>
Converting a Pandas Index to cuDF Index:
>>> pidx = pd.Index([1, 2, 10, 20])
>>> pidx
Int64Index([1, 2, 10, 20], dtype='int64')
>>> gidx = cudf.from_pandas(pidx)
>>> gidx
Int64Index([1, 2, 10, 20], dtype='int64')
>>> type(gidx)
<class 'cudf.core.index.Int64Index'>
>>> type(pidx)
<class 'pandas.core.indexes.numeric.Int64Index'>
Converting a Pandas MultiIndex to cuDF MultiIndex:
>>> pmidx = pd.MultiIndex(
... levels=[[1, 3, 4, 5], [1, 2, 5]],
... codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
... names=["x", "y"],
... )
>>> pmidx
MultiIndex([(1, 1),
(1, 5),
(3, 2),
(4, 2),
(5, 1)],
names=['x', 'y'])
>>> gmidx = cudf.from_pandas(pmidx)
>>> gmidx
MultiIndex([(1, 1),
(1, 5),
(3, 2),
(4, 2),
(5, 1)],
names=['x', 'y'])
>>> type(gmidx)
<class 'cudf.core.multiindex.MultiIndex'>
>>> type(pmidx)
<class 'pandas.core.indexes.multi.MultiIndex'>
"""
if isinstance(obj, pd.DataFrame):
return DataFrame.from_pandas(obj, nan_as_null=nan_as_null)
elif isinstance(obj, pd.Series):
return Series.from_pandas(obj, nan_as_null=nan_as_null)
elif isinstance(obj, pd.MultiIndex):
return cudf.MultiIndex.from_pandas(obj, nan_as_null=nan_as_null)
elif isinstance(obj, pd.RangeIndex):
return cudf.core.index.RangeIndex(
start=obj.start, stop=obj.stop, step=obj.step, name=obj.name
)
elif isinstance(obj, pd.Index):
return cudf.Index.from_pandas(obj, nan_as_null=nan_as_null)
elif isinstance(obj, pd.CategoricalDtype):
return cudf.CategoricalDtype.from_pandas(obj)
else:
raise TypeError(
"from_pandas only accepts Pandas Dataframes, Series, "
"Index, RangeIndex and MultiIndex objects. "
"Got %s" % type(obj)
)
def merge(left, right, *args, **kwargs):
return left.merge(right, *args, **kwargs)
# a bit of fanciness to inject docstring with left parameter
merge_doc = DataFrame.merge.__doc__
idx = merge_doc.find("right")
merge.__doc__ = "".join(
[merge_doc[:idx], "\n\tleft : DataFrame\n\t", merge_doc[idx:]]
)
def _align_indices(lhs, rhs):
"""
Internal util to align the indices of two DataFrames. Returns a tuple of
the aligned dataframes, or the original arguments if the indices are the
same, or if rhs isn't a DataFrame.
"""
lhs_out, rhs_out = lhs, rhs
if isinstance(rhs, DataFrame) and not lhs.index.equals(rhs.index):
df = lhs.merge(
rhs,
sort=True,
how="outer",
left_index=True,
right_index=True,
suffixes=("_x", "_y"),
)
df = df.sort_index()
lhs_out = DataFrame(index=df.index)
rhs_out = DataFrame(index=df.index)
common = set(lhs.columns) & set(rhs.columns)
common_x = set(["{}_x".format(x) for x in common])
common_y = set(["{}_y".format(x) for x in common])
for col in df.columns:
if col in common_x:
lhs_out[col[:-2]] = df[col]
elif col in common_y:
rhs_out[col[:-2]] = df[col]
elif col in lhs:
lhs_out[col] = df[col]
elif col in rhs:
rhs_out[col] = df[col]
return lhs_out, rhs_out
def _setitem_with_dataframe(input_df, replace_df, input_cols=None, mask=None):
"""
This function sets item dataframes relevant columns with replacement df
:param input_df: Dataframe to be modified inplace
:param replace_df: Replacement DataFrame to replace values with
:param input_cols: columns to replace in the input dataframe
:param mask: boolean mask in case of masked replacing
"""
if input_cols is None:
input_cols = input_df.columns
if len(input_cols) != len(replace_df.columns):
raise ValueError(
"Number of Input Columns must be same replacement Dataframe"
)
for col_1, col_2 in zip(input_cols, replace_df.columns):
if col_1 in input_df.columns:
if mask is not None:
input_df._data[col_1][mask] = column.as_column(
replace_df[col_2]
)
else:
input_df._data[col_1] = column.as_column(replace_df[col_2])
else:
if mask is not None:
raise ValueError("Can not insert new column with a bool mask")
else:
# handle append case
input_df.insert(len(input_df._data), col_1, replace_df[col_2])
def extract_col(df, col):
"""
Extract the column named `col` from dataframe `df`.
If `col` is "index" and there is no column named `index`,
the index column is returned instead.
"""
try:
return df._data[col]
except KeyError:
if (
col == "index"
and col not in df.index._data
and not isinstance(df.index, cudf.MultiIndex)
):
return df.index._data.columns[0]
return df.index._data[col]
def _get_union_of_indices(indexes):
if len(indexes) == 1:
return indexes[0]
else:
merged_index = cudf.core.Index._concat(indexes)
merged_index = merged_index.drop_duplicates()
_, inds = merged_index._values.sort_by_values()
return merged_index.take(inds)
def _get_union_of_series_names(series_list):
names_list = []
unnamed_count = 0
for series in series_list:
if series.name is None:
names_list.append(f"Unnamed {unnamed_count}")
unnamed_count += 1
else:
names_list.append(series.name)
if unnamed_count == len(series_list):
names_list = [*range(len(series_list))]
return names_list
def _drop_columns(df, columns, errors):
for c in columns:
try:
df._drop_column(c)
except KeyError as e:
if errors == "ignore":
pass
else:
raise e
def _get_host_unique(array):
if isinstance(
array, (cudf.Series, cudf.Index, cudf.core.column.ColumnBase)
):
return array.unique().to_pandas()
elif isinstance(array, (str, numbers.Number)):
return [array]
else:
return set(array)
|
import datetime
import pytest
from flashbriefing.models import Feed, Item, ItemType
@pytest.mark.django_db
def test_item_type_audio():
feed = Feed.objects.create(title='FEED')
item = Item.objects.create(
feed=feed, title='ITEM', audio_content='/audio.mp3',
published_date=datetime.datetime.utcnow())
assert item.item_type == ItemType.AUDIO
@pytest.mark.django_db
def test_item_type_text():
feed = Feed.objects.create(title='FEED')
item = Item.objects.create(
feed=feed, title='ITEM', published_date=datetime.datetime.utcnow())
assert item.item_type == ItemType.TEXT
|
#sqlite connect and interact with db
#sqlite db name test.sqlite
#sqlite db location c:\python27\
#db tables = users (name)
import sqlite3 #imports sqlite module
conn = sqlite3.connect('test.sqlite') #connects to the test.sqlite db
c = conn.cursor()
#variable define section
tablename1 = 'users'
newfield = 'username'
newfield1 = 'password'
newfield2 = 'assigned_port'
newfield3 = 'lat'
newfield4 = 'long'
newfield5 = 'steps'
newfield6 = 'dummylogin'
newfield7 = 'dummyint'
newfield8 = 'dummypass'
newfield9 = 'fname'
fieldtype1 = 'INTEGER'
fieldtype2 = 'STRING'
fieldtype3 = 'FLOAT'
#creating a new sqlite column for each required field
c.execute('alter table {tn} add column {cn} {ct}'\
.format(tn=tablename1, cn = newfield9, ct=fieldtype2))
c.execute('alter table {tn} add column {cn} {ct}'\
.format(tn=tablename1, cn=newfield, ct=fieldtype2))
c.execute('alter table {tn} add column {cn} {ct}'\
.format(tn=tablename1, cn=newfield1, ct=fieldtype2))
c.execute('alter table {tn} add column {cn} {ct}'\
.format(tn=tablename1, cn=newfield2, ct=fieldtype1))
c.execute('alter table {tn} add column {cn} {ct}'\
.format(tn=tablename1, cn=newfield3, ct=fieldtype3))
c.execute('alter table {tn} add column {cn} {ct}'\
.format(tn=tablename1, cn=newfield4, ct=fieldtype3))
c.execute('alter table {tn} add column {cn} {ct}'\
.format(tn=tablename1, cn=newfield5, ct=fieldtype1))
c.execute('alter table {tn} add column {cn} {ct}'\
.format(tn=tablename1, cn=newfield6, ct=fieldtype2))
c.execute('alter table {tn} add column {cn} {ct}'\
.format(tn=tablename1, cn=newfield7, ct=fieldtype1))
c.execute('alter table {tn} add column {cn} {ct}'\
.format(tn=tablename1, cn=newfield8, ct=fieldtype2))
conn.commit() #commits (saves) changes to the db
conn.close() #closes the connection to the db
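#optional helper: a more compact way of adding the same columns (a sketch;
#use it instead of the ten repeated alter statements above, not in addition)
def add_columns(cursor, table, columns):
    #add each (column name, column type) pair to the table via alter table
    for name, ctype in columns:
        cursor.execute('alter table {tn} add column {cn} {ct}'
                       .format(tn=table, cn=name, ct=ctype))
#example call, equivalent to the statements above:
#add_columns(c, tablename1, [(newfield9, fieldtype2), (newfield, fieldtype2),
#    (newfield1, fieldtype2), (newfield2, fieldtype1), (newfield3, fieldtype3),
#    (newfield4, fieldtype3), (newfield5, fieldtype1), (newfield6, fieldtype2),
#    (newfield7, fieldtype1), (newfield8, fieldtype2)])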
|
Entity.objects.get(name__icontains=u'ed')
|
from constant import *
from common_object import Variant, Boundary, Loop
import vcf
import sys
import random
import re
import numpy as np
import warnings
def extract_variants(input_file, sample, vt=None, svtype=None, notgt=None, qual=30, all_variant=False):
'''
In PyVCF, start and end are 0-based while POS is 1-based.
start and end are used here, so variant coordinates are 0-based.
'''
vcf_reader = vcf.Reader(open(input_file, 'r'))
list_variant = []
for rc in vcf_reader:
# before 'or' is for k562 data, set notgt == None for K562
# after is for NA12878
if all_variant or (
rc.QUAL is None and notgt is None and (sample is None or sample not in rc._sample_indexes)) or \
(rc.QUAL > qual and rc.genotype(sample)['GT'] != notgt):
# skip the svtype check when svtype is not given (some variant types have no subtype)
if all_variant or ((vt is None or vt.lower() == rc.var_type.lower()) and \
(svtype is None or (svtype is not None and svtype.lower() in rc.var_subtype.lower()))):
dvt = rc.var_type
dsvtype = rc.var_subtype
if dsvtype is None:
dsvtype = '.'
start = rc.start
# print(rc.INFO)
if 'END' in rc.INFO:
end = rc.sv_end
else:
end = start + len(rc.REF)
af = -1
if 'AF' in rc.INFO:
af = rc.INFO['AF']
gt = "."
if sample in rc._sample_indexes: # if vcf file contains sample info
gt = rc.genotype(sample)['GT']
if gt == '0|0':
print('no mutation here')
continue
if gt == "." and not all_variant:
print(start, end, dvt, dsvtype)
print(str(rc))
break
# print(str(start) + " " + str(end) + " " + rc.var_type + " " + str(rc.var_subtype) + " " + str(rc.INFO))
#chrom, start, end, vt, subtype, ref, alt, gt = "1|1"
var = Variant(rc.CHROM, start, end, dvt, dsvtype, rc.REF, rc.ALT, gt)
var.af = af
var.id = rc.ID
list_variant.append(var)
list_variant = sorted(list_variant, key=lambda x: (x.chrid, x.start))
return list_variant
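# Illustrative call; the file path, sample name, and filter values below are
# hypothetical:
# snvs = extract_variants('calls.vcf', sample='NA12878', vt='snp', notgt='0|0', qual=30)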
def normalize_region_len(reg, size=REGION_SIZE):
if type(reg) == list:
for x in reg:
normalize_region_len(x)
return
if reg.end - reg.start == size:
return
mid = (reg.start + reg.end) / 2
reg.start = max(0, int(mid - size/2))
reg.end = int(mid + size/2)
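def test_normalize_region_len():
    # Minimal illustrative check, in the style of the other test_* helpers in
    # this module; assumes Boundary(chrom, start, end) as used in those tests.
    reg = Boundary('chr1', 100, 200)
    normalize_region_len(reg, size=50)
    assert reg.start == 125 and reg.end == 175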
def is_sig_overlap(reg1, reg2):
reg_len = min(reg1.end - reg1.start, reg2.end - reg2.start)
if reg1.overlap(reg2) >= 0.9 * reg_len:
return True
return False
def merge_overlap_region(regs, overlap_rate=0.5):
'''
Merge regions that overlap by more than ``overlap_rate`` (default 50%) into a single region
:param regs: list of regions to merge
:param overlap_rate: minimum overlap ratio, relative to the shorter region, required for merging
:return: a new list of regions
'''
regs = sorted(regs, key=lambda x: (x.chrid, x.start, x.end))
i = 0
for j in range(1, len(regs)):
overlap_len = regs[i].overlap(regs[j])
reg_len = min(regs[i].end - regs[i].start, regs[j].end - regs[j].start)
maxend = max(regs[i].end, regs[j].end)
minstart = min(regs[i].start, regs[j].start)
# if regs[j].chrom == 'chr10' and regs[j].start == 95115793 and regs[j].end == 95119281:
# print(str(regs[i]), maxend - minstart, overlap_len, reg_len)
#
# if regs[i].chrom == 'chr10' and regs[i].start == 95115793 and regs[i].end == 95119281:
# print('i', str(regs[j]), maxend - minstart, overlap_len, reg_len)
# if regs[j] not overlap with the last regs[i]
# if overlap_len < reg_len/2: or #if not is_sig_overlap(regs[i], regs[j]):
# don't merge if results too big region
if overlap_len < reg_len * overlap_rate or maxend - minstart > REGION_SIZE * 1.5:
regs[i + 1] = regs[j]
i += 1
else:
# combine regs[i] and regs[j]
regs[i].end = max(regs[i].end, regs[j].end)
regs[i].start = min(regs[i].start, regs[j].start)
if regs[i].ctcf_dir == -1:
regs[i].ctcf_dir = regs[j].ctcf_dir
elif regs[i].ctcf_dir != regs[j].ctcf_dir and regs[j].ctcf_dir != -1:
regs[i].ctcf_dir = 2
regs = regs[: i + 1]
return(regs)
def test_merge_overlap_region():
reg1 = Boundary('chr1', 10, 20)
reg2 = Boundary('chr1', 15, 30)
reg3 = Boundary('chr1', 28, 40)
rl = [reg1, reg2, reg3, reg1]
newreg1 = Boundary('chr1', 10, 30)
merged_rl = merge_overlap_region(rl)
assert len(merged_rl) == 2
assert merged_rl[0] == newreg1
#print(merged_rl[1])
assert merged_rl[1] == reg3
'''------------'''
reg1 = Boundary('chr1', 10, 20)
reg2 = Boundary('chr2', 15, 30)
reg3 = Boundary('chr3', 28, 40)
rl = [reg1, reg2, reg3]
merged_rl = merge_overlap_region(rl)
assert len(merged_rl) == 3
assert merged_rl[0] == reg1
assert merged_rl[1] == reg2
def overlap_variants(regions, variants, noLargeSV=False):
'''
assign variants to the regions they overlap
noLargeSV: if True, SVs that cover the whole region are ignored
Important note: a variant can be in multiple regions
'''
regions = sorted(regions, key=lambda x: (x.chrid, x.start, x.end))
#remove duplicate region objects if there is any
reglist = [regions[0]]
for i in range(1, len(regions)):
if regions[i] is not regions[i - 1]:
reglist.append(regions[i])
regions = reglist
variants = sorted(variants, key=lambda x: (x.chrid, x.start, x.end))
for x in regions:
x.variants = []
lastid = 0
for reg in regions:
while lastid < len(variants) and (variants[lastid].chrid < reg.chrid
or (variants[lastid].chrid == reg.chrid and variants[lastid].end < reg.start)):
lastid += 1
for j in range(lastid, len(variants)):
if reg.overlap(variants[j]) > 0:
if not noLargeSV or (noLargeSV and reg.overlap(variants[j]) < reg.end - reg.start):
#print(reg, variants[j], reg.overlap(variants[j]), reg.end - reg.start)
reg.variants.append(variants[j])
elif variants[j].chrid > reg.chrid or (variants[j].chrid == reg.chrid and variants[j].start > reg.end):
break
##remove duplicate variants in regions
# for reg in regions:
# if len(reg.variants) == 0:
# continue
#
# varlist = [reg.variants[0]]
# reg.variants = sorted(reg.variants, key=lambda x: (x.chrid, x.start, x.end))
# for i in range(1, len(reg.variants)):
# if reg.variants[i].chrid != reg.variants[i - 1].chrid or reg.variants[i].start != reg.variants[i - 1].start\
# or reg.variants[i].end != reg.variants[i - 1].end:
#
# varlist.append(reg.variants[i])
#
# reg.variants = varlist[:]
def test_overlap_variants():
reg1 = Boundary('chr1', 10, 20)
reg2 = Boundary('chr1', 15, 30)
reg3 = Boundary('chr1', 28, 40)
regions = [reg1, reg2, reg3]
#chrom, start, end, vt, subtype, ref, alt
var1 = Variant('chr1', 11, 12, 'snp','','A','T')
var2 = Variant('chr1', 16, 17, 'snp', '', 'A', 'T')
var3 = Variant('chr1', 30, 31, 'snp', '', 'A', 'T')
var4 = Variant('chr1', 10, 41, 'snp', '', 'A', 'T')
variants = [var1, var2, var3, var4]
overlap_variants(regions, variants, noLargeSV=True)
assert len(reg1.variants) == 2
assert reg1.variants[0] == var1
assert reg1.variants[1] == var2
assert len(reg2.variants) == 1
assert reg2.variants[0] == var2
assert len(reg3.variants) == 1
assert reg3.variants[0] == var3
def intersect_list(regs1,regs2, overlap_rate=0.5):
'''
Return regions in regs1 that overlap (>50%) with regions in regs2
:param regs1:
:param regs2:
:return:
'''
regs1 = sorted(regs1, key=lambda x: (x.chrid, x.start, x.end))
regs2 = sorted(regs2, key=lambda x: (x.chrid, x.start, x.end))
lasti2 = 0
rs = []
for r1 in regs1:
while lasti2 < len(regs2) and (regs2[lasti2].chrid < r1.chrid or
(regs2[lasti2].chrid == r1.chrid and regs2[lasti2].end < r1.start)):
lasti2 += 1
for i2 in range(lasti2, len(regs2)):
r2 = regs2[i2]
if r1.overlap(r2) >= min(r2.end - r2.start, r1.end - r1.start) * overlap_rate:  # overlap_rate of the shorter region must be overlapped to be considered
rs.append(r1)
break
if r1.chrid < r2.chrid or (r1.chrid == r2.chrid and r1.end < r2.start):
#lasti2 = i2
break
return rs
def test_intersect_list():
reg0 = Boundary('chr1', 0, 10)
reg1 = Boundary('chr1', 10, 20)
reg2 = Boundary('chr1', 30, 40)
reg3 = Boundary('chr1', 50, 60)
reg4 = Boundary('chr1', 13, 18)
reg5 = Boundary('chr1', 32, 38)
reg6 = Boundary('chr1', 55, 65)
regs1 = [reg0, reg1, reg2, reg3]
regs2 = [reg4, reg5, reg6]
rs = intersect_list(regs1, regs2)
assert len(rs) == 3
assert rs[0] == reg1
assert rs[1] == reg2
def subtract_list(regs1,regs2, overlap_rate=0.9):
'''
overlap_rate: used to decide whether two regions count as the same region
Return regions in regs1 that do not overlap (by more than ``overlap_rate``) with any region in regs2
:param regs1:
:param regs2:
:return:
'''
regs1 = sorted(regs1, key=lambda x: (x.chrid, x.start, x.end))
regs2 = sorted(regs2, key=lambda x: (x.chrid, x.start, x.end))
lasti2 = 0
rs = []
for r1 in regs1:
isin = False
while lasti2 < len(regs2) and (regs2[lasti2].chrid < r1.chrid or
(regs2[lasti2].chrid == r1.chrid and regs2[lasti2].end < r1.start)):
lasti2 += 1
for i2 in range(lasti2, len(regs2)):
r2 = regs2[i2]
if r1.overlap(r2) >= min(r1.end - r1.start, r2.end - r2.start) * overlap_rate:
isin = True
break
if r1.chrid < r2.chrid or (r1.chrid == r2.chrid and r1.end < r2.start):
break
if not isin:
rs.append(r1)
return rs
def test_subtract_list():
reg0 = Boundary('chr1', 0, 10)
reg1 = Boundary('chr1', 10, 20)
reg2 = Boundary('chr1', 30, 40)
reg3 = Boundary('chr1', 50, 60)
reg7 = Boundary('chr2', 50, 60)
reg4 = Boundary('chr1', 13, 18)
reg5 = Boundary('chr1', 32, 38)
reg6 = Boundary('chr1', 55, 57)
regs1 = [reg0, reg7, reg1, reg2, reg3]
regs2 = [reg4, reg5, reg6, reg0]
rs = subtract_list(regs1, regs2)
assert len(rs) == 2
assert rs[0] == reg3
assert rs[1] == reg7
#assert rs[2] ==
def sample_in_between_region(regions, size):
'''
Generate regions of length 'size' in the gaps between consecutive regions in 'regions'
Used to make negative samples without a CTCF motif
:param regions:
:param size:
:return:
'''
regions = sorted(regions, key=lambda x: (x.chrid, x.start, x.end))
rs = []
for j in range(1, len(regions)):
lastreg = regions[j - 1]
reg = regions[j]
if reg.chrid == lastreg.chrid:
start = lastreg.end + 1
end = reg.start - 1
dis = end - start # distance between this region and last region
if dis > size:
k = int(dis/size)
for i in range(k):
b = Boundary(lastreg.chrom, start + size * i, start + size * (i + 1))
rs.append(b)
return rs
def test_sample_in_between_region():
reg0 = Boundary('chr1', 0, 10)
reg1 = Boundary('chr1', 30, 40)
reg2 = Boundary('chr1', 46, 55)
reg3 = Boundary('chr1', 50, 60)
rs = sample_in_between_region([reg0, reg1, reg2, reg3], 6)
assert len(rs) == 4
assert rs[0] == Boundary('chr1', 10, 16)
assert rs[1] == Boundary('chr1', 16, 22)
def assign_ctcf_to_region(regions, ctcfs, reg_size=0):
'''
Assign ctcf direction to regions
:param regions:
:param ctcfs:
:param reg_size: optional, when provided > 0, only check a region of reg_size around the mid point
:return:
'''
regions = sorted(regions, key=lambda x: (x.chrid, x.start))
ctcfs = sorted(ctcfs, key=lambda x: (x.chrid, x.start))
buff_size = int((regions[0].end - regions[0].start - reg_size)/2) # buffer from start, end considering reg_size
start_tf = 0
for i in range(len(regions)):
while start_tf < len(ctcfs) and (ctcfs[start_tf].chrid < regions[i].chrid or \
(ctcfs[start_tf].chrid == regions[i].chrid and ctcfs[start_tf].end < regions[i].start)):
start_tf += 1
for j in range(start_tf, len(ctcfs)):
if regions[i].overlap(ctcfs[j]) == ctcfs[j].end - ctcfs[j].start:
if reg_size == 0 or (reg_size > 0 and min(regions[i].end - buff_size, ctcfs[j].end) -
max(regions[i].start + buff_size, ctcfs[j].start) == ctcfs[j].end - ctcfs[j].start):
if regions[i].ctcf_dir == 0:
regions[i].ctcf_dir = ctcfs[j].ctcf_dir
regions[i].score = ctcfs[j].score
elif regions[i].ctcf_dir != ctcfs[j].ctcf_dir:
regions[i].ctcf_dir = 3
regions[i].score = max(regions[i].score, ctcfs[j].score)
break
elif ctcfs[j].chrid > regions[i].chrid or (
ctcfs[j].chrid == regions[i].chrid and ctcfs[j].start > regions[i].end):
break
# if regions[i].ctcf_dir == 0:
# sys.stderr.write('no ctcf motif for this regions: {}\n'.format(str(regions[i])))
return regions
def test_assign_ctcf_to_region():
reg0 = Boundary('chr1', 0, 10)
reg1 = Boundary('chr1', 30, 40)
reg2 = Boundary('chr1', 46, 55)
reg3 = Boundary('chr1', 50, 60)
regs = [reg0, reg1, reg2, reg3]
cf1 = Boundary('chr1', 2, 4)
cf1.ctcf_dir = 1
cf2 = Boundary('chr1', 5, 6)
cf2.ctcf_dir = 2
cf3 = Boundary('chr1', 33, 35)
cf3.ctcf_dir = 1
cf4 = Boundary('chr1', 42, 43)
cf4.ctcf_dir = 3
ctcfs = [cf1, cf2, cf3, cf4]
regions = assign_ctcf_to_region(regs, ctcfs)
assert len(regions) == 4
assert regions[0].ctcf_dir == 3
assert regions[1].ctcf_dir == 1
assert regions[2].ctcf_dir == 0
assert regions[3].ctcf_dir == 0
def load_ctcf_chip_boundary(input_file, size=REGION_SIZE, ismerge=True, isnorm=True):
rs = []
with open(input_file,'r') as fo:
for ln in fo.readlines():
st = ln.split()
b1 = Boundary(st[0], int(st[1]), int(st[2]))
if b1.chrid > 0:
#normalize_region_len(b1, size)
rs.append(b1)
if ismerge:
rs = merge_overlap_region(rs)
if isnorm:
for r in rs:
normalize_region_len(r, size)
return rs
def load_ctcf_motif_boundary(input_file, size=REGION_SIZE, ismerge=False, isnorm=True, p_value_thres=5e-5):
'''
set size = 0 to have ctcf motif fragment
:param input_file:
:param size:
:return:
'''
rs = []
with open(input_file,'r') as fo:
for ln in fo.readlines():
if ln.startswith('#'):
continue
st = ln.split()
b1 = Boundary(st[2], int(st[3]), int(st[4]))
strand = 1 if st[5] == '+' else 2
b1.ctcf_dir = strand
b1.score = float(st[7]) # p-value
if b1.score > p_value_thres:
continue
if b1.chrid > 0:
#if size > 0:
# normalize_region_len(b1, size)
rs.append(b1)
if ismerge:
rs = merge_overlap_region(rs)
if isnorm and size > 0:
for r in rs:
normalize_region_len(r, size)
return rs
def load_ctcf_motif_JASPAR(input_file, size=REGION_SIZE, ismerge=False, isnorm=True, p_value_thres=5e-5):
'''
set size = 0 to have ctcf motif fragment
:param input_file:
:param size:
:return:
'''
rs = []
log10_thres = -1 * np.log10(p_value_thres) * 100
with open(input_file,'r') as fo:
for ln in fo.readlines():
if ln.startswith('#'):
continue
st = ln.split()
score = float(st[5]) # JASPAR score (scaled -log10 p-value), compared against log10_thres below
if score < log10_thres:
continue
b1 = Boundary(st[0], int(st[1]), int(st[2]))
strand = 1 if st[6] == '+' else 2
b1.ctcf_dir = strand
b1.score = score
if b1.chrid > 0:
#if size > 0:
# normalize_region_len(b1, size)
rs.append(b1)
if ismerge:
rs = merge_overlap_region(rs)
if isnorm and size > 0:
for r in rs:
normalize_region_len(r, size)
return rs
def split_data(regions, test_chrom, val_chrom):
if isinstance(regions[0], Boundary):
train_data = [x for x in regions if x.chrom not in (test_chrom + val_chrom)]
test_data = [x for x in regions if x.chrom in test_chrom]
val_data = [x for x in regions if x.chrom in val_chrom]
elif isinstance(regions[0], Loop):
train_data = [x for x in regions if x.b1.chrom not in (test_chrom + val_chrom)]
test_data = [x for x in regions if x.b1.chrom in test_chrom]
val_data = [x for x in regions if x.b1.chrom in val_chrom]
return (train_data, test_data, val_data)
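# Hedged usage sketch (chromosome names and coordinates are illustrative):
# split_data is meant to hold out whole chromosomes, so the train/test/val
# splits never share a chromosome.
def _example_split_data():
    regions = [Boundary('chr1', 0, 4000), Boundary('chr2', 0, 4000), Boundary('chr3', 0, 4000)]
    train, test, val = split_data(regions, ['chr2'], ['chr3'])
    # Expect: train only from chr1, test only from chr2, val only from chr3.
    return train, test, val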
def find_region(reg, regions):
'''
find a region in regions that = reg
use binary search
:param reg:
:param regions: must be sorted
:return:
'''
i, j = 0, len(regions) - 1
while i <= j:
mid = int((i + j)/2)
if regions[mid] == reg: # overlap more than 80%
return regions[mid]
elif regions[mid].chrid > reg.chrid or (regions[mid].chrid == reg.chrid and regions[mid].start > reg.end):
j = mid - 1
elif regions[mid].chrid < reg.chrid or (regions[mid].chrid == reg.chrid and regions[mid].end < reg.start):
i = mid + 1
elif regions[mid].chrid == reg.chrid and regions[mid].start < reg.start:
i = mid + 1
elif regions[mid].chrid == reg.chrid and reg.start < regions[mid].start:
j = mid - 1
sys.stderr.write('There is no match regions for: {}, {}, overlap:{}\n'.format(str(reg), str(regions[mid]), reg.overlap(regions[mid])))
return None
def load_rad21_chiapet_boundary(input_file, size=REGION_SIZE, ismerge=True, isnorm=True):
rs = []
with open(input_file,'r') as fo:
for ln in fo.readlines():
if ln.startswith('#'):
continue
st = ln.split()
b1 = Boundary(st[0], int(st[1]), int(st[2]))
b2 = Boundary(st[3], int(st[4]), int(st[5]))
if b1.chrid > 0 and b2.chrid > 0:
#normalize_region_len(b1, size)
#normalize_region_len(b2, size)
rs.append(b1)
rs.append(b2)
if ismerge:
rs = merge_overlap_region(rs)
if isnorm:
for r in rs:
normalize_region_len(r, size)
return rs
def load_rad21_chiapet_boundary_jurkat(input_file, size=REGION_SIZE, ismerge=True, isnorm=True):
rs = []
with open(input_file,'r') as fo:
for ln in fo.readlines():
if ln.startswith('#'):
continue
# import re
# ln = 'chr1 27994103 28047959 chr1:27994103-28000625==chr1:28044471-28047959 6 0.129535548882624'
st = re.split('[\s:\-=]+', ln)
b1 = Boundary(st[3], int(st[4]), int(st[5]))
b2 = Boundary(st[6], int(st[7]), int(st[8]))
if b1.chrid > 0 and b2.chrid > 0:
#normalize_region_len(b1, size)
#normalize_region_len(b2, size)
rs.append(b1)
rs.append(b2)
if ismerge:
rs = merge_overlap_region(rs)
if isnorm:
for r in rs:
normalize_region_len(r, size)
return rs
def find_region_bruteforce(reg, regions):
for x in regions:
if reg == x:
return x
return None
def test_find_region():
rad21_gm12878_loop_file = "/Users/tat2016/Box Sync/Research/Data/CHIA_PET/Heidari.GM12878.Rad21.mango.interactions.FDR0.2.mango.allCC.txt"
rs = []
with open(rad21_gm12878_loop_file,'r') as fo:
for ln in fo.readlines():
st = ln.split()
b1 = Boundary(st[0], int(st[1]), int(st[2]))
b2 = Boundary(st[3], int(st[4]), int(st[5]))
if b1.chrid > 0 and b2.chrid > 0:
normalize_region_len(b1, REGION_SIZE)
normalize_region_len(b2, REGION_SIZE)
loop = Loop(b1, b2)
rs.append(loop)
boundaries = [x.b1 for x in rs] + [x.b2 for x in rs]
boundaries = merge_overlap_region(boundaries)
allboundaries = [x.b1 for x in rs] + [x.b2 for x in rs]
ntest = 1000
for i in range(ntest):
reg = random.sample(allboundaries, 1)[0]
assert find_region(reg, boundaries) == find_region_bruteforce(reg, boundaries)
b = Boundary('chr1', 1000, 5000)
assert find_region(b, boundaries) == find_region_bruteforce(b, boundaries)
def load_rad21_chiapet_loop(input_file, size=REGION_SIZE, isnorm=False):
rs = []
with open(input_file,'r') as fo:
for ln in fo.readlines():
if ln.startswith('#'):
continue
st = re.split('[\s\t]+',ln)
b1 = Boundary(st[0], int(st[1]), int(st[2]))
b2 = Boundary(st[3], int(st[4]), int(st[5]))
if b1.chrid > 0 and b2.chrid > 0:
if isnorm and size > 0:
normalize_region_len(b1, size)
normalize_region_len(b2, size)
loop = Loop(b1, b2)
rs.append(loop)
return rs
def normalize_loop(loops, size=REGION_SIZE, overlap_rate=OVERLAP_RATE):
'''
+ Collect the set of loop boundaries
+ Normalize boundary lengths and merge overlapping boundaries
+ Reassign loops to the new normalized boundaries:
+ if an old boundary overlaps a new boundary by more than 50%, replace the old boundary with the new one
(by construction, every old boundary overlaps (>50%) some new boundary)
:param loops:
:return:
'''
boundaries = [x.b1 for x in loops] + [x.b2 for x in loops]
for x in boundaries:
if x.end - x.start > size:
warnings.warn('region length: {} longer than:{} before normalizing, potential information lost; {}'
.format(x.end - x.start, size, str(x)))
normalize_region_len(x, size)
boundaries = merge_overlap_region(boundaries, overlap_rate=overlap_rate)
boundaries = sorted(boundaries, key=lambda x: (x.chrid, x.start, x.end))
for x in loops:
x.b1 = find_region(x.b1, boundaries)
x.b2 = find_region(x.b2, boundaries)
for x in loops:
normalize_region_len(x.b1)
normalize_region_len(x.b2)
loops = sorted(loops, key=lambda x: (x.b1.chrid, x.b1.start, x.b1.end, x.b2.chrid, x.b2.start, x.b2.end))
rs = [loops[0]]
for i in range(1, len(loops)):
if loops[i] != rs[-1]:
rs.append(loops[i])
return rs, boundaries
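# Hedged usage sketch (coordinates are illustrative): two loops whose left anchors
# overlap are mapped onto a shared, fixed-size boundary set by normalize_loop.
# Assumes Boundary/Loop and the module-level REGION_SIZE/OVERLAP_RATE defaults used above.
def _example_normalize_loop():
    l1 = Loop(Boundary('chr1', 1000, 3000), Boundary('chr1', 50000, 52000))
    l2 = Loop(Boundary('chr1', 1200, 3200), Boundary('chr1', 90000, 92000))
    loops, boundaries = normalize_loop([l1, l2])
    # The overlapping left anchors should now point at one merged boundary,
    # and duplicate loops (if any) have been dropped.
    return loops, boundaries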
def make_nonloop_type123(loops, boundaries, max_dist, tp=1):
'''
:param loops: known (positive) loops
:param boundaries: candidate boundaries with ctcf_dir already assigned
:param max_dist: maximum distance between the two anchors of a candidate pair
:param tp: non-loop type (1: convergent CTCF pair, 2: tandem CTCF pair, 3: divergent CTCF pair)
:return:
'''
boundaries = sorted(boundaries, key=lambda x: (x.chrid, x.start))
boundary_to_id = {}
for k,v in enumerate(boundaries):
boundary_to_id[v] = k
con = np.zeros((len(boundaries), len(boundaries)))
for x in loops:
con[boundary_to_id[x.b1], boundary_to_id[x.b2]] = 1
con[boundary_to_id[x.b2], boundary_to_id[x.b1]] = 1
rs = []
for i1 in range(len(boundaries)):
for i2 in range(i1 + 1, len(boundaries)):
if boundaries[i2].chrid > boundaries[i1].chrid or boundaries[i2].start - boundaries[i1].start >= max_dist:
break
if con[i1, i2] == 0 and boundaries[i2].chrid == boundaries[i1].chrid:
if tp == 1 and boundaries[i1].ctcf_dir in [1,3] and boundaries[i2].ctcf_dir in [2,3]:
loop = Loop(boundaries[i1], boundaries[i2])
rs.append(loop)
con[i1, i2] = 1
con[i2, i1] = 1
elif tp == 2 and boundaries[i1].ctcf_dir == boundaries[i2].ctcf_dir and boundaries[i2].ctcf_dir in [1,2]:
loop = Loop(boundaries[i1], boundaries[i2])
rs.append(loop)
con[i1, i2] = 1
con[i2, i1] = 1
elif tp == 3 and boundaries[i1].ctcf_dir == 2 and boundaries[i2].ctcf_dir == 1:
loop = Loop(boundaries[i1], boundaries[i2])
rs.append(loop)
con[i1, i2] = 1
con[i2, i1] = 1
return rs
def make_nonloop_type45(boundaries, ctcf_nonboundaries, max_dist, tp=4):
'''
:param boundaries: loop boundaries with ctcf_dir already assigned
:param ctcf_nonboundaries: CTCF sites that are not loop boundaries
:param max_dist: maximum distance between the two anchors of a candidate pair
:param tp: non-loop type (4: boundary paired with a downstream non-boundary CTCF site on the '-' strand or both strands, 5: boundary paired with any downstream non-boundary site)
:return:
'''
boundaries = sorted(boundaries, key=lambda x: (x.chrid, x.start))
ctcf_nonboundaries = sorted(ctcf_nonboundaries, key=lambda x: (x.chrid, x.start))
rs = []
i2 = 0
for bou in boundaries:
if bou.ctcf_dir == 2:
continue
while i2 < len(ctcf_nonboundaries) and (ctcf_nonboundaries[i2].chrid < bou.chrid
or (bou.chrid == ctcf_nonboundaries[i2].chrid and ctcf_nonboundaries[i2].end < bou.start)):
i2 += 1
for i in range(i2, len(ctcf_nonboundaries)):
if bou.chrid < ctcf_nonboundaries[i].chrid or ctcf_nonboundaries[i].start - bou.start > max_dist:
break
if bou.chrid == ctcf_nonboundaries[i].chrid and ctcf_nonboundaries[i].start - bou.end > 0 and (tp == 5 or ctcf_nonboundaries[i].ctcf_dir in [2,3]):
loop = Loop(bou, ctcf_nonboundaries[i])
rs.append(loop)
return rs
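# Hedged sketch (coordinates are illustrative) of how negative (non-loop) pairs are
# built with the two helpers above; ctcf_dir codes follow the rest of this file
# (1 = '+' strand motif, 2 = '-' strand motif, 3 = both).
def _example_make_nonloops():
    b1 = Boundary('chr1', 10000, 14000)
    b1.ctcf_dir = 1
    b2 = Boundary('chr1', 60000, 64000)
    b2.ctcf_dir = 2
    b3 = Boundary('chr1', 90000, 94000)
    b3.ctcf_dir = 2
    loops = [Loop(b1, b2)]
    # Type-1 negatives: convergent CTCF pairs within max_dist that are not looped,
    # e.g. (b1, b3) here.
    type1 = make_nonloop_type123(loops, [b1, b2, b3], max_dist=200000, tp=1)
    # Type-4 negatives: a boundary paired with a downstream non-boundary CTCF site.
    nb = Boundary('chr1', 120000, 121000)
    nb.ctcf_dir = 2
    type4 = make_nonloop_type45([b1, b2, b3], [nb], max_dist=200000, tp=4)
    return type1, type4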
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc. and Epidemico Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from __future__ import absolute_import
from celery import shared_task
from dataqs.udatp.udatp import UoDAirTempPrecipProcessor
@shared_task
def udatp_task():
processor = UoDAirTempPrecipProcessor()
processor.run()
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class UsersConfig(AppConfig):
name = 'dvhb_hybrid.users'
label = 'dvhb_hybrid.users'
verbose_name = _('users')
|
import smart_imports
smart_imports.all()
class DropItemAbilityTest(helpers.UseAbilityTaskMixin, utils_testcase.TestCase):
PROCESSOR = deck.drop_item.DropItem
def setUp(self):
super(DropItemAbilityTest, self).setUp()
game_logic.create_test_map()
self.account = self.accounts_factory.create_account()
self.storage = game_logic_storage.LogicStorage()
self.storage.load_account_data(self.account)
self.hero = self.storage.accounts_to_heroes[self.account.id]
self.ability = self.PROCESSOR()
@property
def use_attributes(self):
return super(DropItemAbilityTest, self).use_attributes(hero=self.hero, storage=self.storage)
def test_no_items(self):
self.assertEqual(self.hero.bag.occupation, 0)
self.assertEqual(self.ability.use(**self.use_attributes), (game_postponed_tasks.ComplexChangeTask.RESULT.FAILED, game_postponed_tasks.ComplexChangeTask.STEP.ERROR, ()))
def test_success(self):
self.hero.bag.put_artifact(artifacts_storage.artifacts.generate_artifact_from_list(artifacts_storage.artifacts.artifacts, self.hero.level, rarity=artifacts_relations.RARITY.NORMAL))
with self.check_delta(lambda: self.hero.bag.occupation, -1):
self.assertEqual(self.ability.use(**self.use_attributes), (game_postponed_tasks.ComplexChangeTask.RESULT.SUCCESSED, game_postponed_tasks.ComplexChangeTask.STEP.SUCCESS, ()))
@mock.patch('the_tale.game.heroes.objects.Hero.might_crit_chance', 1)
def test_success__critical(self):
self.hero.bag.put_artifact(artifacts_storage.artifacts.generate_artifact_from_list(artifacts_storage.artifacts.artifacts, self.hero.level, rarity=artifacts_relations.RARITY.NORMAL))
old_money_stats = self.hero.statistics.money_earned_from_help
self.assertEqual(self.hero.bag.occupation, 1)
self.assertEqual(self.ability.use(**self.use_attributes), (game_postponed_tasks.ComplexChangeTask.RESULT.SUCCESSED, game_postponed_tasks.ComplexChangeTask.STEP.SUCCESS, ()))
self.assertEqual(self.hero.bag.occupation, 0)
self.assertTrue(old_money_stats < self.hero.statistics.money_earned_from_help)
|
#!/usr/bin/env python
from Builder import get_workspace
import argparse
parser = argparse.ArgumentParser(description='Build binned workspaces.')
parser.add_argument('argument', type=str, choices = ['bins','chans','nps','events'],
help='The parameter to be scaled')
parser.add_argument('-l',dest='somerange', type=int, nargs='+',
help='A list of options to scale over')
parser.add_argument('-r',dest='somerange', type=int, nargs='+',
help='The range to scale over')
arg = parser.parse_args().argument
somerange = parser.parse_args().somerange
if len(somerange) <= 2:
if len(somerange) > 1:
somerange = range(somerange[0],somerange[1])
else:
somerange = range(somerange[0])
print somerange
d = {'events':1000,
'chans':1,
'nps':1,
'bins':10}
for i in somerange:
print "writing {} {}".format(i, arg)
d[arg] = i
workspace = get_workspace(nchannels = d['chans'], events = d['events'], nbins = d['bins'], nnps = d['nps'])
workspace.SetName('BinnedWorkspace')
workspace.writeToFile("newoutput/workspace{}channels{}events{}bins{}nps.root".format(d['chans'], d['events'], d['bins'], d['nps']))
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""File size limiting functionality for Invenio-Files-REST."""
def file_size_limiters(bucket):
"""Get default file size limiters.
:param bucket: The :class:`invenio_files_rest.models.Bucket` instance.
:returns: A list containing an instance of
:class:`invenio_files_rest.limiters.FileSizeLimit` with quota left
value and description and another one with max file size value and
description.
"""
return [
FileSizeLimit(
bucket.quota_left,
'Bucket quota exceeded.',
),
FileSizeLimit(
bucket.max_file_size,
'Maximum file size exceeded.',
),
]
class FileSizeLimit(object):
"""File size limiter."""
not_implemented_error = NotImplementedError(
'FileSizeLimit supports only comparisons with integers and other '
'FileSizeLimits.')
def __init__(self, limit, reason):
"""Instantiate a new file size limit.
:param limit: The imposed limit.
:param reason: The limit description.
"""
self.limit = limit
self.reason = reason
def __lt__(self, other):
"""Check if this limit is less than the other one."""
if isinstance(other, int):
return self.limit < other
elif isinstance(other, FileSizeLimit):
return self.limit < other.limit
raise self.not_implemented_error
def __gt__(self, other):
"""Check if this limit is greater than the other one."""
if isinstance(other, int):
return self.limit > other
elif isinstance(other, FileSizeLimit):
return self.limit > other.limit
raise self.not_implemented_error
def __eq__(self, other):
"""Check for equality."""
if isinstance(other, int):
return self.limit == other
elif isinstance(other, FileSizeLimit):
return self.limit == other.limit
raise self.not_implemented_error
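# Hedged usage sketch: limits compare against plain integers and against each
# other, so the strictest of several limiters can be chosen with min().
def _example_file_size_limit():
    quota = FileSizeLimit(5000000, 'Bucket quota exceeded.')
    max_size = FileSizeLimit(1000000, 'Maximum file size exceeded.')
    strictest = min(quota, max_size)  # uses FileSizeLimit.__lt__
    assert strictest.reason == 'Maximum file size exceeded.'
    assert max_size < 5000000 and quota > 1000000
    return strictest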
|
# -*- coding: utf-8 -*-
"""
flask.wrappers
~~~~~~~~~~~~~~
Implements the WSGI wrappers (request and response).
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from werkzeug.exceptions import BadRequest
from werkzeug.wrappers import Request as RequestBase, Response as ResponseBase
from flask import json
from flask.globals import current_app
class JSONMixin(object):
"""Common mixin for both request and response objects to provide JSON
parsing capabilities.
.. versionadded:: 1.0
"""
_cached_json = Ellipsis
@property
def is_json(self):
"""Check if the mimetype indicates JSON data, either
:mimetype:`application/json` or :mimetype:`application/*+json`.
.. versionadded:: 0.11
"""
mt = self.mimetype
return (
mt == 'application/json'
or (mt.startswith('application/')) and mt.endswith('+json')
)
@property
def json(self):
"""This will contain the parsed JSON data if the mimetype indicates
JSON (:mimetype:`application/json`, see :meth:`is_json`), otherwise it
will be ``None``.
"""
return self.get_json()
def _get_data_for_json(self, cache):
return self.get_data(cache=cache)
def get_json(self, force=False, silent=False, cache=True):
"""Parse and return the data as JSON. If the mimetype does not indicate
JSON (:mimetype:`application/json`, see :meth:`is_json`), this returns
``None`` unless ``force`` is true. If parsing fails,
:meth:`on_json_loading_failed` is called and its return value is used
as the return value.
:param force: Ignore the mimetype and always try to parse JSON.
:param silent: Silence parsing errors and return ``None`` instead.
:param cache: Store the parsed JSON to return for subsequent calls.
"""
if cache and self._cached_json is not Ellipsis:
return self._cached_json
if not (force or self.is_json):
return None
# We accept MIME charset against the specification as certain clients
# have used this in the past. For responses, we assume that if the
# charset is set then the data has been encoded correctly as well.
charset = self.mimetype_params.get('charset')
try:
data = self._get_data_for_json(cache=cache)
rv = json.loads(data, encoding=charset)
except ValueError as e:
if silent:
rv = None
else:
rv = self.on_json_loading_failed(e)
if cache:
self._cached_json = rv
return rv
def on_json_loading_failed(self, e):
"""Called if :meth:`get_json` parsing fails and isn't silenced. If
this method returns a value, it is used as the return value for
:meth:`get_json`. The default implementation raises a
:class:`BadRequest` exception.
.. versionchanged:: 0.10
Raise a :exc:`BadRequest` error instead of returning an error
message as JSON. If you want that behavior you can add it by
subclassing.
.. versionadded:: 0.8
"""
if current_app is not None and current_app.debug:
raise BadRequest('Failed to decode JSON object: {0}'.format(e))
raise BadRequest()
class Request(RequestBase, JSONMixin):
"""The request object used by default in Flask. Remembers the
matched endpoint and view arguments.
It is what ends up as :class:`~flask.request`. If you want to replace
the request object used you can subclass this and set
:attr:`~flask.Flask.request_class` to your subclass.
The request object is a :class:`~werkzeug.wrappers.Request` subclass and
provides all of the attributes Werkzeug defines plus a few Flask
specific ones.
"""
#: The internal URL rule that matched the request. This can be
#: useful to inspect which methods are allowed for the URL from
#: a before/after handler (``request.url_rule.methods``) etc.
#: Though if the request's method was invalid for the URL rule,
#: the valid list is available in ``routing_exception.valid_methods``
#: instead (an attribute of the Werkzeug exception :exc:`~werkzeug.exceptions.MethodNotAllowed`)
#: because the request was never internally bound.
#:
#: .. versionadded:: 0.6
url_rule = None
#: A dict of view arguments that matched the request. If an exception
#: happened when matching, this will be ``None``.
view_args = None
#: If matching the URL failed, this is the exception that will be
#: raised / was raised as part of the request handling. This is
#: usually a :exc:`~werkzeug.exceptions.NotFound` exception or
#: something similar.
routing_exception = None
@property
def max_content_length(self):
"""Read-only view of the ``MAX_CONTENT_LENGTH`` config key."""
if current_app:
return current_app.config['MAX_CONTENT_LENGTH']
@property
def endpoint(self):
"""The endpoint that matched the request. This in combination with
:attr:`view_args` can be used to reconstruct the same or a
modified URL. If an exception happened when matching, this will
be ``None``.
"""
if self.url_rule is not None:
return self.url_rule.endpoint
@property
def blueprint(self):
"""The name of the current blueprint"""
if self.url_rule and '.' in self.url_rule.endpoint:
return self.url_rule.endpoint.rsplit('.', 1)[0]
def _load_form_data(self):
RequestBase._load_form_data(self)
# In debug mode we're replacing the files multidict with an ad-hoc
# subclass that raises a different error for key errors.
if (
current_app
and current_app.debug
and self.mimetype != 'multipart/form-data'
and not self.files
):
from .debughelpers import attach_enctype_error_multidict
attach_enctype_error_multidict(self)
class Response(ResponseBase, JSONMixin):
"""The response object that is used by default in Flask. Works like the
response object from Werkzeug but is set to have an HTML mimetype by
default. Quite often you don't have to create this object yourself because
:meth:`~flask.Flask.make_response` will take care of that for you.
If you want to replace the response object used you can subclass this and
set :attr:`~flask.Flask.response_class` to your subclass.
.. versionchanged:: 1.0
JSON support is added to the response, like the request. This is useful
when testing to get the test client response data as JSON.
"""
default_mimetype = 'text/html'
def _get_data_for_json(self, cache):
return self.get_data()
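# Hedged usage sketch (not part of this module's API; names are illustrative):
# exercises the JSON support on both Request and Response through a throwaway
# Flask app and its test client, as described in the Response docstring above.
def _example_json_roundtrip():
    from flask import Flask, jsonify, request
    app = Flask(__name__)

    @app.route('/echo', methods=['POST'])
    def echo():
        # Request.get_json() comes from JSONMixin above.
        return jsonify(request.get_json(silent=True) or {})

    client = app.test_client()
    rv = client.post('/echo', data=json.dumps({'answer': 42}),
                     content_type='application/json')
    # The test client response is this module's Response class, so the parsed
    # body is available directly via JSONMixin.
    assert rv.get_json() == {'answer': 42}
    return rv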
|
"""
Settings and configuration for Django.
Read values from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global_settings.py
for a list of all possible variables.
"""
import importlib
from importlib.util import find_spec
class Settings(object):
"""
A lazy proxy for either global Django settings or a custom settings object.
The user can manually configure settings prior to using them. Otherwise,
Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
"""
def __init__(self):
"""
Load the settings module pointed to by the environment variable. This
is used the first time settings are needed, if the user hasn't
configured settings manually.
"""
settings_module = "settings"
if find_spec(settings_module):
mod = importlib.import_module(settings_module)
self._explicit_settings = set()
for setting in dir(mod):
if setting.isupper():
setting_value = getattr(mod, setting)
setattr(self, setting, setting_value)
self._explicit_settings.add(setting)
def get(self, key, default=None):
if hasattr(self, key):
return getattr(self, key)
return default
def set(self, key, value):
self.__setattr__(key, value)
def __setattr__(self, name, value):
"""
Set the value of setting. Clear all cached values if _wrapped changes
(@override_settings does this) or clear single values when set.
"""
super().__setattr__(name, value)
def __delattr__(self, name):
"""Delete a setting and clear it from cache if needed."""
super().__delattr__(name)
settings = Settings()
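# Hedged usage sketch (key names are illustrative): settings behave like a simple
# attribute store with dict-style get()/set() fallbacks.
def _example_settings_usage():
    settings.set('API_TIMEOUT', 30)
    assert settings.get('API_TIMEOUT') == 30
    assert settings.get('MISSING_KEY', 'default') == 'default'
    return settings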
|
from __future__ import absolute_import
from __future__ import print_function
from six.moves import map
from six.moves import range
if 1:
import numpy as N
from statlib import pstat, stats
from .pstat import *
from .stats import *
from numpy import linalg as LA
import operator, math
def aanova(data,effects=['A','B','C','D','E','F','G','H','I','J','K']):
"""
Prints the results of single-variable between- and within-subject ANOVA
designs. The function can only handle univariate ANOVAs with a single
random factor. The random factor is coded in column 0 of the input
list/array (see below) and the measured variable is coded in the last
column of the input list/array. The following were used as references
when writing the code:
Maxwell, SE, Delaney HD (1990) Designing Experiments and Analyzing
Data, Wadsworth: Belmont, CA.
Lindman, HR (1992) Analysis of Variance in Experimental Design,
Springer-Verlag: New York.
TO DO: Increase Current Max Of 10 Levels Per W/I-Subject Factor
Consolidate Between-Subj Analyses For Between And Within/Between
Front-end for different input data-array shapes/organization
Axe mess of 'global' statements (particularly for Drestrict fcns)
Usage: anova(data, effects=['A','B','C','D','E','F','G','H','I','J','K'])
where data is in |Stat format (see below)
Note: |Stat format is as follows ... one datum per row, first element of
row is the subject identifier, followed by all within/between subject
variable designators, and the measured data point as the last element in the
row. Thus, [1, 'short', 'drugY', 2, 14.7] represents subject 1 when measured
in the short / drugY / 2 condition, and subject 1 gave a measured value of
14.7 in this combination of conditions. Thus, all input lists are '2D'
lists-of-lists.
"""
global alluniqueslist, Nlevels, Nfactors, Nsubjects, Nblevels, Nallsources
global Bscols, Bbetweens, SSlist, SSsources, DM, DN, Bwonly_sources, D
global Bwithins, alleffects, alleffsources
outputlist = []
SSbtw = []
SSbtwsources = []
SSwb = []
SSwbsources = []
alleffects = []
alleffsources = []
SSlist = []
SSsources = []
print()
variables = 1 # this function only handles one measured variable
if type(data)!=type([]):
data = data.tolist()
## Create a list of all unique values in each column, and a list of these Ns
alluniqueslist = [0]*(len(data[0])-variables) # all cols but data cols
Nlevels = [0]*(len(data[0])-variables) # (as above)
for column in range(len(Nlevels)):
alluniqueslist[column] = pstat.unique(pstat.colex(data,column))
Nlevels[column] = len(alluniqueslist[column])
Ncells = N.multiply.reduce(Nlevels[1:]) # total num cells (w/i AND btw)
Nfactors = len(Nlevels[1:]) # total num factors
Nallsources = 2**(Nfactors+1) # total no. possible sources (factor-combos)
Nsubjects = len(alluniqueslist[0]) # total # subj in study (# of diff. subj numbers in column 0)
## Within-subj factors defined as those where there are fewer subj than
## scores in the first level of a factor (quick and dirty; findwithin() below)
Bwithins = findwithin(data) # binary w/i subj factors (excl. col 0)
Bbetweens = ~Bwithins & (Nallsources-1) - 1
Wcolumns = makelist(Bwithins,Nfactors+1) # get list of cols of w/i factors
Wscols = [0] + Wcolumns # w/i subj columns INCL col 0
Bscols = makelist(Bbetweens+1,Nfactors+1) #list of btw-subj cols,INCL col 0
Nwifactors = len(Wscols) - 1 # WAS len(Wcolumns)
Nwlevels = N.take(N.array(Nlevels),Wscols) # no.lvls for each w/i subj fact
Nbtwfactors = len(Bscols) - 1 # WASNfactors - Nwifactors + 1
Nblevels = N.take(N.array(Nlevels),Bscols)
Nwsources = 2**Nwifactors - 1 # num within-subject factor-combos
Nbsources = Nallsources - Nwsources
#
# CALC M-VARIABLE (LIST) and Marray/Narray VARIABLES (ARRAY OF CELL MNS/NS)
#
# Eliminate replications for the same subject in same condition as well as
# within-subject repetitions, keep as list
M = pstat.collapse(data,Bscols,-1,None,None,mean)
# Create an arrays of Nblevels shape (excl. subj dim)
Marray = N.zeros(Nblevels[1:],'f')
Narray = N.zeros(Nblevels[1:],'f')
# Fill arrays by looping through all scores in the (collapsed) M
for row in M:
idx = []
for i in range(len(row[:-1])):
idx.append(alluniqueslist[Bscols[i]].index(row[i]))
idx = idx[1:]
Marray[idx] = Marray[idx] + row[-1]
Narray[idx] = Narray[idx] + 1
Marray = Marray / Narray
#
# CREATE DATA ARRAY, DA, FROM ORIGINAL INPUT DATA
# (this is an unbelievably bad, wasteful data structure, but it makes lots
# of tasks much easier; should nevertheless be fixed someday)
# This limits the within-subject level count to 10!
coefflist =[[[1]],
[[-1,1]],
[[-1,0,1],[1,-2,1]],
[[-3,-1,1,3],[1,-1,-1,1],[-1,3,-3,1]],
[[-2,-1,0,1,2],[2,-1,-2,-1,2],[-1,2,0,-2,1],[1,-4,6,-4,1]],
[[-5,-3,-1,1,3,5],[5,-1,-4,-4,-1,5],[-5,7,4,-4,-7,5],
[1,-3,2,2,-3,1],[-1,5,-10,10,-5,1]],
[[-3,-2,-1,0,1,2,3],[5,0,-3,-4,-3,0,5],[-1,1,1,0,-1,-1,1],
[3,-7,1,6,1,-7,3],[-1,4,-5,0,5,-4,1],[1,-6,15,-20,15,-6,1]],
[[-7,-5,-3,-1,1,3,5,7],[7,1,-3,-5,-5,-3,1,7],
[-7,5,7,3,-3,-7,-5,7],[7,-13,-3,9,9,-3,-13,7],
[-7,23,-17,-15,15,17,-23,7],[1,-5,9,-5,-5,9,-5,1],
[-1,7,-21,35,-35,21,-7,1]],
[[-4,-3,-2,-1,0,1,2,3,4],[28,7,-8,-17,-20,-17,-8,7,28],
[-14,7,13,9,0,-9,-13,-7,14],[14,-21,-11,9,18,9,-11,-21,14],
[-4,11,-4,-9,0,9,4,-11,4],[4,-17,22,1,-20,1,22,-17,4],
[-1,6,-14,14,0,-14,14,-6,1],[1,-8,28,-56,70,-56,28,-8,1]],
[[-9,-7,-5,-3,-1,1,3,5,7,9],[6,2,-1,-3,-4,-4,-3,-1,2,6],
[-42,14,35,31,12,-12,-31,-35,-14,42],
[18,-22,-17,3,18,18,3,-17,-22,18],
[-6,14,-1,-11,-6,6,11,1,-14,6],[3,-11,10,6,-8,-8,6,10,-11,3],
[9,-47,86,-42,-56,56,42,-86,47,-9],
[1,-7,20,-28,14,14,-28,20,-7,1],
[-1,9,-36,84,-126,126,-84,36,-9,1]]]
dindex = 0
# Prepare a list to be filled with arrays of D-variables, array per within-
# subject combo (i.e., for 2 w/i subj factors E and F ... E, F, ExF)
NDs = [0]* Nwsources
for source in range(Nwsources):
if subset(source,Bwithins):
NDs[dindex] = numlevels(source,Nlevels)
dindex = dindex + 1
# Collapse multiple repetitions on the same subject and same condition
cdata = pstat.collapse(data,list(range(Nfactors+1)),-1,None,None,mean)
# Find a value that's not a data score with which to fill the array DA
dummyval = -1
datavals = pstat.colex(data,-1)
while dummyval in datavals: # find a value that's not a data score
dummyval = dummyval - 1
DA = N.ones(Nlevels,'f')*dummyval # create plenty of data-slots to fill
if len(Bscols) == 1: # ie., if no btw-subj factors
# 1 (below) needed because we need 2D array even w/ only 1 group of subjects
subjslots = N.ones((Nsubjects,1))
else: # create array to hold 1s (subj present) and 0s (subj absent)
subjslots = N.zeros(Nblevels)
for i in range(len(data)): # for every datapoint given as input
idx = []
for j in range(Nfactors+1): # get n-D bin idx for this datapoint
new = alluniqueslist[j].index(data[i][j])
idx.append(new)
DA[idx] = data[i][-1] # put this data point in proper place in DA
btwidx = N.take(idx,N.array(Bscols))
subjslots[btwidx] = 1
# DONE CREATING DATA ARRAY, DA ... #dims = numfactors+1, dim 0=subjects
# dim -1=measured values, dummyval = values used to fill empty slots in DA
# PREPARE FOR MAIN LOOP
dcount = -1 # prepare for pre-increment of D-variable pointer
Bwsources = [] # binary #s, each=source containing w/i subj factors
Bwonly_sources = [] # binary #s, each=source of w/i-subj-ONLY factors
D = N.zeros(Nwsources,N.PyObject) # one slot for each Dx,2**Nwifactors
DM = [0] *Nwsources # Holds arrays of cell-means
DN = [0] *Nwsources # Holds arrays of cell-ns
# BEGIN MAIN LOOP!!!!!
# BEGIN MAIN LOOP!!!!!
# BEGIN MAIN LOOP!!!!!
for source in range(3,Nallsources,2): # all sources that incl. subjects
if ((source-1) & Bwithins) != 0: # 1 or more w/i subj sources?
Bwsources.append(source-1) # add it to a list
#
# WITHIN-SUBJECT-ONLY TERM? IF SO ... NEED TO CALCULATE NEW D-VARIABLE
# (per Maxwell & Delaney pp.622-4)
if subset((source-1),Bwithins):
# Keep track of which D-var set we're working with (De, Df, Def, etc.)
dcount = dcount + 1
Bwonly_sources.append(source-1) #add source, minus subj,to list
dwsc = 1.0 * DA # get COPY of w/i-subj data array
# Find all non-source columns, note ~source alone (below) -> negative number
Bnonsource = (Nallsources-1) & ~source
Bwscols = makebin(Wscols) # make a binary version of Wscols
# Figure out which cols from the ORIGINAL (input) data matrix are both non-
# source and also within-subj vars (excluding subjects col)
Bwithinnonsource = Bnonsource & Bwscols
# Next, make a list of the above. The list is a list of dimensions in DA
# because DA has the same number of dimensions as there are factors
# (including subjects), but with extra dummyval='-1' values than the original
# data array (assuming between-subj vars exist)
Lwithinnonsource = makelist(Bwithinnonsource,Nfactors+1)
# Collapse all non-source, w/i subj dims, FROM THE END (otherwise the
# dim-numbers change as you collapse). THIS WORKS BECAUSE WE'RE
# COLLAPSING ACROSS W/I SUBJECT DIMENSIONS, WHICH WILL ALL HAVE THE
# SAME SUBJ IN THE SAME ARRAY LOCATIONS (i.e., dummyvals will still exist
# but should remain the same value through the amean() function
for i in range(len(Lwithinnonsource)-1,-1,-1):
dwsc = amean(dwsc,Lwithinnonsource[i])
mns = dwsc
# NOW, ACTUALLY COMPUTE THE D-VARIABLE ENTRIES FROM DA
# CREATE LIST OF COEFF-COMBINATIONS TO DO (len=e-1, f-1, (e-1)*(f-1), etc...)
#
# Figure out which cols are both source and within-subjects, including col 0
Bwithinsource = source & Bwscols
# Make a list of within-subj cols, incl subjects col (0)
Lwithinsourcecol = makelist(Bwithinsource, Nfactors+1)
# Make a list of cols that are source within-subj OR btw-subj
Lsourceandbtws = makelist(source | Bbetweens, Nfactors+1)
if Lwithinnonsource != []:
Lwithinsourcecol = list(map(Lsourceandbtws.index,Lwithinsourcecol))
# Now indxlist should hold a list of indices into the list of possible
# coefficients, one row per combo of coefficient. Next line PRESERVES dummyval
dvarshape = N.array(N.take(mns.shape,Lwithinsourcecol[1:])) -1
idxarray = N.indices(dvarshape)
newshape = N.array([idxarray.shape[0],
N.multiply.reduce(idxarray.shape[1:])])
indxlist = N.swapaxes(N.reshape(idxarray,newshape),0,1)
# The following is what makes the D-vars 2D. It takes an n-dim array
# and retains the first (num of factors) dim while making the 2nd dim
# equal to the total number of source within-subject cells.
#
# CREATE ALL D-VARIABLES FOR THIS COMBINATION OF FACTORS
#
for i in range(len(indxlist)):
#
# FILL UP COEFFMATRIX (OF SHAPE = MNS) WITH CORRECT COEFFS FOR 1 D-VAR
#
coeffmatrix = N.ones(mns.shape,N.Float) # fewer dims than DA (!!)
# Make a list of dim #s that are both in source AND w/i subj fact, incl subj
Wsourcecol = makelist(Bwscols&source,Nfactors+1)
# Fill coeffmatrix with a complete set of coeffs (1 per w/i-source factor)
for wfactor in range(len(Lwithinsourcecol[1:])):
#put correct coeff. axis as first axis, or "swap it up"
coeffmatrix = N.swapaxes(coeffmatrix,0,
Lwithinsourcecol[wfactor+1])
# Find appropriate ROW of (static) coefflist we need
nlevels = coeffmatrix.shape[0]
# Get the next coeff in that row
try:
nextcoeff = coefflist[nlevels-1][indxlist[i,wfactor]]
except IndexError:
raise IndexError("anova() can only handle up to 10 levels on a within-subject factor")
for j in range(nlevels):
coeffmatrix[j] = coeffmatrix[j] * nextcoeff[j]
# Swap it back to where it came from
coeffmatrix = N.swapaxes(coeffmatrix,0,
Lwithinsourcecol[wfactor+1])
# CALCULATE D VARIABLE
scratch = coeffmatrix * mns
# Collapse all dimensions EXCEPT subjects dim (dim 0)
for j in range(len(coeffmatrix.shape[1:])):
scratch = N.add.reduce(scratch,1)
if len(scratch.shape) == 1:
scratch.shape = list(scratch.shape)+[1]
try:
# Tack this column onto existing ones
tmp = D[dcount].shape
D[dcount] = pstat.aabut(D[dcount],scratch)
except AttributeError: # i.e., D[dcount]=integer/float
# If this is the first, plug it in
D[dcount] = scratch
# Big long thing to create DMarray (list of DM variables) for this source
variables = D[dcount].shape[1] # Num variables for this source
tidx = list(range(1,len(subjslots.shape))) + [0] # [0] = Ss dim
tsubjslots = N.transpose(subjslots,tidx) # put Ss in last dim
DMarray = N.zeros(list(tsubjslots.shape[0:-1]) +
[variables],'f') # btw-subj dims, then vars
DNarray = N.zeros(list(tsubjslots.shape[0:-1]) +
[variables],'f') # btw-subj dims, then vars
idx = [0] *len(tsubjslots.shape[0:-1])
idx[0] = -1
loopcap = N.array(tsubjslots.shape[0:-1]) -1
while incr(idx,loopcap) != -1:
DNarray[idx] = float(asum(tsubjslots[idx]))
thismean = (N.add.reduce(tsubjslots[idx] * # 1=subj dim
N.transpose(D[dcount]),1) /
DNarray[idx])
thismean = N.array(thismean,N.PyObject)
DMarray[idx] = thismean
DM[dcount] = DMarray
DN[dcount] = DNarray
#
# DONE CREATING M AND D VARIABLES ... TIME FOR SOME SS WORK
# DONE CREATING M AND D VARIABLES ... TIME FOR SOME SS WORK
#
if Bscols[1:] != []:
BNs = pstat.colex([Nlevels],Bscols[1:])
else:
BNs = [1]
#
# FIGURE OUT WHICH VARS TO RESTRICT, see p.680 (Maxwell&Delaney)
#
# BETWEEN-SUBJECTS VARIABLES ONLY, use M variable for analysis
#
if ((source-1) & Bwithins) == 0: # btw-subjects vars only?
sourcecols = makelist(source-1,Nfactors+1)
# Determine cols (from input list) required for n-way interaction
Lsource = makelist((Nallsources-1)&Bbetweens,Nfactors+1)
# NOW convert this list of between-subject column numbers to a list of
# DIMENSIONS in M, since M has fewer dims than the original data array
# (assuming within-subj vars exist); Bscols has list of between-subj cols
# from input list, the indices of which correspond to that var's loc'n in M
btwcols = list(map(Bscols.index,Lsource))
# Obviously-needed loop to get cell means is embedded in the collapse fcn, -1
# represents last (measured-variable) column, None=std, 1=retain Ns
hn = aharmonicmean(Narray,-1) # -1=unravel first
# CALCULATE SSw ... SUBTRACT APPROPRIATE CELL MEAN FROM EACH SUBJ SCORE
SSw = 0.0
idxlist = pstat.unique(pstat.colex(M,btwcols))
for row in M:
idx = []
for i in range(len(row[:-1])):
idx.append(alluniqueslist[Bscols[i]].index(row[i]))
idx = idx[1:] # Strip off Ss col/dim
newval = row[-1] - Marray[idx]
SSw = SSw + (newval)**2
# Determine which cols from input are required for this source
Lsource = makelist(source-1,Nfactors+1)
# NOW convert this list of between-subject column numbers to a list of
# DIMENSIONS in M, since M has fewer dims than the original data array
# (assuming within-subj vars exist); Bscols has list of between-subj cols
# from input list, the indices of which correspond to that var's loc'n in M
btwsourcecols = (N.array(list(map(Bscols.index,Lsource)))-1).tolist()
# Average Marray and get harmonic means of Narray OVER NON-SOURCE DIMS
Bbtwnonsourcedims = ~source & Bbetweens
Lbtwnonsourcedims = makelist(Bbtwnonsourcedims,Nfactors+1)
btwnonsourcedims = (N.array(list(map(Bscols.index,Lbtwnonsourcedims)))-1).tolist()
## Average Marray over non-source dimensions (1=keep squashed dims)
sourceMarray = amean(Marray,btwnonsourcedims,1)
## Calculate harmonic means for each level in source
sourceNarray = aharmonicmean(Narray,btwnonsourcedims,1)
## Calc grand average (ga), used for ALL effects
ga = asum((sourceMarray*sourceNarray)/
asum(sourceNarray))
ga = N.reshape(ga,N.ones(len(Marray.shape)))
## If GRAND interaction, use harmonic mean of ALL cell Ns
if source == Nallsources-1:
sourceNarray = aharmonicmean(Narray)
## Calc all SUBSOURCES to be subtracted from sourceMarray (M&D p.320)
sub_effects = 1.0 * ga # start with grand mean
for subsource in range(3,source,2):
## Make a list of the non-subsource dimensions
if subset(subsource-1,source-1):
sub_effects = (sub_effects +
alleffects[alleffsources.index(subsource)])
## Calc this effect (a(j)'s, b(k)'s, ab(j,k)'s, whatever)
effect = sourceMarray - sub_effects
## Save it so you don't have to calculate it again next time
alleffects.append(effect)
alleffsources.append(source)
## Calc and save sums of squares for this source
SS = asum((effect**2 *sourceNarray) *
N.multiply.reduce(N.take(Marray.shape,btwnonsourcedims)))
## Save it so you don't have to calculate it again next time
SSlist.append(SS)
SSsources.append(source)
collapsed = pstat.collapse(M,btwcols,-1,None,len,mean)
# Obviously needed for-loop to get source cell-means embedded in collapse fcns
contrastmns = pstat.collapse(collapsed,btwsourcecols,-2,sterr,len,mean)
# Collapse again, this time SUMMING instead of averaging (to get cell Ns)
contrastns = pstat.collapse(collapsed,btwsourcecols,-1,None,None,
N.sum)
# Collapse again, this time calculating harmonicmeans (for hns)
contrasthns = pstat.collapse(collapsed,btwsourcecols,-1,None,None,
harmonicmean)
# CALCULATE *BTW-SUBJ* dfnum, dfden
sourceNs = pstat.colex([Nlevels],makelist(source-1,Nfactors+1))
dfnum = N.multiply.reduce(N.ravel(N.array(sourceNs)-1))
dfden = Nsubjects - N.multiply.reduce(N.ravel(BNs))
# CALCULATE MS, MSw, F AND PROB FOR ALL-BETWEEN-SUBJ SOURCES ONLY
MS = SS / dfnum
MSw = SSw / dfden
if MSw != 0:
f = MS / MSw
else:
f = 0 # i.e., absolutely NO error in the full model
if f >= 0:
prob = fprob(dfnum, dfden, f)
else:
prob = 1.0
# Now this falls thru to output stage
#
# SOME WITHIN-SUBJECTS FACTORS TO DEAL WITH ... use appropriate D variable
#
else: # Source has some w/i subj factors
# FIGURE OUT WHICH D-VAR TO USE BASED ON WHICH W/I-SUBJ FACTORS ARE IN SOURCE
# Determine which w/i-subj factors are in this source
sourcewithins = (source-1) & Bwithins
# Use D-var that was created for that w/i subj combo (the position of that
# source within Bwsources determines the index of that D-var in D)
workD = D[Bwonly_sources.index(sourcewithins)]
# CALCULATE Er, Ef
## Set up workD and subjslots for upcoming calcs
if len(workD.shape)==1:
workD = workD[:,N.NewAxis]
if len(subjslots.shape)==1:
subjslots = subjslots[:,N.NewAxis]
## Calculate full-model sums of squares
ef = Dfull_model(workD,subjslots) # Uses cell-means model
#
# **ONLY** WITHIN-SUBJECT VARIABLES TO CONSIDER
#
if subset((source-1),Bwithins):
# restrict grand mean, as per M&D p.680
er = Drestrict_mean(workD,subjslots)
#
# **BOTH** WITHIN- AND BETWEEN-SUBJECTS VARIABLES TO CONSIDER
#
else:
er = Drestrict_source(workD,subjslots,source) + ef
SSw = LA.determinant(ef)
SS = LA.determinant(er) - SSw
# CALCULATE *W/I-SUBJ* dfnum, dfden
sourceNs = pstat.colex([Nlevels],makelist(source,Nfactors+1))
# Calculation of dfnum is straightforward regardless
dfnum = N.multiply.reduce(N.ravel(N.array(sourceNs)-1)[1:])
# If only within-subject factors are involved, dfden is straightforward
if subset(source-1,Bwithins):
dfden = Nsubjects -N.multiply.reduce(N.ravel(BNs))-dfnum +1
MS = SS / dfnum
MSw = SSw / dfden
if MSw != 0:
f = MS / MSw
else:
f = 0 # i.e., absolutely NO error in full model
if f >= 0:
prob = fprob(dfnum, dfden, f)
else:
prob = 1.0
# If combined within-between source, must use Rao's approximation for dfden
# from Tatsuoka, MM (1988) Multivariate Analysis (2nd Ed), MacMillan: NY p93
else: # it's a within-between combo source
try:
p = workD.shape[1]
except IndexError:
p = 1
k = N.multiply.reduce(N.ravel(BNs))
m = Nsubjects -1 -(p+k)/2.0
d_en = float(p**2 + (k-1)**2 - 5)
if d_en == 0.0:
s = 1.0
else:
s = math.sqrt(((p*(k-1))**2-4) / d_en)
dfden = m*s - dfnum/2.0 + 1
# Given a within-between combined source, Wilk's Lambda is appropriate
if LA.determinant(er) != 0:
lmbda = LA.determinant(ef) / LA.determinant(er)
W = math.pow(lmbda,(1.0/s))
f = ((1.0-W)/W) * (dfden/dfnum)
else:
f = 0 # i.e., absolutely NO error in restricted model
if f >= 0:
prob = fprob(dfnum,dfden,f)
else:
prob = 1.0
#
# CREATE STRING-LIST FOR RESULTS FROM THIS PARTICULAR SOURCE
#
suffix = '' # for *s after the p-value
if prob < 0.001: suffix = '***'
elif prob < 0.01: suffix = '**'
elif prob < 0.05: suffix = '*'
adjsourcecols = N.array(makelist(source-1,Nfactors+1)) -1
thiseffect = ''
for col in adjsourcecols:
if len(adjsourcecols) > 1:
thiseffect = thiseffect + effects[col][0]
else:
thiseffect = thiseffect + (effects[col])
outputlist = (outputlist
# These terms are for the numerator of the current effect/source
+ [[thiseffect, round4(SS),dfnum,
round4(SS/float(dfnum)),round4(f),
round4(prob),suffix]]
# These terms are for the denominator for the current effect/source
+ [[thiseffect+'/w', round4(SSw),dfden,
round4(SSw/float(dfden)),'','','']]
+ [['\n']])
#
# PRINT OUT ALL MEANS AND Ns FOR THIS SOURCE (i.e., this combo of factors)
#
Lsource = makelist(source-1,Nfactors+1)
collapsed = pstat.collapse(cdata,Lsource,-1,sterr,len,mean)
# First, get the list of level-combos for source cells
prefixcols = list(range(len(collapsed[0][:-3])))
outlist = pstat.colex(collapsed,prefixcols)
# Start w/ factor names (A,B,C, or ones input to anova())
eff = []
for col in Lsource:
eff.append(effects[col-1])
# Add in the mean and N labels for printout
for item in ['MEAN','STERR','N']:
eff.append(item)
# To the list of level-combos, abut the corresp. means and Ns
outlist = pstat.abut(outlist,
list(map(round4,pstat.colex(collapsed,-3))),
list(map(round4,pstat.colex(collapsed,-2))),
list(map(round4,pstat.colex(collapsed,-1))))
outlist = [eff] + outlist # add titles to the top of the list
pstat.printcc(outlist) # print it in customized columns
print()
###
### OUTPUT FINAL RESULTS (ALL SOURCES TOGETHER)
### Note: All 3 types of source-calcs fall through to here
###
print()
title = [['FACTORS: ','RANDOM'] + effects[:Nfactors]]
title = title + [['LEVELS: ']+Nlevels]
facttypes = ['BETWEEN']*Nfactors
for i in range(len(Wscols[1:])):
facttypes[Wscols[i+1]-1] = 'WITHIN'
title = title + [['TYPE: ','RANDOM']+facttypes]
pstat.printcc(title)
print()
title = [['Effect','SS','DF','MS','F','p','sig']] + ['dashes']
outputlist = title + outputlist
pstat.printcc(outputlist)
return
def Dfull_model(workd,subjslots):
"""
RESTRICTS NOTHING (i.e., FULL MODEL CALCULATION). Subtracts D-variable
cell-mean for each between-subj group and then calculates the SS array.
"""
workd = subtr_cellmeans(workd,subjslots)
sserr = multivar_SScalc(workd)
return sserr
def Drestrict_mean(workd,subjslots):
"""
RESTRICTS GRAND MEAN. Subtracts D-variable cell-mean for each between-
subj group, and then adds back each D-variable's grand mean.
"""
# subtract D-variable cell-mean for each (btw-subj) group
errors = subtr_cellmeans(workd,subjslots)
# add back in appropriate grand mean from individual scores
grandDmeans = amean(workd,0,1)
errors = errors + N.transpose(grandDmeans) # errors has reversed dims!!
# SS for mean-restricted model is calculated below. Note: already put
# subj as last dim because later code expects this code here to leave
# workd that way
sserr = multivar_SScalc(errors)
return sserr
def Drestrict_source(workd,subjslots,source):
"""
Calculates error for a given model on array workd. Subjslots is an
array of 1s and 0s corresponding to whether or not the subject is a
member of that between-subjects variable combo. source is the code
for the type of model to calculate. source=-1 means no restriction;
source=0 means to restrict workd's grand mean; source>0 means to
restrict the columns of the main data array, DA, specified (in binary)
by the source-value.
Usage: Drestrict_source(workd,subjslots,source)   source: -1=nothing, 0=mean
Returns: SS array for multivariate F calculation
"""
###
### RESTRICT COLUMNS/DIMENSIONS SPECIFIED IN source (BINARY)
### (i.e., is the value of source not equal to 0 or -1?)
###
if source > 0:
sourcewithins = (source-1) & Bwithins
sourcebetweens = (source-1) & Bbetweens
dindex = Bwonly_sources.index(sourcewithins)
all_cellmeans = N.transpose(DM[dindex],[-1]+list(range(0,len(DM[dindex].shape)-1)))
all_cellns = N.transpose(DN[dindex],[-1]+list(range(0,len(DN[dindex].shape)-1)))
hn = aharmonicmean(all_cellns)
levels = D[dindex].shape[1] # GENERAL, 'cause each workd is always 2D
SSm = N.zeros((levels,levels),'f') #called RCm=SCm in Lindman,p.317-8
tworkd = N.transpose(D[dindex])
## Calculate SSw, within-subj variance (Lindman approach)
RSw = N.zeros((levels,levels),'f')
RSinter = N.zeros((levels,levels),N.PyObject)
for i in range(levels):
for j in range(i,levels):
RSw[i,j] = RSw[j,i] = N.sum(tworkd[i]*tworkd[j])
cross = all_cellmeans[i] * all_cellmeans[j]
multfirst = asum(cross*all_cellns[i])
RSinter[i,j] = RSinter[j,i] = N.asarray(multfirst)
SSm[i,j] = SSm[j,i] = (amean(all_cellmeans[i]) *
amean(all_cellmeans[j]) *
len(all_cellmeans[i]) *hn)
SSw = RSw - RSinter
### HERE BEGINS THE MAXWELL & DELANEY APPROACH TO CALCULATING SS
Lsource = makelist(sourcebetweens,Nfactors+1)
btwsourcecols = (N.array(list(map(Bscols.index,Lsource)))-1).tolist()
Bbtwnonsourcedims = ~source & Bbetweens
Lbtwnonsourcedims = makelist(Bbtwnonsourcedims,Nfactors+1)
btwnonsourcedims = (N.array(list(map(Bscols.index,Lbtwnonsourcedims)))-1).tolist()
## Average Marray over non-source dimensions
sourceDMarray = DM[dindex] *1.0
for dim in btwnonsourcedims: # collapse all non-source dims
if dim == len(DM[dindex].shape)-1:
raise ValueError("Crashing ... shouldn't ever collapse ACROSS variables")
sourceDMarray = amean(sourceDMarray,dim,1)
## Calculate harmonic means for each level in source
sourceDNarray = aharmonicmean(DN[dindex],btwnonsourcedims,1)
## Calc grand average (ga), used for ALL effects
variableNs = asum(sourceDNarray,
list(range(len(sourceDMarray.shape)-1)))
ga = asum((sourceDMarray*sourceDNarray) /
variableNs,
list(range(len(sourceDMarray.shape)-1)),1)
## If GRAND interaction, use harmonic mean of ALL cell Ns
if source == Nallsources-1:
sourceDNarray = aharmonicmean(DN[dindex],
list(range(len(sourceDMarray.shape)-1)))
## Calc all SUBSOURCES to be subtracted from sourceMarray (M&D p.320)
sub_effects = ga *1.0 # start with grand mean
for subsource in range(3,source-2,2):
## Make a list of the non-subsource dimensions
subsourcebtw = (subsource-1) & Bbetweens
if (propersubset(subsource-1,source-1) and
(subsource-1)&Bwithins == (source-1)&Bwithins and
(subsource-1) != (source-1)&Bwithins):
sub_effects = (sub_effects +
alleffects[alleffsources.index(subsource)])
## Calc this effect (a(j)'s, b(k)'s, ab(j,k)'s, whatever)
effect = sourceDMarray - sub_effects
## Save it so you don't have to calculate it again next time
alleffects.append(effect)
alleffsources.append(source)
## Calc and save sums of squares for this source
SS = N.zeros((levels,levels),'f')
SS = asum((effect**2 *sourceDNarray) *
N.multiply.reduce(N.take(DM[dindex].shape,btwnonsourcedims)),
list(range(len(sourceDMarray.shape)-1)))
## Save it so you don't have to calculate it again next time
SSlist.append(SS)
SSsources.append(source)
return SS
def multivar_SScalc(workd):
###
### DO SS CALCS ON THE OUTPUT FROM THE SOURCE=0 AND SOURCE=-1 CASES
###
# this section expects workd to have subj. in LAST dimension!!!!!!
if len(workd.shape) == 1:
levels = 1
else:
levels = workd.shape[0] # works because workd is always 2D
sserr = N.zeros((levels,levels),'f')
for i in range(levels):
for j in range(i,levels):
ssval = N.add.reduce(workd[i]*workd[j])
sserr[i,j] = ssval
sserr[j,i] = ssval
return sserr
def subtr_cellmeans(workd,subjslots):
"""
Subtract all cell means when within-subjects factors are present ...
i.e., calculate full-model using a D-variable.
"""
# Get a list of all dims that are source and between-subj
sourcedims = makelist(Bbetweens,Nfactors+1)
# Now, fix this list by mapping the dims from the original source
# to dims for a between-subjects variable (namely, subjslots)
transidx = list(range(len(subjslots.shape)))[1:] + [0] # put subj dim at end
tsubjslots = N.transpose(subjslots,transidx) # get all Ss for this idx
tworkd = N.transpose(workd) # swap subj. and variable dims
errors = 1.0 * tworkd
if len(sourcedims) == 0:
idx = [-1]
loopcap = [0]
if len(sourcedims) != 0:
btwsourcedims = list(map(Bscols.index,sourcedims))
idx = [0] * len(btwsourcedims)
idx[0] = -1 # compensate for pre-increment of 1st slot in incr()
# Get a list of the maximum values each factor can handle
loopcap = N.take(N.array(Nlevels),sourcedims)-1
### WHILE STILL MORE GROUPS, CALCULATE GROUP MEAN FOR EACH D-VAR
while incr(idx,loopcap) != -1: # loop through source btw level-combos
mask = tsubjslots[idx]
thisgroup = tworkd*mask[N.NewAxis,:]
groupmns = amean(N.compress(mask,thisgroup),1)
### THEN SUBTRACT THEM FROM APPROPRIATE SUBJECTS
errors = errors - N.multiply.outer(groupmns,mask)
return errors
def F_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
"""
Calculation of Wilks lambda F-statistic for multivariate data, per
Maxwell & Delaney p.657.
Usage: F_value_wilks_lambda(ER,EF,dfnum,dfden,a,b)
"""
if type(ER) in [int, float]:
ER = N.array([[ER]])
if type(EF) in [int, float]:
EF = N.array([[EF]])
lmbda = LA.determinant(EF) / LA.determinant(ER)
if (a-1)**2 + (b-1)**2 == 5:
q = 1
else:
q = math.sqrt( ((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 -5) )
n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1)
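    # NOTE: `m` is not defined in this function's scope; as written it would have to
    # come from the caller. In aanova() above the analogous quantity is
    # m = Nsubjects - 1 - (p + k)/2.0 (Rao's approximation), so that value would
    # need to be supplied before relying on this helper.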
d_en = lmbda**(1.0/q) / (m*q - 0.5*(a-1)*(b-1) + 1)
return n_um / d_en
def member(factor,source):
return (1 << factor) & source != 0
def setsize(source):
size = 0
for bit in source:
if bit == 1:
size = size + 1
return size
def subset (a,b):
return (a&b)==a
def propersubset (a,b):
sub = ((a&b)==a)
if a==b:
sub = 0
return sub
def numlevels(source,Nlevels):
for i in range(30): # find the smallest i such that 2**i >= source
if 1<<i >= source:
break
levelcount = 1
for j in range(i): # loop up through each bit
if subset(1<<j,source):
levelcount = levelcount * Nlevels[j] - 1
return levelcount
def numbitson(a):
numon = 0
while a>0:
numon = numon + a%2
a = a>>1
return numon
def makebin(sourcelist):
outbin = 0
for item in sourcelist:
outbin = outbin + 2**item
return outbin
def makelist(source,ncols):
levellist = []
for j in range(ncols):
if subset(1<<j,source):
levellist.append(j)
return levellist
def round4(num):
try:
return round(num,4)
except:
return 'N/A'
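# Hedged sketch of the bit-mask "source" encoding used throughout aanova(): each
# factor is one bit, a source is a factor combination, and the helpers above move
# between the integer mask and the list-of-columns representation.
def _example_source_encoding():
    source = makebin([1, 3])          # factors in columns 1 and 3 -> 0b1010 == 10
    assert source == 10
    assert makelist(source, ncols=5) == [1, 3]
    assert numbitson(source) == 2
    assert subset(1 << 1, source) and not subset(1 << 2, source)
    assert propersubset(1 << 3, source) and not propersubset(source, source)
    return source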
|
"""
Image resampling methods.
"""
import numpy as np
import scipy.interpolate
import scipy.ndimage
from sunpy.util.exceptions import warn_deprecated
__all__ = ['resample', 'reshape_image_to_4d_superpixel']
def resample(orig, dimensions, method='linear', center=False, minusone=False):
"""
Returns a new `numpy.ndarray` that has been resampled up or down.
Arbitrary resampling of source array to new dimension sizes.
Currently only supports maintaining the same number of dimensions.
To use 1-D arrays, first promote them to shape (x,1).
Uses the same parameters and creates the same co-ordinate lookup points
as IDL's ``congrid`` routine (which apparently originally came from a
VAX/VMS routine of the same name.)
Parameters
----------
orig : `numpy.ndarray`
Original input array.
dimensions : `tuple`
Dimensions that new `numpy.ndarray` should have.
method : {``"neighbor"``, ``"nearest"``, ``"linear"``, ``"spline"``}, optional
Method to use for resampling interpolation.
* nearest and linear - Uses "n x 1D" interpolations calculated by
`scipy.interpolate.interp1d`.
* spline - Uses `scipy.ndimage.map_coordinates`
center : `bool`, optional
If `False` (default) the interpolation points are at the front edge of the bin.
If `True`, interpolation points are at the centers of the bins
minusone : `bool`, optional
For ``orig.shape = (i,j)`` & new dimensions ``= (x,y)``, if set to `False`
(default) ``orig`` is resampled by factors of ``(i/x) * (j/y)``,
otherwise ``orig`` is resampled by ``(i-1)/(x-1) * (j-1)/(y-1)``.
This prevents extrapolation one element beyond bounds of input array.
Returns
-------
out : `numpy.ndarray`
A new `numpy.ndarray` which has been resampled to the desired dimensions.
References
----------
https://scipy-cookbook.readthedocs.io/items/Rebinning.html
"""
# Verify that number dimensions requested matches original shape
if len(dimensions) != orig.ndim:
raise UnequalNumDimensions("Number of dimensions must remain the same "
"when calling resample.")
# TODO: Will this be okay for integer (e.g. JPEG 2000) data?
if orig.dtype not in [np.float64, np.float32]:
orig = orig.astype(np.float64)
dimensions = np.asarray(dimensions, dtype=np.float64)
m1 = np.array(minusone, dtype=np.int64) # array(0) or array(1)
offset = np.float64(center * 0.5) # float64(0.) or float64(0.5)
# Resample data
if method == 'neighbor':
warn_deprecated('Using "neighbor" as a method for resampling is deprecated. '
'Use "nearest" instead.')
data = _resample_neighbor(orig, dimensions, offset, m1)
elif method in ['nearest', 'linear']:
data = _resample_nearest_linear(orig, dimensions, method,
offset, m1)
elif method == 'spline':
data = _resample_spline(orig, dimensions, offset, m1)
else:
raise UnrecognizedInterpolationMethod("Unrecognized interpolation "
"method requested.")
return data
def _resample_nearest_linear(orig, dimensions, method, offset, m1):
"""
Resample Map using either linear or nearest interpolation.
Parameters
----------
orig : array-like
Original data.
dimensions : `tuple`
Dimensions of resampled data.
method : `str`
Interpolation method passed to `~scipy.interpolate.interpn`
offset : `float`
Either 0 or 0.5, depending on whether interpolation is at the edge or
centers of bins.
m1 : 0 or 1
For ``orig.shape = (i,j)`` & new dimensions ``= (x,y)``, if set to `False`
(default) ``orig`` is resampled by factors of ``(i/x) * (j/y)``,
otherwise ``orig`` is resampled by ``(i-1)/(x-1) * (j-1)/(y-1)``.
This prevents extrapolation one element beyond bounds of input array.
"""
old_coords = [np.arange(i, dtype=float) + offset for i in orig.shape]
scale = (orig.shape - m1) / (dimensions - m1)
new_coords = [(np.arange(dimensions[i], dtype=float) + offset) * scale[i] for i in
range(len(dimensions))]
new_coords = np.stack(np.meshgrid(*new_coords, indexing='ij'), axis=-1)
# fill_value = None extrapolates outside the domain
new_data = scipy.interpolate.interpn(old_coords, orig, new_coords,
method=method, bounds_error=False,
fill_value=None)
return new_data
def _resample_neighbor(orig, dimensions, offset, m1):
"""
Resample Map using closest-value interpolation.
"""
# This can be deleted once the deprecation above in resample is expired
dimlist = []
dimensions = np.asarray(dimensions, dtype=int)
for i in range(orig.ndim):
base = np.indices(dimensions)[i]
dimlist.append((orig.shape[i] - m1) / (dimensions[i] - m1) *
(base + offset) - offset)
cd = np.array(dimlist).round().astype(int)
return orig[tuple(list(cd))]
def _resample_spline(orig, dimensions, offset, m1):
"""
Resample Map using spline-based interpolation.
"""
nslices = [slice(0, j) for j in list(dimensions)]
newcoords = np.mgrid[nslices]
newcoords_dims = list(range(newcoords.ndim))
# make first index last
newcoords_dims.append(newcoords_dims.pop(0))
newcoords_tr = newcoords.transpose(newcoords_dims)
# makes a view that affects newcoords
newcoords_tr += offset
deltas = (np.asarray(orig.shape) - m1) / (dimensions - m1)
newcoords_tr *= deltas
newcoords_tr -= offset
return scipy.ndimage.map_coordinates(orig, newcoords)
def reshape_image_to_4d_superpixel(img, dimensions, offset):
"""
Re-shape the two-dimensional input image into a four-dimensional array whose
second and fourth dimensions hold the number of original pixels (in the "y"
and "x" directions respectively) that form one superpixel. The reshaping makes it
very easy to perform operations on superpixels.
An application of this reshaping is the following. Let's say you have an
array::
x = np.array([[0, 0, 0, 1, 1, 1],
[0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1],
[1, 0, 1, 0, 1, 1],
[0, 0, 1, 0, 0, 0]])
and you want to sum over 2x2 non-overlapping sub-arrays. For example, you
could have a noisy image and you want to increase the signal-to-noise ratio.
Summing over all the non-overlapping 2x2 sub-arrays will create a
superpixel array of the original data. Every pixel in the superpixel array
is the sum of the values in a 2x2 sub-array of the original array.
This summing can be done by reshaping the array::
y = x.reshape(3,2,3,2)
and then summing over axes 1 and 3::
y2 = y.sum(axis=3).sum(axis=1)
which gives the expected array::
array([[0, 3, 2],
[2, 0, 4],
[1, 2, 2]])
Parameters
----------
img : `numpy.ndarray`
A two-dimensional `numpy.ndarray` of the form ``(y, x)``.
dimensions : array-like
A two element array-like object containing integers that describe the
superpixel summation in the ``(y, x)`` directions.
offset : array-like
A two element array-like object containing integers that describe
where in the input image the array reshaping begins in the ``(y, x)``
directions.
Returns
-------
A four dimensional `numpy.ndarray` that can be used to easily create
two-dimensional arrays of superpixels of the input image.
References
----------
https://mail.scipy.org/pipermail/numpy-discussion/2010-July/051760.html
"""
# make sure the input dimensions are integers
dimensions = [int(dim) for dim in dimensions]
# New dimensions of the final image
na = int(np.floor((img.shape[0] - offset[0]) / dimensions[0]))
nb = int(np.floor((img.shape[1] - offset[1]) / dimensions[1]))
# Reshape up to a higher dimensional array which is useful for higher
# level operations
return (img[int(offset[0]):int(offset[0] + na * dimensions[0]),
int(offset[1]):int(offset[1] + nb * dimensions[1])]).reshape(na, dimensions[0], nb, dimensions[1])
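# Usage sketch (not part of sunpy itself): reproduce the docstring example above
# by building 2x2 superpixels of the 6x6 array and summing over them.
if __name__ == "__main__":
    x = np.array([[0, 0, 0, 1, 1, 1],
                  [0, 0, 1, 1, 0, 0],
                  [1, 1, 0, 0, 1, 1],
                  [0, 0, 0, 0, 1, 1],
                  [1, 0, 1, 0, 1, 1],
                  [0, 0, 1, 0, 0, 0]])
    y = reshape_image_to_4d_superpixel(x, (2, 2), (0, 0))
    print(y.sum(axis=3).sum(axis=1))    # [[0 3 2] [2 0 4] [1 2 2]]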
class UnrecognizedInterpolationMethod(ValueError):
"""
Unrecognized interpolation method specified.
"""
class UnequalNumDimensions(ValueError):
"""
Number of dimensions does not match input array.
"""
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from swiftclient import utils as swift_utils
from ironic.common import exception as exc
from ironic.common.glance_service import base_image_service
from ironic.common.glance_service import service
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _
from ironic.common import utils
glance_opts = [
cfg.ListOpt('allowed_direct_url_schemes',
default=[],
help='A list of URL schemes that can be downloaded directly '
'via the direct_url. Currently supported schemes: '
'[file].'),
# To upload this key to Swift:
# swift post -m Temp-Url-Key:correcthorsebatterystaple
cfg.StrOpt('swift_temp_url_key',
help='The secret token given to Swift to allow temporary URL '
'downloads. Required for temporary URLs.',
secret=True),
cfg.IntOpt('swift_temp_url_duration',
default=1200,
help='The length of time in seconds that the temporary URL '
'will be valid for. Defaults to 20 minutes. If some '
'deploys get a 401 response code when trying to download '
'from the temporary URL, try raising this duration.'),
cfg.StrOpt('swift_endpoint_url',
help='The "endpoint" (scheme, hostname, optional port) for '
'the Swift URL of the form '
'"endpoint_url/api_version/account/container/object_id". '
'Do not include trailing "/". '
'For example, use "https://swift.example.com". '
'Required for temporary URLs.'),
cfg.StrOpt('swift_api_version',
default='v1',
help='The Swift API version to create a temporary URL for. '
'Defaults to "v1". Swift temporary URL format: '
'"endpoint_url/api_version/account/container/object_id"'),
cfg.StrOpt('swift_account',
help='The account that Glance uses to communicate with '
'Swift. The format is "AUTH_uuid". "uuid" is the '
'UUID for the account configured in the glance-api.conf. '
'Required for temporary URLs. For example: '
'"AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30". '
'Swift temporary URL format: '
'"endpoint_url/api_version/account/container/object_id"'),
cfg.StrOpt('swift_container',
default='glance',
help='The Swift container Glance is configured to store its '
'images in. Defaults to "glance", which is the default '
'in glance-api.conf. '
'Swift temporary URL format: '
'"endpoint_url/api_version/account/container/object_id"'),
]
CONF = cfg.CONF
CONF.register_opts(glance_opts, group='glance')
class GlanceImageService(base_image_service.BaseImageService,
service.ImageService):
def detail(self, **kwargs):
return self._detail(method='list', **kwargs)
def show(self, image_id):
return self._show(image_id, method='get')
def download(self, image_id, data=None):
return self._download(image_id, method='data', data=data)
def create(self, image_meta, data=None):
image_id = self._create(image_meta, method='create', data=None)['id']
return self.update(image_id, None, data)
def update(self, image_id, image_meta, data=None, purge_props=False):
# NOTE(ghe): purge_props not working until bug 1206472 solved
return self._update(image_id, image_meta, data, method='update',
purge_props=False)
def delete(self, image_id):
return self._delete(image_id, method='delete')
def swift_temp_url(self, image_info):
"""Generate a no-auth Swift temporary URL.
This function will generate the temporary Swift URL using the image
id from Glance and the config options: 'swift_endpoint_url',
'swift_api_version', 'swift_account' and 'swift_container'.
The temporary URL will be valid for 'swift_temp_url_duration' seconds.
This allows Ironic to download a Glance image without passing around
an auth_token.
:param image_info: The return from a GET request to Glance for a
certain image_id. Should be a dictionary, with keys like 'name' and
'checksum'. See
http://docs.openstack.org/developer/glance/glanceapi.html for
examples.
:returns: A signed Swift URL from which an image can be downloaded,
without authentication.
:raises: InvalidParameterValue if Swift config options are not set
correctly.
:raises: ImageUnacceptable if the image info from Glance does not
have an image ID.
"""
self._validate_temp_url_config()
if ('id' not in image_info or not
utils.is_uuid_like(image_info['id'])):
raise exc.ImageUnacceptable(_(
'The given image info does not have a valid image id: %s')
% image_info)
url_fragments = {
'endpoint_url': CONF.glance.swift_endpoint_url,
'api_version': CONF.glance.swift_api_version,
'account': CONF.glance.swift_account,
'container': CONF.glance.swift_container,
'object_id': image_info['id']
}
template = '/{api_version}/{account}/{container}/{object_id}'
url_path = template.format(**url_fragments)
path = swift_utils.generate_temp_url(
path=url_path,
seconds=CONF.glance.swift_temp_url_duration,
key=CONF.glance.swift_temp_url_key,
method='GET')
return '{endpoint_url}{url_path}'.format(
endpoint_url=url_fragments['endpoint_url'], url_path=path)
def _validate_temp_url_config(self):
"""Validate the required settings for a temporary URL."""
if not CONF.glance.swift_temp_url_key:
raise exc.InvalidParameterValue(_(
'Swift temporary URLs require a shared secret to be created. '
'You must provide "swift_temp_url_key" as a config option.'))
if not CONF.glance.swift_endpoint_url:
raise exc.InvalidParameterValue(_(
'Swift temporary URLs require a Swift endpoint URL. '
'You must provide "swift_endpoint_url" as a config option.'))
if not CONF.glance.swift_account:
raise exc.InvalidParameterValue(_(
'Swift temporary URLs require a Swift account string. '
'You must provide "swift_account" as a config option.'))
if CONF.glance.swift_temp_url_duration < 0:
raise exc.InvalidParameterValue(_(
'"swift_temp_url_duration" must be a positive integer.'))
def _get_location(self, image_id):
"""Returns the direct url representing the backend storage location,
or None if this attribute is not shown by Glance.
"""
image_meta = self.call('get', image_id)
if not service_utils.is_image_available(self.context, image_meta):
raise exc.ImageNotFound(image_id=image_id)
return getattr(image_meta, 'direct_url', None)
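# Illustrative sketch (not part of the Ironic module above): how the temporary
# URL pieces fit together.  The image id below is made up and the config values
# are taken from the option help strings; in the real code
# swift_utils.generate_temp_url() signs the path with swift_temp_url_key and
# appends the temp_url_sig/temp_url_expires query string.
if __name__ == '__main__':
    fragments = {
        'endpoint_url': 'https://swift.example.com',
        'api_version': 'v1',
        'account': 'AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30',
        'container': 'glance',
        'object_id': 'de305d54-75b4-431b-adb2-eb6b9e546014',
    }
    unsigned_path = '/{api_version}/{account}/{container}/{object_id}'.format(**fragments)
    print(fragments['endpoint_url'] + unsigned_path)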
|
# -----------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -----------------------------------------------------------------------------
"""Custom service command tests"""
import unittest
from knack.util import CLIError
import sfctl.custom_service as sf_c
# pylint: disable=invalid-name
class ServiceTests(unittest.TestCase): # pylint: disable=too-many-public-methods
"""Service tests"""
def test_parse_none_correlation_desc(self):
"""Parse None correlation description returns None"""
self.assertIs(sf_c.correlation_desc(None, None), None)
def test_parse_partial_correlation_desc(self):
"""Parse partial correlation description raises error"""
with self.assertRaises(CLIError):
sf_c.correlation_desc('test_svc', None)
def test_parse_complete_correlation_desc(self):
"""Parse a single correlation description"""
res = sf_c.correlation_desc('test', 'Affinity')
self.assertEqual(res.service_name, 'test')
self.assertEqual(res.scheme, 'Affinity')
def test_parse_empty_load_metrics(self):
"""Parse empty load metrics returns None"""
self.assertIsNone(sf_c.parse_load_metrics(''))
def test_parse_none_load_metrics(self):
"""Parse none load metrics returns None"""
self.assertIsNone(sf_c.parse_load_metrics(None))
def test_parse_scaling_policy_test(self):
"""Parse scaling policies"""
res = sf_c.parse_scaling_policy([{
'mechanism':{'kind':'PartitionInstanceCount', 'min_instance_count':2, 'max_instance_count':4, 'scale_increment':2}, #pylint: disable=line-too-long
'trigger':{'kind':'AveragePartitionLoad', 'metric_name':'MetricA', 'upper_load_threshold':20.0, 'lower_load_threshold':10.0, 'scale_interval_in_seconds':1000} #pylint: disable=line-too-long
}, {
'mechanism':{'kind':'AddRemoveIncrementalNamedPartition', 'min_partition_count':3, 'max_partition_count':6, 'scale_increment':2}, #pylint: disable=line-too-long
'trigger':{'kind':'AverageServiceLoad', 'metric_name':'MetricB', 'upper_load_threshold':30.0, 'lower_load_threshold':10.0, 'scale_interval_in_seconds':1000} #pylint: disable=line-too-long
}])
self.assertEqual(len(res), 2)
self.assertEqual(res[0].scaling_trigger.metric_name, 'MetricA')
self.assertEqual(res[0].scaling_trigger.upper_load_threshold, 20.0)
self.assertEqual(res[0].scaling_trigger.lower_load_threshold, 10.0)
self.assertEqual(res[0].scaling_mechanism.max_instance_count, 4)
self.assertEqual(res[1].scaling_trigger.scale_interval_in_seconds, 1000)
self.assertEqual(res[1].scaling_trigger.upper_load_threshold, 30.0)
self.assertEqual(res[1].scaling_trigger.lower_load_threshold, 10.0)
self.assertEqual(res[1].scaling_mechanism.scale_increment, 2)
def test_parse_incomplete_load_metrics(self):
"""Parse single incomplete load metrics definition"""
res = sf_c.parse_load_metrics([{'name': 'test_metric',
'default_load': 10}])
self.assertEqual(len(res), 1)
res = res[0]
self.assertEqual(res.name, 'test_metric')
self.assertIsNone(res.weight)
self.assertIsNone(res.primary_default_load)
self.assertIsNone(res.secondary_default_load)
self.assertEqual(res.default_load, 10)
def test_parse_invalid_placement_policy_type(self):
"""Parsing invalid placement policy type raises error"""
with self.assertRaises(CLIError):
sf_c.parse_placement_policies([{'type': 'test',
'domain_name': 'test'}])
def test_parse_missing_placement_policy_domain_name(self):
"""Parsing missing domain name in placement policy raises error"""
with self.assertRaises(CLIError):
sf_c.parse_placement_policies([{'type': 'PreferPrimaryDomain'}])
def test_parse_all_placement_policy_types(self):
"""Parse all placement policy types"""
from azure.servicefabric.models.service_placement_non_partially_place_service_policy_description import ServicePlacementNonPartiallyPlaceServicePolicyDescription # pylint: disable=line-too-long
from azure.servicefabric.models.service_placement_prefer_primary_domain_policy_description import ServicePlacementPreferPrimaryDomainPolicyDescription # pylint: disable=line-too-long
from azure.servicefabric.models.service_placement_required_domain_policy_description import ServicePlacementRequiredDomainPolicyDescription # pylint: disable=line-too-long
from azure.servicefabric.models.service_placement_require_domain_distribution_policy_description import ServicePlacementRequireDomainDistributionPolicyDescription # pylint: disable=line-too-long
res = sf_c.parse_placement_policies([{
'type': 'NonPartiallyPlaceService'
}, {
'type': 'PreferPrimaryDomain',
'domain_name': 'test_1'
}, {
'type': 'RequireDomain',
'domain_name': 'test-22'
}, {
'type': 'RequireDomainDistribution',
'domain_name': 'test_3'
}])
self.assertIsInstance(
res[0],
ServicePlacementNonPartiallyPlaceServicePolicyDescription
)
self.assertIsInstance(
res[1],
ServicePlacementPreferPrimaryDomainPolicyDescription
)
self.assertEqual(res[1].domain_name, 'test_1')
self.assertIsInstance(
res[2],
ServicePlacementRequiredDomainPolicyDescription
)
self.assertEqual(res[2].domain_name, 'test-22')
self.assertIsInstance(
res[3],
ServicePlacementRequireDomainDistributionPolicyDescription
)
self.assertEqual(res[3].domain_name, 'test_3')
def test_invalid_move_cost(self):
"""Invalid move cost raises error"""
with self.assertRaises(CLIError):
sf_c.validate_move_cost('test')
def test_empty_stateful_flags(self):
"""Empty stateful flags returns zero"""
self.assertEqual(sf_c.stateful_flags(), 0)
def test_all_stateful_flags(self):
"""All stateful flags sum up to correct value"""
self.assertEqual(sf_c.stateful_flags(10, 10, 10), 7)
def test_empty_service_update_flags(self):
"""Empty service update flags returns zero"""
self.assertEqual(sf_c.service_update_flags(), 0)
def test_all_service_update_flags(self):
"""All service update flags sum up to correct value"""
self.assertEqual(sf_c.service_update_flags(target_rep_size=1,
rep_restart_wait=10,
quorum_loss_wait=10,
standby_rep_keep=10,
min_rep_size=5,
placement_constraints='',
placement_policy='',
correlation='',
metrics='',
move_cost='high'), 1023)
def test_service_create_missing_service_state(self):
"""Service create must specify exactly stateful or stateless"""
with self.assertRaises(CLIError):
sf_c.validate_service_create_params(False, False, None, None,
None, None, None, None)
with self.assertRaises(CLIError):
sf_c.validate_service_create_params(True, True, None, None, None,
None, None, None)
def test_service_create_target_size_matches_state(self):
"""Service create target replica set and instance count match
stateful or stateless"""
with self.assertRaises(CLIError):
sf_c.validate_service_create_params(True, False, True, False,
False, 10, None, None)
with self.assertRaises(CLIError):
sf_c.validate_service_create_params(False, True, True, False,
False, None, 10, None)
def test_service_create_missing_stateful_replica_set_sizes(self):
"""Service create without target or min replica set sizes raises
error"""
with self.assertRaises(CLIError):
sf_c.validate_service_create_params(True, False, True, False,
False, None, 10, None)
def test_parse_incomplete_partition_policy_named_scheme(self):
"""Parsing named partition policy with unspecified names raises
error"""
with self.assertRaises(CLIError):
sf_c.parse_partition_policy(True, None, None, None, None, None,
None)
def test_parse_incomplete_partition_policy_int(self):
"""Parsing int partition policy with incomplete args raises error"""
with self.assertRaises(CLIError):
sf_c.parse_partition_policy(False, None, True, 0, 5, None, False)
def test_parse_multiple_partition_policy(self):
"""Parsing multiple different partition polices raises error"""
with self.assertRaises(CLIError):
sf_c.parse_partition_policy(True, ['test'], True, 0, 5, 3, True)
def test_parse_valid_partition_policy(self):
"""Parsing valid partition polices returns correct policies"""
from azure.servicefabric.models.named_partition_scheme_description import NamedPartitionSchemeDescription # pylint: disable=line-too-long
from azure.servicefabric.models.singleton_partition_scheme_description import SingletonPartitionSchemeDescription # pylint:disable=line-too-long
from azure.servicefabric.models.uniform_int64_range_partition_scheme_description import UniformInt64RangePartitionSchemeDescription # pylint:disable=line-too-long
res = sf_c.parse_partition_policy(True, ['test'], False, None, None,
None, False)
self.assertIsInstance(res, NamedPartitionSchemeDescription)
self.assertEqual(res.count, 1)
self.assertEqual(res.names, ['test'])
res = sf_c.parse_partition_policy(False, None, True, 1, 5, 3, False)
self.assertIsInstance(res, UniformInt64RangePartitionSchemeDescription)
self.assertEqual(res.high_key, 5)
self.assertEqual(res.low_key, 1)
self.assertEqual(res.count, 3)
res = sf_c.parse_partition_policy(False, None, False, None, None, None,
True)
self.assertIsInstance(res, SingletonPartitionSchemeDescription)
def test_activation_mode_invalid(self):
"""Invalid activation mode specified raises error"""
with self.assertRaises(CLIError):
sf_c.validate_activation_mode('test')
def test_activation_mode_none(self): # pylint: disable=no-self-use
"""None activation mode is considered valid"""
sf_c.validate_activation_mode(None)
def test_service_update_specify_state(self):
"""Service update incorrectly specifying service state raises error"""
with self.assertRaises(CLIError):
sf_c.validate_update_service_params(False, False, 10, 0, 10,
10, 10, False)
def test_service_update_stateful_invalid_params(self):
"""Stateful service update with invalid args raises error"""
with self.assertRaises(CLIError):
sf_c.validate_update_service_params(False, True, 5, 3, 10,
10, 10, 1)
def test_service_update_stateless_invalid_params(self):
"""Stateless service update with invalid args raises error"""
with self.assertRaises(CLIError):
sf_c.validate_update_service_params(True, False, 5, None, None,
None, None, 10)
with self.assertRaises(CLIError):
sf_c.validate_update_service_params(True, False, None, 1, None,
None, None, 10)
with self.assertRaises(CLIError):
sf_c.validate_update_service_params(True, False, None, None, 10,
None, None, 10)
with self.assertRaises(CLIError):
sf_c.validate_update_service_params(True, False, None, None, None,
10, None, 10)
with self.assertRaises(CLIError):
sf_c.validate_update_service_params(True, False, None, None, None,
None, 5, 10)
|
# from def_InstantRaise import InstanceRaise
# from def_Momentum1mo import Momentum1mo
# from def_Momentum3mos import Momentum3mos
# from def_Updown import Updown
from def_Dual import Dual
# InstanceRaise() # 16 min
# Momentum3mos() # 3 min 57 sec
# Momentum1mo() # 4 min 40 sec
# Updown() # 4 min 20 sec
Dual() # 40 sec
|
import wx
from resource_path import resource_path
from subprocess import Popen
from SpreadsheettoEAD.func.globals import init
import SpreadsheettoEAD.func.globals
import xml.etree.cElementTree as ET
import wx.lib.scrolledpanel as scrolled
from SpreadsheettoEAD import SpreadsheettoEAD
import traceback
import sys
from threading import Thread
from wx.lib.pubsub import pub
########################################################################
class ProgressBarThread(Thread):
"""Test Worker Thread Class."""
#----------------------------------------------------------------------
def __init__(self, input_xml, template_xml):
"""Init Worker Thread Class."""
Thread.__init__(self)
self._input_xml = input_xml
self._template_xml = template_xml
self.start() # start the thread
#----------------------------------------------------------------------
def run(self):
"""Run Worker Thread."""
# This is the code executing in the new thread.
generic_error = "I'm afraid there has been an unhandled error. This may be a problem with EADMachine, or you may be using uncommon EAD encoding that is not supported. If you would like to help fix these issues, please send the error_log.txt file along with the XML files you are using to the developer at GWiedeman@Albany.edu."
try:
comp_EAD, comp_HTML = SpreadsheettoEAD.SpreadsheettoEAD(self._input_xml, self._template_xml)
wx.CallAfter(pub.sendMessage, "finish_EAD", output_EAD = comp_EAD, output_HTML = comp_HTML)
except:
error_log = open("error_log.txt", "a")
error_log.write("Spreadsheet to EAD." + traceback.format_exc() + "######################################################################################")
error_log.close()
print traceback.format_exc()
errorbox = wx.MessageDialog(None, generic_error, "Unhandled Error!", wx.OK | wx.ICON_ERROR)
errorbox.ShowModal()
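# Progress updates reach MyProgressDialog (below) through pubsub: the conversion
# code presumably publishes one "update" message per step, and it must do so via
# wx.CallAfter so the GUI is only touched from the main thread, e.g.:
#     wx.CallAfter(pub.sendMessage, "update", msg="Writing <archdesc>...")
# The "finish_EAD" message sent above hands the finished documents back to
# TabPanel.saveFile in the same way.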
########################################################################
class MyProgressDialog(wx.Dialog):
""""""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
wx.Dialog.__init__(self, None, title="Creating EAD file...", size=(500,150))
self.count = 0
if "ask_html" in SpreadsheettoEAD.func.globals.new_elements:
self.progress = wx.Gauge(self, range=15)
else:
self.progress = wx.Gauge(self, range=13)
self.progresstxt = wx.StaticText(self, id=-1, label="Creating EAD file...", style=wx.ALIGN_CENTER)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.progress, 0, wx.ALL | wx.EXPAND, 10)
self.sizer.Add(self.progresstxt, 0, wx.ALL | wx.EXPAND, 10)
# create a pubsub receiver
pub.subscribe(self.updateProgress, "update")
self.SetSizer(self.sizer)
self.Bind(wx.EVT_CLOSE, self.OnClose)
def OnClose(self, event):
ProgressBarThread.stopped = True
self.Destroy()
#----------------------------------------------------------------------
def updateProgress(self, msg):
""""""
self.count += 1
if "ask_html" in SpreadsheettoEAD.func.globals.new_elements:
if self.count >= 15:
self.Destroy()
else:
if self.count >= 13:
self.Destroy()
self.progress.SetValue(self.count)
self.sizer.Hide(self.progresstxt)
self.progresstxt = wx.StaticText(self, id=-1, label=msg, style=wx.ALIGN_CENTER, pos=(20, 30))
self.sizer.Add(self.progresstxt, 0, wx.ALL | wx.EXPAND, 10)
print msg
########################################################################
class TabPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent=parent)
#global variables
init()
self.panel_one = FirstPanel(self)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.panel_one, 1, wx.EXPAND)
#self.sizer.Add(self.panel_two, 1, wx.EXPAND)
self.SetSizer(self.sizer)
passID = ""
pub.subscribe(self.saveFile, "finish_EAD")
def nextClick(self, event, FAinput, Teminput):
self.panel_one.Hide()
self.panel_two = NextPanel(self)
self.sizer.Add(self.panel_two, 1, wx.EXPAND)
self.panel_two.Show()
self.Layout()
def goClick(self, event, input_xml, template_xml, cID):
self.panel_two.Hide()
if cID.startswith('nam_'):
self.passID = cID.replace('nam_', '')
else:
self.passID = cID
btn = event.GetEventObject()
#btn.Disable()
ProgressBarThread(input_xml, template_xml)
dlg = MyProgressDialog()
dlg.ShowModal()
#output_EAD, output_HTML = SpreadsheettoEAD.SpreadsheettoEAD(input_xml, template_xml)
btn.Enable()
def saveFile(self, output_EAD, output_HTML):
saveFileDialog = wx.FileDialog(self, "Save As", "", self.passID,
"XML files (*.xml)|*.xml",
wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
result = saveFileDialog.ShowModal()
inFile = saveFileDialog.GetPath()
saveFileDialog.Destroy()
if result == wx.ID_OK: #Save button was pressed
#output_element = ET.fromstring(output_EAD)
#output1 = ET.ElementTree(output_element)
#output1.write(inFile, xml_declaration=True, encoding='utf-8', method='xml')
#with open(inFile, 'w') as f:
#f.write('<?xml version="1.0" encoding="UTF-8" ?><!DOCTYPE ead SYSTEM "ead.dtd">')
#output1.write(f, 'utf-8')
with open(inFile, "w") as f:
#f.write('<?xml version="1.0" encoding="UTF-8" ?><!DOCTYPE ead SYSTEM "ead.dtd">')
#output_EAD.write(f, 'utf-8')
f.write(output_EAD)
elif result == wx.ID_CANCEL: #Either the cancel button was pressed or the window was closed
pass
if output_HTML == False:
pass
else:
saveFileDialog2 = wx.FileDialog(self, "Save As", "", self.passID,
"HTML files (*.html)|*.html",
wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
result2 = saveFileDialog2.ShowModal()
inFile2 = saveFileDialog2.GetPath()
saveFileDialog2.Destroy()
if result2 == wx.ID_OK: #Save button was pressed
output_web = ET.fromstring(output_HTML)
output2 = ET.ElementTree(output_web)
#output2.write(inFile2, xml_declaration=False)
with open(inFile2, 'w') as g:
g.write('<!DOCTYPE html>')
output2.write(g, 'utf-8')
#output_HTML.write(inFile2, xml_declaration=True, encoding='utf-8', method='xml')
#with open(inFile2, "w") as g:
#g.write(output_HTML)
elif result2 == wx.ID_CANCEL: #Either the cancel button was pressed or the window was closed
pass
SpreadsheettoEAD.func.globals.new_elements = [w for w in SpreadsheettoEAD.func.globals.new_elements if w != "add_unitid"]
SpreadsheettoEAD.func.globals.new_elements = [x for x in SpreadsheettoEAD.func.globals.new_elements if x != "ask_ualbany"]
SpreadsheettoEAD.func.globals.new_elements = [y for y in SpreadsheettoEAD.func.globals.new_elements if y != "ask_fileunitid"]
SpreadsheettoEAD.func.globals.new_elements = [z for z in SpreadsheettoEAD.func.globals.new_elements if z != "ask_html"]
self.panel_one.Show()
########################################################################
class FirstPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent=parent)
def scale_bitmap(bitmap, width, height):
image = wx.ImageFromBitmap(bitmap)
image = image.Scale(width, height, wx.IMAGE_QUALITY_HIGH)
result = wx.BitmapFromImage(image)
return result
vbox = wx.BoxSizer(wx.VERTICAL)
logobox = wx.BoxSizer(wx.HORIZONTAL)
titleImage = wx.StaticBitmap(self, size=(360,65))
titleImage.SetBitmap(wx.Bitmap(resource_path('resources/logotitle.gif')))
logobox.Add(titleImage, 1, wx.TOP | wx.EXPAND, 10)
instuctbox = wx.BoxSizer(wx.HORIZONTAL)
instructtxt = wx.StaticText(self, id=-1, label="Spreadsheet + XML Template = <EAD> and <HTML>", style=wx.ALIGN_CENTER)
instuctbox.Add(instructtxt, 1, wx.ALL | wx.EXPAND, 5)
middlebox = wx.BoxSizer(wx.VERTICAL)
inputtxtbox = wx.BoxSizer(wx.HORIZONTAL)
FAtxt = wx.StaticText(self, id=-1, label="Select Finding Aid Data exported from spreadsheet:")
inputtxtbox.Add(FAtxt, 1, wx.TOP | wx.EXPAND, 10)
inputfilebox = wx.BoxSizer(wx.HORIZONTAL)
self.FAinput = wx.TextCtrl(self, size=(450,25))
browseButton = wx.Button(self, label='Browse...', size=(75, 28))
inputfilebox.Add(self.FAinput, 1, wx.ALL | wx.EXPAND, 1)
inputfilebox.Add(browseButton, 1, wx.ALL | wx.EXPAND, 1)
self.Bind(wx.EVT_BUTTON, lambda event: self.browseClick(event, self.FAinput), browseButton)
temtxtbox = wx.BoxSizer(wx.HORIZONTAL)
Temtxt = wx.StaticText(self, id=-1, label="Select Template:")
temtxtbox.Add(Temtxt, 1, wx.TOP | wx.EXPAND, 1)
temfilebox = wx.BoxSizer(wx.HORIZONTAL)
self.Teminput = wx.TextCtrl(self, size=(450,25))
tempButton = wx.Button(self, label='Browse...')
temfilebox.Add(self.Teminput, 1, wx.ALL | wx.EXPAND, 1)
temfilebox.Add(tempButton, 1, wx.ALL | wx.EXPAND, 1)
self.Bind(wx.EVT_BUTTON, lambda event: self.tempClick(event, self.Teminput), tempButton)
middlebox.Add(inputtxtbox, 1, wx.ALL | wx.EXPAND, 1)
middlebox.Add(inputfilebox, 1, wx.ALL | wx.EXPAND, 1)
middlebox.Add(temtxtbox, 1, wx.ALL | wx.EXPAND, 1)
middlebox.Add(temfilebox, 1, wx.BOTTOM | wx.EXPAND, 1)
nextbuttonbox = wx.BoxSizer(wx.HORIZONTAL)
nextbuttonbox.Add((453, 10))
nextButton = wx.Button(self, label='Next >', size=(175, 28))
nextbuttonbox.Add(nextButton, 1, wx.RIGHT | wx.EXPAND, 1.5)
self.Bind(wx.EVT_BUTTON, lambda event: parent.nextClick(event, self.FAinput, self.Teminput), nextButton)
vbox.Add(logobox, 1, wx.ALL | wx.EXPAND, 5)
vbox.Add(instuctbox, 1, wx.ALL | wx.EXPAND, 5)
vbox.Add(middlebox, 1, wx.ALL | wx.EXPAND, 5)
vbox.Add(nextbuttonbox, 1, wx.ALL | wx.EXPAND, 5)
self.SetSizer(vbox)
vbox.Fit(self)
def browseClick(self, event, FAinput):
inputBox = wx.FileDialog(None, "Select Finding Aid Data:", 'Select', '*.xml')
if inputBox.ShowModal()==wx.ID_OK:
inputFile = inputBox.GetPath()
FAinput.Clear()
FAinput.AppendText(inputFile)
def tempClick(self, event, Teminput):
tempBox = wx.FileDialog(None, "Select Template:", 'Select', '*.xml')
if tempBox.ShowModal()==wx.ID_OK:
templateFile = tempBox.GetPath()
Teminput.Clear()
Teminput.AppendText(templateFile)
########################################################################
class NextPanel(scrolled.ScrolledPanel):
def __init__(self, parent):
scrolled.ScrolledPanel.__init__(self, parent, -1)
overallbox = wx.BoxSizer()
verticalbox = wx.BoxSizer(wx.VERTICAL)
overallbox.Add(verticalbox, 1, wx.LEFT, 5)
question_count = 0
input_xml = parent.panel_one.FAinput.GetValue()
template_xml = parent.panel_one.Teminput.GetValue()
input_file = ET.ElementTree(file=input_xml)
template_file = ET.ElementTree(file=template_xml)
input = input_file.getroot()
template = template_file.getroot()
#makes error messages show as alert windows instead of printing them
SpreadsheettoEAD.func.globals.new_elements.append("ask_gui")
if len(input_xml) < 1: #verifies finding aid data file
from SpreadsheettoEAD.func.messages import error
error("You failed to enter your Finding Aid Data. The first file must be an XML file exported from the EADMachine spreadsheet. Please save your spreadsheet as an XML file and try again.", False)
parent.panel_one.Show()
if len(template_xml) < 1: #verifies template file
from SpreadsheettoEAD.func.messages import error
error("You failed to enter an EAD template file. The EAD Template file must be one of the default XML files in the templates folder, or a valid EAD file. Please enter a valid EAD template file and try again.", False)
parent.panel_one.Show()
if input.tag != "FindingAid" or input.find('CollectionSheet') is None: #verifies finding aid data file
from SpreadsheettoEAD.func.messages import error
error("You entered an incorrect XML file for your Finding Aid Data. The first file must be an XML file exported from the EADMachine spreadsheet. Please save your spreadsheet as an XML file and try again.", False)
parent.panel_one.Show()
elif not str(template.tag).endswith("ead"): #verifies template file
from SpreadsheettoEAD.func.messages import error
error("You entered an incorrect EAD template file. The EAD Template file must be one of the default XML files in the templates folder, or a valid EAD file. Please enter a valid EAD template file and try again.", False)
parent.panel_one.Show()
else:
#removes namespaces
template.tag = "ead"
for all_tags in template.iter():
all_tags.tag = str(all_tags.tag).split("}",1)[-1]
if template.find('archdesc') is None: #verifies template file again
from SpreadsheettoEAD.func.messages import error
error("You entered an incorrect EAD template file. The EAD Template file must be one of the default XML files in the templates folder, or a valid EAD file. Please enter a valid EAD template file and try again.", False)
parent.panel_one.Show()
if template[0].tag == "eadheader":
version = "ead2002"
else:
version = "ead3"
"""
if input.find('CollectionSheet/ProcessedBy') is None:
pass
else:
if input.find('CollectionSheet/ProcessedBy').text:
if template.find('eadheader/filedesc/titlestmt/author') is None or template.find('frontmatter/titlepage/author') is None:
if template.find('control/filedesc/titlestmt/author') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_author", "an author", "<author>")
if input.find('CollectionSheet/Subtitle') is None:
pass
else:
if input.find('CollectionSheet/Subtitle').text:
if template.find('eadheader/filedesc/titlestmt/subtitle') is None or template.find('frontmatter/titlepage/subtitle') is None:
if template.find('control/filedesc/titlestmt/subtitle') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_subtitle", "a Subtitle", "<subtitle>")
if input.find('CollectionSheet/Sponsor') is None:
pass
else:
if input.find('CollectionSheet/Sponsor').text:
if template.find('control/filedesc/titlestmt/sponsor') is None and template.find('eadheader/filedesc/titlestmt/sponsor') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_sponsor", "a Sponsor", "<sponsor>")
if input.find('CollectionSheet/Edition') is None:
pass
else:
if input.find('CollectionSheet/Edition').text:
if template.find('control/filedesc/editionstmt') is None and template.find('eadheader/filedesc/editionstmt') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_edition", "an Edition", "<editionstmt>")
if input.find('CollectionSheet/Publisher/AddressLine') is None:
if input.find('CollectionSheet/Publisher/PublisherName') is None:
pass
else:
if input.find('CollectionSheet/Publisher/PublisherName').text:
if template.find('control/filedesc/publicationstmt/publisher') is None and template.find('eadheader/filedesc/publicationstmt/publisher') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_publication", "a Publisher", "<publicationstmt>")
else:
if input.find('CollectionSheet/Publisher/PublisherName') is None:
pass
else:
if input.find('CollectionSheet/Publisher/PublisherName').text or input.find('CollectionSheet/Publisher/AddressLine').text:
if template.find('control/filedesc/publicationstmt/publisher') is None and template.find('eadheader/filedesc/publicationstmt/publisher') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_publication", "a Publisher", "<publicationstmt>")
if input.find('CollectionSheet/PartofSeries') is None:
pass
else:
if input.find('CollectionSheet/PartofSeries').text:
if template.find('control/filedesc/seriesstmt') is None and template.find('eadheader/filedesc/seriesstmt') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_seriesstmt", "a Series statement", "<seriesstmt>")
if input.find('CollectionSheet/NoteStatements/NoteStatement') is None:
pass
else:
if input.find('CollectionSheet/NoteStatements/NoteStatement').text:
if template.find('control/filedesc/notestmt') is None and template.find('eadheader/filedesc/notestmt') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_notestmt", "a Note Statement", "<notestmt>")
#EAD2002 only questions:
if version == "ead2002":
if input.find('CollectionSheet/EADCreator') is None or input.find('CollectionSheet/FindingAidLanguages/FALanguage/Lang') is None:
pass
else:
if input.find('CollectionSheet/EADCreator').text or input.find('CollectionSheet/FindingAidLanguages/FALanguage/Lang').text:
if template.find('eadheader/profiledesc') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_profile", "EAD languages and/or a creation date", "<profiledesc>")
if input.find('CollectionSheet/EADCreator') is None:
pass
else:
if input.find('CollectionSheet/EADCreator').text:
if template.find('eadheader/profiledesc/creation') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_eadcre", "an EAD creator", "<creation>")
if input.find('CollectionSheet/FindingAidLanguages/FALanguage/Lang') is None:
pass
else:
if input.find('CollectionSheet/FindingAidLanguages/FALanguage/Lang').text:
if template.find('eadheader/profiledesc/langusage') is None or template.find('eadheader/profiledesc/langusage/language') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_lang1", "an EAD Language", "<langusage> or <language>")
if input.find('CollectionSheet/StandardConventions/Convention/Citation') is None or input.find('CollectionSheet/LocalConventions/Convention/Citation') is None:
pass
else:
if input.find('CollectionSheet/StandardConventions/Convention/Citation').text or input.find('CollectionSheet/LocalConventions/Convention/Citation').text:
if template.find('eadheader/profiledesc/descrules') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_descrule", "Descriptive Rules", "<descrules>")
if input.find('CollectionSheet/Revisions/Event/Date') is None:
pass
else:
if input.find('CollectionSheet/Revisions/Event/Date').text:
if template.find('eadheader/revisiondesc') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_revisions", "Revisions", "<revisiondesc>")
#EAD3 only questions:
if version == "ead3":
if input.find('CollectionSheet/OtherID').text:
if template.find('control/otherrecordid') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_otherid", "one or more Other IDs", "<otherrecordid>")
if input.find('CollectionSheet/Representation').text:
if template.find('control/representation') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_rep", "one or more Representations", "<representation>")
if input.find('CollectionSheet/PublicationStatus').text:
if template.find('control/publicationstatus') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_pubstatus", "a Publication Status", "<publicationstatus>")
if input.find('CollectionSheet/FindingAidLanguages/FALanguage/Lang').text:
if template.find('control/languagedeclaration') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_langdec", "a Finding Aid Language or a Language Description", "<languagedeclaration>")
if input.find('CollectionSheet/StandardConventions/Convention/Citation').text:
if template.find('control/conventiondeclaration') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_stancon", "a Standard Convention", "<conventiondeclaration>")
if input.find('CollectionSheet/LocalConventions/Convention/Citation').text:
if template.find('control/localtypedeclaration') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_localcon", "a Local Convention", "<localtypedeclaration>")
if input.find('CollectionSheet/LocalControls/Control/Term').text:
if template.find('control/localcontrol') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_localctr", "a Local Control", "<localcontrol>")
if input.find('CollectionSheet/OutsideSources/Source/SourceName').text:
if template.find('control/sources') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_sources", "external Sources", "<sources>")
if input.find('CollectionSheet/Relations/Relation/RelationEntry').text or input.find('CollectionSheet/Relations/Relation/RelationLink').text or input.find('CollectionSheet/Relations/Relation/RelationNote').text:
if template.find('archdesc/relations') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_relation", "Relations", "<relations>")
"""
#<archdesc> questions:
if input.find('CollectionSheet/CollectionID').text:
if template.find("archdesc/did/unitid") is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_unitid", "custom", "Your EAD template does not contain a <unitid> within the collection-level <did>. EADMachine recommends that you add a <unitid> here. Would you like to do so?")
"""
if input.find('CollectionSheet/DateBulk').text:
if template.find("archdesc/did/unitdate[@type='bulk']") is None and template.find("archdesc/did/unittitle/unitdate[@type='bulk']") is None and template.find("archdesc/did/unitdate[@unitdatetype='bulk']") is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_bulkdate", "a collection-level bulk date", "<unitdate type='bulk'>")
if input.find('CollectionSheet/Abstract') is None:
pass
else:
if input.find('CollectionSheet/Abstract').text:
if template.find("archdesc/did/abstract") is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_abstract", "an Abstract", "<abstract>")
if input.find('CollectionSheet/Origins/Origination/Part') is None:
pass
else:
if input.find('CollectionSheet/Origins/Origination/Part').text:
if template.find('archdesc/did/origination') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_origin", "Origination information", "<origination>")
if template.find('control') is None:
if input.find('CollectionSheet/PhysicalDescriptionSet/PhysicalDescription/Quantity').text or input.find('CollectionSheet/PhysicalDescriptionSet/PhysicalDescription/Dimensions').text:
if template.find('archdesc/did/physdesc') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_physdesc", "collection-level Physical Description", "<physdesc>")
else:
if input.find('CollectionSheet/PhysicalDescriptionSet/PhysicalDescription/Quantity').text or input.find('CollectionSheet/PhysicalDescriptionSet/PhysicalDescription/Dimensions').text:
if template.find('archdesc/did/physdescstructured') is None:
if template.find('archdesc/did/physdesc') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_physdesc", "collection-level Physical Description", "<physdesc> or <physdescstructured>")
if input.find('CollectionSheet/Languages/Language/Lang') is None:
pass
else:
if input.find('CollectionSheet/Languages/Language/Lang').text:
if template.find("archdesc/did/langmaterial") is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_langmat", "a Collection Language or a Language Description", "<languagematerial>")
if input.find('CollectionSheet/Access/Statement') is None:
if input.find('CollectionSheet/Access/SpecificMaterialRestrictions/SpecificRestriction/Restriction') is None:
pass
else:
if input.find('CollectionSheet/Access/SpecificMaterialRestrictions/SpecificRestriction/Restriction').text:
if template.find("archdesc/accessrestrict") is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_accessrestrict", "Access Restrictions", "<accessrestrict>")
else:
if input.find('CollectionSheet/Access/Statement').text or input.find('CollectionSheet/Access/SpecificMaterialRestrictions/SpecificRestriction/Restriction').text:
if template.find("archdesc/accessrestrict") is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_accessrestrict", "Access Restrictions", "<accessrestrict>")
if input.find('CollectionSheet/Accruals/Accrual/Text') is None:
pass
else:
if input.find('CollectionSheet/Accruals/Accrual/Text').text:
if template.find("archdesc/accruals") is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_accruals", "Accruals", "<accruals>")
if input.find('CollectionSheet/AcquisitionInfo/Acquis/Event') is None:
pass
else:
if input.find('CollectionSheet/AcquisitionInfo/Acquis/Event').text:
if template.find("archdesc/acqinfo") is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_acq", "Acquisition information", "<acqinfo>")
if input.find('CollectionSheet/AlternateForms/Alternative/Text') is None:
pass
else:
if input.find('CollectionSheet/AlternateForms/Alternative/Text').text:
if template.find("archdesc/altformavail") is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_altforms", "Alternate Forms or Copies", "<altformavail>")
if input.find('CollectionSheet/AppraisalInfo/Appraisal/Text') is None:
pass
else:
if input.find('CollectionSheet/AppraisalInfo/Appraisal/Text').text:
if template.find("archdesc/appraisal") is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_appraisal", "Appraisal information", "<appraisal>")
if input.find('CollectionSheet/CollectionArrangement/Arrangement/Text') is None:
pass
else:
if input.find('CollectionSheet/CollectionArrangement/Arrangement/Text').text:
if template.find("archdesc/arrangement") is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_arrange", "Arrangement information", "<arrangement>")
if input.find('CollectionSheet/PublicationBibliography/Publication/Title').text or input.find('CollectionSheet/ManuscriptBibliography/Manuscript/UnitTitle').text:
if template.find('archdesc/bibliography') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_biblio", "a Bibliography", "<bibliography>")
if input.find('CollectionSheet/HistoricalNote/p') is None:
pass
else:
if input.find('CollectionSheet/HistoricalNote/p').text:
if template.find('archdesc/bioghist') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_bio", "a Historical Note", "<bioghist>")
if input.find('CollectionSheet/ControlledAccess/AccessPoint/Part') is None or input.find('CollectionSheet/ControlledAccess/AccessPoint/ElementName') is None:
pass
else:
if input.find('CollectionSheet/ControlledAccess/AccessPoint/Part').text and input.find('CollectionSheet/ControlledAccess/AccessPoint/ElementName').text:
if template.find('archdesc/controlaccess') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_controlaccess", "Controlled Access points", "<controlaccess>")
if input.find('CollectionSheet/CustodialHistory/Event/Text') is None:
pass
else:
if input.find('CollectionSheet/CustodialHistory/Event/Text').text:
if template.find('archdesc/custodhist') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_custhistory", "Custodial History", "<custodhist>")
if input.find('CollectionSheet/LegalStatus/Status/Text') is None:
pass
else:
if input.find('CollectionSheet/LegalStatus/Status/Text').text:
if template.find('archdesc/legalstatus') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_legalstatus", "Legal Status information", "<legalstatus>")
if input.find('CollectionSheet/OtherFindingAids/Other/Text') is None:
pass
else:
if input.find('CollectionSheet/OtherFindingAids/Other/Text').text:
if template.find('archdesc/otherfindaid') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_otherfa", "Other Finding Aids", "<otherfindaid>")
if input.find('CollectionSheet/PhysicalTechnical/Details/Text') is None:
pass
else:
if input.find('CollectionSheet/PhysicalTechnical/Details/Text').text:
if template.find('archdesc/phystech') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_phystech", "Physical or Technical details", "<phystech>")
if input.find('CollectionSheet/PreferredCitation/Example') is None:
pass
else:
if input.find('CollectionSheet/PreferredCitation/Example').text:
if template.find('archdesc/prefercite') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_prefcite", "a Preferred Citation", "<prefercite>")
if input.find('CollectionSheet/ProcessingInformation/Details/Text') is None:
pass
else:
if input.find('CollectionSheet/ProcessingInformation/Details/Text').text:
if template.find('archdesc/processinfo') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_processinfo", "Processing information", "<processinfo>")
if input.find('CollectionSheet/RelatedPublications/Publication/Title') is None or input.find('CollectionSheet/RelatedManuscripts/Manuscript/UnitTitle') is None:
pass
else:
if input.find('CollectionSheet/RelatedPublications/Publication/Title').text or input.find('CollectionSheet/RelatedManuscripts/Manuscript/UnitTitle').text:
if template.find('archdesc/relatedmaterial') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_related", "Related Material", "<relatedmaterial>")
if input.find('CollectionSheet/ScopeContent/p') is None:
pass
else:
if input.find('CollectionSheet/ScopeContent/p').text:
if template.find('archdesc/scopecontent') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_scope", "a Scope and Content note", "<scopecontent>")
if input.find('CollectionSheet/SeparatedMaterial/Material/Text') is None:
pass
else:
if input.find('CollectionSheet/SeparatedMaterial/Material/Text').text:
if template.find('archdesc/separatedmaterial') is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_sepmat", "Separated Material", "<separatedmaterial>")
if input.find('CollectionSheet/UseRestrictions/Statement') is None:
if input.find('CollectionSheet/UseRestrictions/SpecificMaterialRestrictions/SpecificRestriction/Restriction') is None:
pass
else:
if input.find('CollectionSheet/UseRestrictions/SpecificMaterialRestrictions/SpecificRestriction/Restriction').text:
if template.find("archdesc/userestrict") is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_userestrict", "Use Restrictions", "<userestrict>")
else:
if input.find('CollectionSheet/UseRestrictions/Statement').text or input.find('CollectionSheet/UseRestrictions/SpecificMaterialRestrictions/SpecificRestriction/Restriction').text:
if template.find("archdesc/userestrict") is None:
question_count = question_count + 1
self.radio_ask(verticalbox, "add_userestrict", "Use Restrictions", "<userestrict>")
"""
# UAlbany Format question
question_count = question_count + 1
ask_ualbany = wx.StaticText(self, id=-1, label="Do you want to insert UAlbany's stylesheet and follow UAlbany's local formatting for the <titleproper>?")
ask_ualbany.Wrap(530)
ask_ualbanyY = wx.RadioButton(self, label="Yes", style=wx.RB_GROUP)
ask_ualbanyN = wx.RadioButton(self, label = "No")
qbox98 = wx.BoxSizer(wx.HORIZONTAL)
abox98 = wx.BoxSizer(wx.HORIZONTAL)
qbox98.Add(ask_ualbany, 1, wx.TOP, 5)
abox98.Add(ask_ualbanyY, 1, wx.ALL, 3)
abox98.Add(ask_ualbanyN, 1, wx.ALL, 3)
verticalbox.Add(qbox98, 0, wx.TOP|wx.EXPAND, 5)
verticalbox.Add(abox98, 0, wx.ALL, 3)
SpreadsheettoEAD.func.globals.new_elements.append('ask_ualbany')
self.Bind(wx.EVT_RADIOBUTTON, lambda event: self.radioYes("ask_ualbany"), ask_ualbanyY)
self.Bind(wx.EVT_RADIOBUTTON, lambda event: self.radioNo("ask_ualbany"), ask_ualbanyN)
# <unitid> at file level question
question_count = question_count + 1
ask_fileunitid = wx.StaticText(self, id=-1, label="Do you want to keep the <unitid> tags at the file level?")
ask_fileunitid.Wrap(530)
ask_fileunitidY = wx.RadioButton(self, label="Yes", style=wx.RB_GROUP)
ask_fileunitidN = wx.RadioButton(self, label = "No")
qbox97 = wx.BoxSizer(wx.HORIZONTAL)
abox97 = wx.BoxSizer(wx.HORIZONTAL)
qbox97.Add(ask_fileunitid, 1, wx.TOP, 5)
abox97.Add(ask_fileunitidY, 1, wx.ALL, 3)
abox97.Add(ask_fileunitidN, 1, wx.ALL, 3)
verticalbox.Add(qbox97, 0, wx.TOP|wx.EXPAND, 5)
verticalbox.Add(abox97, 0, wx.ALL, 3)
SpreadsheettoEAD.func.globals.new_elements.append('ask_fileunitid')
self.Bind(wx.EVT_RADIOBUTTON, lambda event: self.radioYes("ask_fileunitid"), ask_fileunitidY)
self.Bind(wx.EVT_RADIOBUTTON, lambda event: self.radioNo("ask_fileunitid"), ask_fileunitidN)
# HTML output question
question_count = question_count + 1
ask_html = wx.StaticText(self, id=-1, label="Do you want to create a basic HTML <html> file for this collection?")
ask_html.Wrap(530)
ask_htmlY = wx.RadioButton(self, label="Yes", style=wx.RB_GROUP)
ask_htmlN = wx.RadioButton(self, label = "No")
qbox99 = wx.BoxSizer(wx.HORIZONTAL)
abox99 = wx.BoxSizer(wx.HORIZONTAL)
qbox99.Add(ask_html, 1, wx.TOP, 5)
abox99.Add(ask_htmlY, 1, wx.ALL, 3)
abox99.Add(ask_htmlN, 1, wx.ALL, 3)
verticalbox.Add(qbox99, 0, wx.TOP|wx.EXPAND, 5)
verticalbox.Add(abox99, 0, wx.ALL, 3)
SpreadsheettoEAD.func.globals.new_elements.append('ask_html')
self.Bind(wx.EVT_RADIOBUTTON, lambda event: self.radioYes("ask_html"), ask_htmlY)
self.Bind(wx.EVT_RADIOBUTTON, lambda event: self.radioNo("ask_html"), ask_htmlN)
name = input.find('CollectionSheet/CollectionName').text
if "ask_ualbany" in SpreadsheettoEAD.func.globals.new_elements:
if input.find('CollectionSheet/CollectionID').text:
cID = input.find('CollectionSheet/CollectionID').text.replace("-", "").lower()
else:
cID = ""
else:
cID = input.find('CollectionSheet/CollectionID').text
goButton = wx.Button(self, label='Create EAD')
if cID is None:
self.Bind(wx.EVT_BUTTON, lambda event: parent.goClick(event, input_xml, template_xml, name), goButton)
else:
self.Bind(wx.EVT_BUTTON, lambda event: parent.goClick(event, input_xml, template_xml, cID), goButton)
buttonbox = wx.BoxSizer(wx.HORIZONTAL)
buttonbox.Add((400, 10))
buttonbox.Add(goButton, 1, wx.ALL|wx.EXPAND, 5)
verticalbox.Add(buttonbox, 1, wx.ALL|wx.EXPAND, 5)
self.SetSizer(overallbox)
overallbox.Fit(self)
self.SetAutoLayout(1)
self.SetupScrolling()
def radioYes(self, keyword):
SpreadsheettoEAD.func.globals.new_elements.append(keyword)
#print SpreadsheettoEAD.func.globals.new_elements
def radioNo(self, keyword):
SpreadsheettoEAD.func.globals.new_elements.remove(keyword)
#print SpreadsheettoEAD.func.globals.new_elements
def radio_ask(self, verticalbox, add_var, name, element):
if name.lower() == "custom":
add_question = wx.StaticText(self, id=-1, label=element)
else:
add_question = wx.StaticText(self, id=-1, label="You entered " + name + " but there is no " + element + " tag in your EAD template, would you like EADMachine to add one for this collection?")
add_question.Wrap(530)
add_questionY = wx.RadioButton(self, label="Yes", style=wx.RB_GROUP)
add_questionN = wx.RadioButton(self, label = "No")
qbox = wx.BoxSizer(wx.HORIZONTAL)
abox = wx.BoxSizer(wx.HORIZONTAL)
qbox.Add(add_question, 1, wx.TOP, 5)
abox.Add(add_questionY, 1, wx.ALL, 3)
abox.Add(add_questionN, 1, wx.ALL, 3)
verticalbox.Add(qbox, 0, wx.TOP|wx.EXPAND, 5)
verticalbox.Add(abox, 0, wx.ALL, 3)
SpreadsheettoEAD.func.globals.new_elements.append(add_var)
self.Bind(wx.EVT_RADIOBUTTON, lambda event: self.radioYes(add_var), add_questionY)
self.Bind(wx.EVT_RADIOBUTTON, lambda event: self.radioNo(add_var), add_questionN)
|
# -*- coding: utf-8 -*-
"""
:mod:`orion.core.worker` -- Coordination of the optimization procedure
======================================================================
.. module:: worker
:platform: Unix
:synopsis: Executes optimization steps and runs training experiment
with parameter values suggested.
"""
import logging
from orion.core.io.database import Database
from orion.core.worker.consumer import Consumer
from orion.core.worker.producer import Producer
log = logging.getLogger(__name__)
def workon(experiment):
"""Try to find solution to the search problem defined in `experiment`."""
producer = Producer(experiment)
consumer = Consumer(experiment)
log.debug("##### Init Experiment #####")
while True:
log.debug("#### Try to reserve a new trial to evaluate.")
trial = experiment.reserve_trial(score_handle=producer.algorithm.score)
if trial is None:
log.debug("#### Failed to pull a new trial from database.")
log.debug("#### Fetch most recent completed trials and update algorithm.")
producer.update()
log.debug("#### Poll for experiment termination.")
if experiment.is_done:
break
log.debug("#### Produce new trials.")
producer.produce()
else:
log.debug("#### Successfully reserved %s to evaluate. Consuming...", trial)
consumer.consume(trial)
stats = experiment.stats
best = Database().read('trials', {'_id': stats['best_trials_id']})[0]
log.info("##### Search finished successfully #####")
log.info("\nRESULTS\n=======\n%s\n", stats)
log.info("\nBEST PARAMETERS\n===============\n%s", best)
|
#!/usr/bin/env python3
# day213.py
# By Sebastian Raaphorst
from typing import List
from itertools import combinations
def is_valid_ip_segment(s: str) -> bool:
if not s.isnumeric():
return False
if len(s) == 0:
return False
if len(s) > 1 and s[0] == '0':
return False
if int(s) > 255:
return False
return True
def find_all_perms_brute_force(s: str) -> List[str]:
"""
A brute-force technique to generate all IPs from a list of characters represented as a string.
We simply pick three places to insert the periods and then check for validity.
This produces many invalid IPs, but given the small number of IPs (11C3 = 165), time is not really a concern
and there is no reason to engage in a technique like backtracking.
:param s: the string of digits to analyze for IPs
:return: the list of valid IPs
>>> find_all_perms_brute_force('2542540123')
['254.25.40.123', '254.254.0.123']
>>> find_all_perms_brute_force('0000')
['0.0.0.0']
>>> find_all_perms_brute_force('255255255255')
['255.255.255.255']
>>> find_all_perms_brute_force('300000')
[]
"""
if not s.isnumeric() or len(s) < 4 or len(s) > 12:
return []
ips = []
# Pick the three potential positions for the dots.
for (i1, i2, i3) in combinations(range(1, len(s)), 3):
# Divide into four slices:
slices = [s[0:i1], s[i1:i2], s[i2:i3], s[i3:]]
        if all(is_valid_ip_segment(segment) for segment in slices):
ips.append('.'.join(slices))
return ips
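# A minimal runner sketch: executing this module directly exercises the doctests
# embedded in find_all_perms_brute_force above.
if __name__ == '__main__':
    import doctest
    doctest.testmod()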
|
import numpy as np
import matplotlib.pyplot as plt
import gurobipy as gp
from gurobipy import GRB, quicksum
"""
The user MUST install Gurobi to use this program.
Check https://www.gurobi.com/ for installation details.
"""
def Solve1BitCS(y,Z,m,d,s):
'''
This function creates a quadratic programming model, calls Gurobi
and solves the 1 bit CS subproblem. This function can be replaced with
any suitable function that calls a convex optimization package.
=========== INPUTS ==============
y ........... length d vector of one-bit measurements
Z ........... m-by-d sensing matrix
m ........... number of measurements
d ........... dimension of problem
s ........... sparsity level
=========== OUTPUTS =============
x_hat ....... Solution. Note that ||x_hat||_2 = 1
'''
model = gp.Model("1BitRecovery")
x = model.addVars(2*d, vtype = GRB.CONTINUOUS)
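    # The 2*d nonnegative variables split x into positive and negative parts
    # (x = u - v), which keeps the ell_1 constraint linear; x_hat is recovered
    # below as u - v.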
c1 = np.dot(y.T,Z)
c = list(np.concatenate((c1,-c1)))
model.setObjective(quicksum(c[i]*x[i] for i in range(0,2*d)), GRB.MAXIMIZE)
    model.addConstr(quicksum(x) <= np.sqrt(s), "ell_1")  # sum_i x_i <= sqrt(s)
    model.addConstr(quicksum(x[i]*x[i] for i in range(0,2*d)) - 2*quicksum(x[i]*x[d+i] for i in range(0,d)) <= 1, "ell_2")  # ||x_hat||_2^2 <= 1 in terms of the split variables
model.addConstrs(x[i] >= 0 for i in range(0,2*d))
model.Params.OUTPUTFLAG = 0
model.optimize()
TempSol = model.getAttr('x')
x_hat = np.array(TempSol[0:d] - np.array(TempSol[d:2*d]))
return x_hat
def GradientEstimator(Comparison,x_in,Z,r,m,d,s):
'''
This function estimates the gradient vector from m Comparison
oracle queries, using 1 bit compressed sensing and Gurobi
================ INPUTS ======================
Comparison............. comparison oracle
x_in .................. Any point in R^d
Z ..................... An m-by-d matrix with rows z_i uniformly sampled from unit sphere
r ..................... Sampling radius.
kappa,delta_0, mu...... Comparison oracle parameters.
m ..................... number of measurements.
d ..................... dimension of problem
s ..................... sparsity
================ OUTPUTS ======================
g_hat ........ approximation to g/||g||
tau .......... fraction of bit-flips/ incorrect one-bit measurements.
'''
y = np.zeros(m)
tau = 0
for i in range(0,m):
x_temp = Z[i,:]
y[i], bit_flipped = Comparison(x_in,x_in + r*Z[i,:])
tau += bit_flipped
g_hat = Solve1BitCS(y,Z,m,d,s)
tau = tau/m
return g_hat,tau
def GetStepSize(Comparison,x,g_hat,last_step_size,default_step_size,warm_started):
'''
This function use line search to estimate the best step size on the given
direction via noisy comparison
================ INPUTS ======================
Comparison................ comparison oracle
x ........................ current point
g_hat .................... search direction
last_step_size ........... step size from the last iteration
default_step_size......... a safe lower bound of step size
kappa,delta_0, mu......... Comparison oracle parameters.
================ OUTPUTS ======================
alpha .................... step size found
less_than_defalut ........ True if the step size found is less than the default step size
queries_count ............ number of oracle queries used in linesearch
'''
# First make sure current step size descends
omega = 0.05
num_round = 40
descend_count = 0
queries_count = 0
less_than_defalut = False
#update_factor = np.sqrt(2)
update_factor = 2
if warm_started:
alpha = last_step_size # start with last step size
else:
alpha = default_step_size
point1 = x - alpha * g_hat
for round in range(0,num_round): # compare n rounds for every pair of points,
is_descend,bit_flipped = Comparison(point1,x)
queries_count = queries_count + 1
if is_descend == 1:
descend_count = descend_count + 1
p = descend_count/num_round
# print(p)
    # Try to increase the step size if p is larger, decrease it if p is
    # smaller, and otherwise keep the current alpha.
if p >= 0.5 + omega: # compare with x
while True:
point2 = x - update_factor * alpha * g_hat
descend_count = 0
for round in range(0,num_round): # compare n rounds for every pair of points,
                is_descend,bit_flipped = Comparison(point2,point1) # compare with point1
queries_count = queries_count + 1
if is_descend == 1:
descend_count = descend_count + 1
p = descend_count/num_round
if p >= 0.5 + omega:
alpha = update_factor * alpha
point1 = x - alpha * g_hat
else:
return alpha,less_than_defalut,queries_count
elif warm_started == False:
less_than_defalut = True
return alpha,less_than_defalut,queries_count
elif p <= 0.5 - omega: # else: we try decrease step size
while True:
alpha = alpha / update_factor
if alpha < default_step_size:
alpha = default_step_size
less_than_defalut = True
return alpha,less_than_defalut,queries_count
point2 = x - alpha * g_hat
descend_count = 0
for round in range(0,num_round):
is_descend,bit_flipped = Comparison(point2,x) # compare with x
queries_count = queries_count + 1
if is_descend == 1:
descend_count = descend_count + 1
p = descend_count/num_round
if p >= 0.5 + omega:
return alpha,less_than_defalut,queries_count
#else:
# alpha = last_step_size
return alpha,less_than_defalut,queries_count
def SCOBO(Comparison,object_fcn,num_iterations,default_step_size,x0,r,m,d,s,fixed_flip_rate,line_search,warm_started):
'''
This function implements the SCOBO algorithm, as described
in our paper.
=============== INPUTS ================
Comparison .................... handle of the comparison oracle
object_fcn .................... objective function; used only for recording regret, not for solving the problem
num_iterations ................ number of iterations
default_step_size ............. default step size
x0 ............................ initial iterate
r ............................. sampling radius
kappa, delta_0,mu ............. oracle parameters
m ............................. number of samples per iteration
d ............................. dimension of problem
s ............................. sparsity level
fixed_flip_rate ............... True if kappa==1, i.e., the comparison oracle's flip rate is independent of |f(x)-f(y)|; otherwise False
line_search ................... whether to line search for the step size; if not, the default step size is used
warm_started .................. whether to warm start the line search
=============== OUTPUTS ================
x ............................ estimated optimum point
regret ....................... vector of errors f(x_k) - min f
tau_vec ...................... tau_vec(k) = fraction of flipped measurements at k-th iteration
c_num_queries ................ cumulative number of queries.
'''
regret = np.zeros((num_iterations,1))
tau_vec = np.zeros((num_iterations,1))
linesearch_queries = np.zeros(num_iterations)
x1 = np.squeeze(x0)
x = np.copy(x1)
Z = np.zeros((m,d))
for i in range(0,m):
temp = np.random.randn(1,d)
Z[i,:] = temp/np.linalg.norm(temp)
# start with default step size when using line search
step_size = default_step_size
if line_search:
less_than_defalut_vec = np.zeros((num_iterations,1)) # not outputing this in current version
for i in range(0,num_iterations):
if fixed_flip_rate == 1:
r = r * 0.99
g_hat,tau = GradientEstimator(Comparison,x,Z,r,m,d,s)
if line_search:
if warm_started:
if default_step_size >= 1e-4:
default_step_size = default_step_size*0.95
else:
default_step_size = 1e-4
#default_step_size = 1e-6
step_size,less_than_defalut,queries_count = GetStepSize(Comparison,x,g_hat,step_size,default_step_size,warm_started)
less_than_defalut_vec[i] = less_than_defalut
linesearch_queries[i] = queries_count
# print(queries_count)
# print(step_size)
x = x - step_size * g_hat
regret[i] = object_fcn(x) # f(x_min) = 0
tau_vec[i] = tau
print('current regret at step', i+1, ':', regret[i])
print('step_size:', step_size)
#print('gradient norm:', np.linalg.norm(g_hat))
c_num_queries = m*np.arange(start=0,stop = num_iterations,step = 1) + np.cumsum(linesearch_queries)
#x_hat = x
return x, regret,tau_vec,c_num_queries
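# A minimal usage sketch under stated assumptions: Gurobi is installed, and the
# comparison oracle and sparse quadratic objective below are illustrative
# stand-ins rather than the oracles used in the original experiments.
if __name__ == '__main__':
    d, s, m = 50, 5, 25
    def f_obj(x):
        # Sparse quadratic whose minimum value is 0.
        return np.sum(x[:s] ** 2)
    def comparison(x, y):
        # Noiseless oracle: +1 if f_obj(x) < f_obj(y), else -1; the second
        # value stands in for the bit-flip indicator read by GradientEstimator.
        return (1 if f_obj(x) < f_obj(y) else -1), 0
    x0 = np.random.randn(d, 1)
    x_final, regret, tau_vec, c_num_queries = SCOBO(
        comparison, f_obj, num_iterations=20, default_step_size=0.5, x0=x0,
        r=0.1, m=m, d=d, s=s, fixed_flip_rate=False, line_search=False,
        warm_started=False)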
|
#!/usr/bin/env python3
import rospy
rospy.init_node("buzzer")
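# Keep the node registered with the ROS master and block until shutdown.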
rospy.spin()
|
# coding: utf-8
from sys import argv
from os.path import exists
script, fromFile, toFile = argv
print('Copying from %s to %s' % (fromFile, toFile))
# we could do these two on one line too, how?
inFile = open(fromFile)
inData = inFile.read()
print('The input file is %d bytes long' % len(inData))
print('Does the output file exist? %r' % exists(toFile))
print('Ready, hit RETURN to continue, CTRL-C to abort.')
input()
output = open(toFile, 'w')
output.write(inData)
print('Alright, all done.')
output.close()
inFile.close()
|
import argparse
parser = argparse.ArgumentParser(description="VS Using QuickVina2")
parser.add_argument('receptor', metavar='receptor',help="receptor in PDBQT file")
parser.add_argument('dbase', metavar='dbase', help="database in SDF file with full hydrogen")
parser.add_argument('config', metavar='configure_file', help="VINA docking configure file")
parser.add_argument('output', metavar='output', help="Docking output file in SDF format")
parser.add_argument('prefix', metavar='prefix', help="prefix for SLURM script file")
args = parser.parse_args()
receptor = args.receptor
dbase = args.dbase
config = args.config
output = args.output
prefix = args.prefix
slurmscript = prefix+'_to_run_qvina2_vs.sbatch'
slurmout = open(slurmscript,"w")
slurmout.write('#!/bin/sh \n')
slurmout.write('#SBATCH -N 1\n')
slurmout.write('#SBATCH -o '+prefix+'_out.%j\n')
slurmout.write('#SBATCH -e '+prefix+'_err.%j\n')
slurmout.write('cd $SLURM_SUBMIT_DIR\n')
slurmout.write('source ~/bin/rdkit2020_env.sh\n')
slurmout.write('python ~/bin/qvina2.py '+receptor+' '+dbase+' '+config+' '+output+' >>'+prefix+'_vina.log \n')
slurmout.close()
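# The generated batch file would then typically be submitted with something like:
#   sbatch <prefix>_to_run_qvina2_vs.sbatch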
|
# Constants (User configurable)
APP_NAME = '2018_11_20_Periscope_X_PaintingSegmentationCheck' # Used to generate derivative names unique to the application.
# DOCKER REGISTRY INFORMATION:
DOCKERHUB_TAG = 'bethcimini/distributed-cellprofiler:1.2.1_319_highmem_plugins'
# AWS GENERAL SETTINGS:
AWS_REGION = 'us-east-1'
AWS_PROFILE = 'default' # The same profile used by your AWS CLI installation
SSH_KEY_NAME = 'key.pem' # Expected to be in ~/.ssh
AWS_BUCKET = 'dummybucket'
# EC2 AND ECS INFORMATION:
ECS_CLUSTER = 'default_cluster'
CLUSTER_MACHINES = 1
TASKS_PER_MACHINE = 1
MACHINE_TYPE = ['m4.xlarge']
MACHINE_PRICE = 0.10
EBS_VOL_SIZE = 200 # In GB. Minimum allowed is 22.
# DOCKER INSTANCE RUNNING ENVIRONMENT:
DOCKER_CORES = 4 # Number of CellProfiler processes to run inside a docker container
CPU_SHARES = DOCKER_CORES * 1024 # ECS computing units assigned to each docker container (1024 units = 1 core)
MEMORY = 15000 # Memory assigned to the docker container in MB
SECONDS_TO_START = 3*60 # Wait before the next CP process is initiated to avoid memory collisions
# In GB; default is 10. The amount of hard disk space each docker container uses.
# EBS_VOL_SIZE should be >= DOCKER_BASE_SIZE * TASKS_PER_MACHINE
#SQS QUEUE INFORMATION:
SQS_QUEUE_NAME = APP_NAME + 'Queue'
SQS_MESSAGE_VISIBILITY = 20*60 # Timeout (secs) for messages in flight (average time to be processed)
SQS_DEAD_LETTER_QUEUE = 'arn:aws:sqs:us-east-1:XXXXXXXXXXXX:DeadMessages'
# LOG GROUP INFORMATION:
LOG_GROUP_NAME = APP_NAME
# REDUNDANCY CHECKS
CHECK_IF_DONE_BOOL = 'False' #True or False- should it check if there are a certain number of non-empty files and delete the job if yes?
EXPECTED_NUMBER_FILES = 5 #What is the number of files that trigger skipping a job?
MIN_FILE_SIZE_BYTES = 1
|
# Code for tagging temporal expressions in text
# For details of the TIMEX format, see http://timex2.mitre.org/
# Converted to Python3 by Brian Hockenmaier in 2019
import re
import string
import os
import sys
from datetime import datetime, timedelta
# Python3 version no longer requires eGenix.com mx Base Distribution
# http://www.egenix.com/products/python/mxBase/
# Predefined strings.
numbers = "(^a(?=\s)|one|two|three|four|five|six|seven|eight|nine|ten| \
eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen| \
eighteen|nineteen|twenty|thirty|forty|fifty|sixty|seventy|eighty| \
ninety|hundred|thousand)"
day = "(monday|tuesday|wednesday|thursday|friday|saturday|sunday)"
week_day = "(monday|tuesday|wednesday|thursday|friday|saturday|sunday)"
month = "(january|february|march|april|may|june|july|august|september| \
october|november|december)"
dmy = "(year|day|week|month)"
rel_day = "(today|yesterday|tomorrow|tonight|tonite)"
exp1 = "(before|after|earlier|later|ago)"
exp2 = "(this|next|last)"
iso = r"\d+[/-]\d+[/-]\d+ \d+:\d+:\d+\.\d+"
year = r"((?<=\s)\d{4}|^\d{4})"
regxp1 = "((\d+|(" + numbers + "[-\s]?)+) " + dmy + "s? " + exp1 + ")"
regxp2 = "(" + exp2 + " (" + dmy + "|" + week_day + "|" + month + "))"
reg1 = re.compile(regxp1, re.IGNORECASE)
reg2 = re.compile(regxp2, re.IGNORECASE)
reg3 = re.compile(rel_day, re.IGNORECASE)
reg4 = re.compile(iso)
reg5 = re.compile(year)
def tag(text):
# Initialization
timex_found = []
# re.findall() finds all the substring matches, keep only the full
# matching string. Captures expressions such as 'number of days' ago, etc.
found = reg1.findall(text)
found = [a[0] for a in found if len(a) > 1]
for timex in found:
timex_found.append(timex)
# Variations of this thursday, next year, etc
found = reg2.findall(text)
found = [a[0] for a in found if len(a) > 1]
for timex in found:
timex_found.append(timex)
# today, tomorrow, etc
found = reg3.findall(text)
for timex in found:
timex_found.append(timex)
# ISO
found = reg4.findall(text)
for timex in found:
timex_found.append(timex)
# Year
found = reg5.findall(text)
for timex in found:
timex_found.append(timex)
# Tag only temporal expressions which haven't been tagged.
for timex in timex_found:
text = re.sub(timex + '(?!</TIMEX2>)', '<TIMEX2>' + timex + '</TIMEX2>', text)
return text
# Hash function for week days to simplify the grounding task.
# [Mon..Sun] -> [0..6]
hashweekdays = {
'monday': 0,
'tuesday': 1,
'wednesday': 2,
'thursday': 3,
'friday': 4,
'saturday': 5,
'sunday': 6}
# Hash function for months to simplify the grounding task.
# [Jan..Dec] -> [1..12]
hashmonths = {
'january': 1,
'february': 2,
'march': 3,
'april': 4,
'may': 5,
'june': 6,
'july': 7,
'august': 8,
'september': 9,
'october': 10,
'november': 11,
'december': 12}
# Hash number in words into the corresponding integer value
def hashnum(number):
if re.match(r'one|^a\b', number, re.IGNORECASE):
return 1
if re.match(r'two', number, re.IGNORECASE):
return 2
if re.match(r'three', number, re.IGNORECASE):
return 3
if re.match(r'four', number, re.IGNORECASE):
return 4
if re.match(r'five', number, re.IGNORECASE):
return 5
if re.match(r'six', number, re.IGNORECASE):
return 6
if re.match(r'seven', number, re.IGNORECASE):
return 7
if re.match(r'eight', number, re.IGNORECASE):
return 8
if re.match(r'nine', number, re.IGNORECASE):
return 9
if re.match(r'ten', number, re.IGNORECASE):
return 10
if re.match(r'eleven', number, re.IGNORECASE):
return 11
if re.match(r'twelve', number, re.IGNORECASE):
return 12
if re.match(r'thirteen', number, re.IGNORECASE):
return 13
if re.match(r'fourteen', number, re.IGNORECASE):
return 14
if re.match(r'fifteen', number, re.IGNORECASE):
return 15
if re.match(r'sixteen', number, re.IGNORECASE):
return 16
if re.match(r'seventeen', number, re.IGNORECASE):
return 17
if re.match(r'eighteen', number, re.IGNORECASE):
return 18
if re.match(r'nineteen', number, re.IGNORECASE):
return 19
if re.match(r'twenty', number, re.IGNORECASE):
return 20
if re.match(r'thirty', number, re.IGNORECASE):
return 30
if re.match(r'forty', number, re.IGNORECASE):
return 40
if re.match(r'fifty', number, re.IGNORECASE):
return 50
if re.match(r'sixty', number, re.IGNORECASE):
return 60
if re.match(r'seventy', number, re.IGNORECASE):
return 70
if re.match(r'eighty', number, re.IGNORECASE):
return 80
if re.match(r'ninety', number, re.IGNORECASE):
return 90
if re.match(r'hundred', number, re.IGNORECASE):
return 100
if re.match(r'thousand', number, re.IGNORECASE):
return 1000
# Given a timex_tagged_text and a Date object set to base_date,
# returns timex_grounded_text
def ground(tagged_text, base_date):
# Find all identified timex and put them into a list
timex_regex = re.compile(r'<TIMEX2>.*?</TIMEX2>', re.DOTALL)
timex_found = timex_regex.findall(tagged_text)
timex_found = map(lambda timex:re.sub(r'</?TIMEX2.*?>', '', timex), \
timex_found)
timexList = []
# Calculate the new date accordingly
for timex in timex_found:
# global month
month = "(january|february|march|april|may|june|july|august|september| \
october|november|december)"
timex_val = 'UNKNOWN' # Default value
timex_ori = timex # Backup original timex for later substitution
# If numbers are given in words, hash them into corresponding numbers.
# eg. twenty five days ago --> 25 days ago
if re.search(numbers, timex, re.IGNORECASE):
            split_timex = re.split(r'\s(?=days?|months?|years?|weeks?)',
                                   timex, flags=re.IGNORECASE)
            value = split_timex[0]
            unit = split_timex[1]
            num_list = map(lambda s: hashnum(s), re.findall(numbers + '+',
                                                            value, re.IGNORECASE))
            timex = str(sum(num_list)) + ' ' + unit
# If timex matches ISO format, remove 'time' and reorder 'date'
if re.match(r'\d+[/-]\d+[/-]\d+ \d+:\d+:\d+\.\d+', timex):
dmy = re.split(r'\s', timex)[0]
dmy = re.split(r'/|-', dmy)
timex_val = str(dmy[2]) + '-' + str(dmy[1]) + '-' + str(dmy[0])
# Specific dates
elif re.match(r'\d{4}', timex):
timex_val = str(timex)
# Relative dates
elif re.match(r'tonight|tonite|today', timex, re.IGNORECASE):
timex_val = str(base_date)
elif re.match(r'yesterday', timex, re.IGNORECASE):
timex_val = str(base_date + timedelta(days=-1))
elif re.match(r'tomorrow', timex, re.IGNORECASE):
timex_val = str(base_date + timedelta(days=+1))
# Weekday in the previous week.
elif re.match(r'last ' + week_day, timex, re.IGNORECASE):
target_day = hashweekdays[timex.split()[1]]
monday_of_base_week = base_date - timedelta(days=base_date.weekday())
monday_of_target_week = base_date + timedelta(weeks=-1)
timex_val = str(monday_of_target_week + timedelta(days=target_day+1))
# Weekday in the current week.
elif re.match(r'this ' + week_day, timex, re.IGNORECASE):
target_day = hashweekdays[timex.split()[1]]
monday_of_base_week = base_date - timedelta(days=base_date.weekday())
monday_of_target_week = base_date + timedelta(weeks=0)
timex_val = str(monday_of_target_week + timedelta(days=target_day+1))
# Weekday in the following week.
elif re.match(r'next ' + week_day, timex, re.IGNORECASE):
target_day = hashweekdays[timex.split()[1]]
monday_of_base_week = base_date - timedelta(days=base_date.weekday())
monday_of_target_week = base_date + timedelta(weeks=+1)
timex_val = str(monday_of_target_week + timedelta(days=target_day+1))
# Last, this, next week.
elif re.match(r'last week', timex, re.IGNORECASE):
year = (base_date + timedelta(weeks=-1)).year
# iso_week returns a triple (year, week, day) hence, retrieve
# only week value.
week = (base_date + timedelta(weeks=-1)).isocalendar()[1]
timex_val = str(year) + 'W' + str(week)
elif re.match(r'this week', timex, re.IGNORECASE):
year = (base_date + timedelta(weeks=0)).year
week = (base_date + timedelta(weeks=0)).isocalendar()[1]
timex_val = str(year) + 'W' + str(week)
elif re.match(r'next week', timex, re.IGNORECASE):
year = (base_date + timedelta(weeks=+1)).year
week = (base_date + timedelta(weeks=+1)).isocalendar()[1]
timex_val = str(year) + 'W' + str(week)
# Month in the previous year.
elif re.match(r'last ' + month, timex, re.IGNORECASE):
month = hashmonths[timex.split()[1]]
timex_val = str(base_date.year - 1) + '-' + str(month)
# Month in the current year.
elif re.match(r'this ' + month, timex, re.IGNORECASE):
month = hashmonths[timex.split()[1]]
timex_val = str(base_date.year) + '-' + str(month)
# Month in the following year.
elif re.match(r'next ' + month, timex, re.IGNORECASE):
month = hashmonths[timex.split()[1]]
timex_val = str(base_date.year + 1) + '-' + str(month)
elif re.match(r'last month', timex, re.IGNORECASE):
# Handles the year boundary.
if base_date.month == 1:
timex_val = str(base_date.year - 1) + '-' + '12'
else:
timex_val = str(base_date.year) + '-' + str(base_date.month - 1)
elif re.match(r'this month', timex, re.IGNORECASE):
timex_val = str(base_date.year) + '-' + str(base_date.month)
elif re.match(r'next month', timex, re.IGNORECASE):
# Handles the year boundary.
if base_date.month == 12:
timex_val = str(base_date.year + 1) + '-' + '1'
else:
timex_val = str(base_date.year) + '-' + str(base_date.month + 1)
elif re.match(r'last year', timex, re.IGNORECASE):
timex_val = str(base_date.year - 1)
elif re.match(r'this year', timex, re.IGNORECASE):
timex_val = str(base_date.year)
elif re.match(r'next year', timex, re.IGNORECASE):
timex_val = str(base_date.year + 1)
elif re.match(r'\d+ days? (ago|earlier|before)', timex, re.IGNORECASE):
# Calculate the offset by taking '\d+' part from the timex.
offset = int(re.split(r'\s', timex)[0])
timex_val = str(base_date + timedelta(days=-offset))
elif re.match(r'\d+ days? (later|after)', timex, re.IGNORECASE):
offset = int(re.split(r'\s', timex)[0])
timex_val = str(base_date + timedelta(days=+offset))
elif re.match(r'\d+ weeks? (ago|earlier|before)', timex, re.IGNORECASE):
offset = int(re.split(r'\s', timex)[0])
year = (base_date + timedelta(weeks=-offset)).year
week = (base_date + \
timedelta(weeks=-offset)).isocalendar()[1]
timex_val = str(year) + 'W' + str(week)
elif re.match(r'\d+ weeks? (later|after)', timex, re.IGNORECASE):
offset = int(re.split(r'\s', timex)[0])
year = (base_date + timedelta(weeks=+offset)).year
week = (base_date + timedelta(weeks=+offset)).isocalendar()[1]
timex_val = str(year) + 'W' + str(week)
elif re.match(r'\d+ months? (ago|earlier|before)', timex, re.IGNORECASE):
extra = 0
offset = int(re.split(r'\s', timex)[0])
# Checks if subtracting the remainder of (offset / 12) to the base month
# crosses the year boundary.
if (base_date.month - offset % 12) < 1:
extra = 1
# Calculate new values for the year and the month.
year = str(base_date.year - offset // 12 - extra)
month = str((base_date.month - offset % 12) % 12)
# Fix for the special case.
if month == '0':
month = '12'
timex_val = year + '-' + month
elif re.match(r'\d+ months? (later|after)', timex, re.IGNORECASE):
extra = 0
offset = int(re.split(r'\s', timex)[0])
if (base_date.month + offset % 12) > 12:
extra = 1
year = str(base_date.year + offset // 12 + extra)
month = str((base_date.month + offset % 12) % 12)
if month == '0':
month = '12'
timex_val = year + '-' + month
elif re.match(r'\d+ years? (ago|earlier|before)', timex, re.IGNORECASE):
offset = int(re.split(r'\s', timex)[0])
timex_val = str(base_date.year - offset)
elif re.match(r'\d+ years? (later|after)', timex, re.IGNORECASE):
offset = int(re.split(r'\s', timex)[0])
timex_val = str(base_date.year + offset)
# Remove 'time' from timex_val.
# For example, If timex_val = 2000-02-20 12:23:34.45, then
# timex_val = 2000-02-20
timex_val = re.sub(r'\s.*', '', timex_val)
# Substitute tag+timex in the text with grounded tag+timex.
tagged_text = re.sub('<TIMEX2>' + timex_ori + '</TIMEX2>', '<TIMEX2 val=\"' \
+ timex_val + '\">' + timex_ori + '</TIMEX2>', tagged_text)
timexList.append({
"text": timex_ori,
"value": timex_val
})
return tagged_text, timexList
####
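# A minimal grounding sketch (the sample sentence is illustrative): tag() marks
# the temporal expressions and ground() resolves them against a reference date.
def demo_ground():
    sample = 'We met yesterday and will meet again next friday.'
    grounded, timex_list = ground(tag(sample), datetime.today())
    print(grounded)
    print(timex_list)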
def demo():
import nltk
text = nltk.corpus.abc.raw('rural.txt')[:10000]
print(tag(text))
if __name__ == '__main__':
demo()
|
#!/usr/bin/env python3
# Tests check_format.py. This must be run in a context where the clang
# version and settings are compatible with the one in the Envoy
# docker. Normally this is run via check_format_test.sh, which
# executes it in under docker.
from __future__ import print_function
from run_command import run_command
import argparse
import logging
import os
import shutil
import sys
import tempfile
curr_dir = os.path.dirname(os.path.realpath(__file__))
tools = os.path.dirname(curr_dir)
src = os.path.join(tools, 'testdata', 'check_format')
check_format = sys.executable + " " + os.path.join(curr_dir, 'check_format.py')
errors = 0
# Runs the 'check_format' operation, on the specified file, printing
# the command run and the status code as well as the stdout, and returning
# all of that to the caller.
def run_check_format(operation, filename):
command = check_format + " " + operation + " " + filename
status, stdout, stderr = run_command(command)
return (command, status, stdout + stderr)
def get_input_file(filename, extra_input_files=None):
files_to_copy = [filename]
if extra_input_files is not None:
files_to_copy.extend(extra_input_files)
for f in files_to_copy:
infile = os.path.join(src, f)
directory = os.path.dirname(f)
if not directory == '' and not os.path.isdir(directory):
os.makedirs(directory)
shutil.copyfile(infile, f)
return filename
# Attempts to fix file, returning a 4-tuple: the command, input file name,
# output filename, captured stdout as an array of lines, and the error status
# code.
def fix_file_helper(filename, extra_input_files=None):
command, status, stdout = run_check_format(
"fix", get_input_file(filename, extra_input_files=extra_input_files))
infile = os.path.join(src, filename)
return command, infile, filename, status, stdout
# Attempts to fix a file, returning the status code and the generated output.
# If the fix was successful, the diff is returned as a string-array. If the file
# was not fixable, the error-messages are returned as a string-array.
def fix_file_expecting_success(file, extra_input_files=None):
command, infile, outfile, status, stdout = fix_file_helper(file,
extra_input_files=extra_input_files)
if status != 0:
print("FAILED: " + infile)
emit_stdout_as_error(stdout)
return 1
status, stdout, stderr = run_command('diff ' + outfile + ' ' + infile + '.gold')
if status != 0:
print("FAILED: " + infile)
emit_stdout_as_error(stdout + stderr)
return 1
return 0
def fix_file_expecting_no_change(file):
command, infile, outfile, status, stdout = fix_file_helper(file)
if status != 0:
return 1
status, stdout, stderr = run_command('diff ' + outfile + ' ' + infile)
if status != 0:
logging.error(file + ': expected file to remain unchanged')
return 1
return 0
def emit_stdout_as_error(stdout):
logging.error("\n".join(stdout))
def expect_error(filename, status, stdout, expected_substring):
if status == 0:
logging.error("%s: Expected failure `%s`, but succeeded" % (filename, expected_substring))
return 1
for line in stdout:
if expected_substring in line:
return 0
logging.error("%s: Could not find '%s' in:\n" % (filename, expected_substring))
emit_stdout_as_error(stdout)
return 1
def fix_file_expecting_failure(filename, expected_substring):
command, infile, outfile, status, stdout = fix_file_helper(filename)
return expect_error(filename, status, stdout, expected_substring)
def check_file_expecting_error(filename, expected_substring, extra_input_files=None):
command, status, stdout = run_check_format(
"check", get_input_file(filename, extra_input_files=extra_input_files))
return expect_error(filename, status, stdout, expected_substring)
def check_and_fix_error(filename, expected_substring, extra_input_files=None):
errors = check_file_expecting_error(filename,
expected_substring,
extra_input_files=extra_input_files)
errors += fix_file_expecting_success(filename, extra_input_files=extra_input_files)
return errors
def check_tool_not_found_error():
# Temporarily change PATH to test the error about lack of external tools.
oldPath = os.environ["PATH"]
os.environ["PATH"] = "/sbin:/usr/sbin"
clang_format = os.getenv("CLANG_FORMAT", "clang-format-9")
# If CLANG_FORMAT points directly to the binary, skip this test.
if os.path.isfile(clang_format) and os.access(clang_format, os.X_OK):
os.environ["PATH"] = oldPath
return 0
errors = check_file_expecting_error("no_namespace_envoy.cc",
"Command %s not found." % clang_format)
os.environ["PATH"] = oldPath
return errors
def check_unfixable_error(filename, expected_substring):
errors = check_file_expecting_error(filename, expected_substring)
errors += fix_file_expecting_failure(filename, expected_substring)
return errors
def check_file_expecting_ok(filename):
command, status, stdout = run_check_format("check", get_input_file(filename))
if status != 0:
logging.error("Expected %s to have no errors; status=%d, output:\n" % (filename, status))
emit_stdout_as_error(stdout)
return status + fix_file_expecting_no_change(filename)
def run_checks():
errors = 0
# The following error is the error about unavailability of external tools.
errors += check_tool_not_found_error()
# The following errors can be detected but not fixed automatically.
errors += check_unfixable_error("no_namespace_envoy.cc",
"Unable to find Envoy namespace or NOLINT(namespace-envoy)")
errors += check_unfixable_error("mutex.cc", "Don't use <mutex> or <condition_variable*>")
errors += check_unfixable_error("condition_variable.cc",
"Don't use <mutex> or <condition_variable*>")
errors += check_unfixable_error("condition_variable_any.cc",
"Don't use <mutex> or <condition_variable*>")
errors += check_unfixable_error("shared_mutex.cc", "shared_mutex")
errors += check_unfixable_error("shared_mutex.cc", "shared_mutex")
real_time_inject_error = (
"Don't reference real-world time sources from production code; use injection")
errors += check_unfixable_error("real_time_source.cc", real_time_inject_error)
errors += check_unfixable_error("real_time_system.cc", real_time_inject_error)
errors += check_unfixable_error(
"duration_value.cc",
"Don't use ambiguous duration(value), use an explicit duration type, e.g. Event::TimeSystem::Milliseconds(value)"
)
errors += check_unfixable_error("system_clock.cc", real_time_inject_error)
errors += check_unfixable_error("steady_clock.cc", real_time_inject_error)
errors += check_unfixable_error(
"unpack_to.cc", "Don't use UnpackTo() directly, use MessageUtil::unpackTo() instead")
errors += check_unfixable_error("condvar_wait_for.cc", real_time_inject_error)
errors += check_unfixable_error("sleep.cc", real_time_inject_error)
errors += check_unfixable_error("std_atomic_free_functions.cc", "std::atomic_*")
errors += check_unfixable_error("std_get_time.cc", "std::get_time")
errors += check_unfixable_error("no_namespace_envoy.cc",
"Unable to find Envoy namespace or NOLINT(namespace-envoy)")
errors += check_unfixable_error("bazel_tools.BUILD", "unexpected @bazel_tools reference")
errors += check_unfixable_error("proto.BUILD",
"unexpected direct external dependency on protobuf")
errors += check_unfixable_error("proto_deps.cc",
"unexpected direct dependency on google.protobuf")
errors += check_unfixable_error("attribute_packed.cc", "Don't use __attribute__((packed))")
errors += check_unfixable_error("designated_initializers.cc",
"Don't use designated initializers")
errors += check_unfixable_error("elvis_operator.cc", "Don't use the '?:' operator")
errors += check_unfixable_error("testing_test.cc",
"Don't use 'using testing::Test;, elaborate the type instead")
errors += check_unfixable_error(
"serialize_as_string.cc",
"Don't use MessageLite::SerializeAsString for generating deterministic serialization")
errors += check_unfixable_error(
"version_history/current.rst",
"Version history not in alphabetical order (zzzzz vs aaaaa): please check placement of line"
)
errors += check_unfixable_error(
"version_history/current.rst",
"Version history not in alphabetical order (this vs aaaa): please check placement of line")
errors += check_unfixable_error(
"version_history/current.rst",
"Version history line malformed. Does not match VERSION_HISTORY_NEW_LINE_REGEX in "
"check_format.py")
errors += check_unfixable_error(
"counter_from_string.cc",
"Don't lookup stats by name at runtime; use StatName saved during construction")
errors += check_unfixable_error(
"gauge_from_string.cc",
"Don't lookup stats by name at runtime; use StatName saved during construction")
errors += check_unfixable_error(
"histogram_from_string.cc",
"Don't lookup stats by name at runtime; use StatName saved during construction")
errors += check_unfixable_error(
"regex.cc", "Don't use std::regex in code that handles untrusted input. Use RegexMatcher")
errors += check_unfixable_error(
"grpc_init.cc",
"Don't call grpc_init() or grpc_shutdown() directly, instantiate Grpc::GoogleGrpcContext. "
+ "See #8282")
errors += check_unfixable_error(
"grpc_shutdown.cc",
"Don't call grpc_init() or grpc_shutdown() directly, instantiate Grpc::GoogleGrpcContext. "
+ "See #8282")
errors += check_unfixable_error("clang_format_double_off.cc", "clang-format nested off")
errors += check_unfixable_error("clang_format_trailing_off.cc", "clang-format remains off")
errors += check_unfixable_error("clang_format_double_on.cc", "clang-format nested on")
errors += fix_file_expecting_failure(
"api/missing_package.proto",
"Unable to find package name for proto file: ./api/missing_package.proto")
errors += check_unfixable_error("proto_enum_mangling.cc",
"Don't use mangled Protobuf names for enum constants")
errors += check_unfixable_error(
"test_naming.cc", "Test names should be CamelCase, starting with a capital letter")
errors += check_unfixable_error("mock_method_n.cc", "use MOCK_METHOD() instead")
errors += check_unfixable_error("for_each_n.cc", "use an alternative for loop instead")
errors += check_unfixable_error(
"test/register_factory.cc",
"Don't use Registry::RegisterFactory or REGISTER_FACTORY in tests, use "
"Registry::InjectFactory instead.")
errors += check_unfixable_error("strerror.cc",
"Don't use strerror; use Envoy::errorDetails instead")
errors += check_unfixable_error(
"std_unordered_map.cc", "Don't use std::unordered_map; use absl::flat_hash_map instead " +
"or absl::node_hash_map if pointer stability of keys/values is required")
errors += check_unfixable_error(
"std_unordered_set.cc", "Don't use std::unordered_set; use absl::flat_hash_set instead " +
"or absl::node_hash_set if pointer stability of keys/values is required")
errors += check_unfixable_error("std_any.cc", "Don't use std::any; use absl::any instead")
errors += check_unfixable_error("std_get_if.cc",
"Don't use std::get_if; use absl::get_if instead")
errors += check_unfixable_error(
"std_holds_alternative.cc",
"Don't use std::holds_alternative; use absl::holds_alternative instead")
errors += check_unfixable_error(
"std_make_optional.cc", "Don't use std::make_optional; use absl::make_optional instead")
errors += check_unfixable_error("std_monostate.cc",
"Don't use std::monostate; use absl::monostate instead")
errors += check_unfixable_error("std_optional.cc",
"Don't use std::optional; use absl::optional instead")
errors += check_unfixable_error("std_string_view.cc",
"Don't use std::string_view; use absl::string_view instead")
errors += check_unfixable_error("std_variant.cc",
"Don't use std::variant; use absl::variant instead")
errors += check_unfixable_error("std_visit.cc", "Don't use std::visit; use absl::visit instead")
errors += check_unfixable_error(
"throw.cc", "Don't introduce throws into exception-free files, use error statuses instead.")
errors += check_unfixable_error("pgv_string.proto", "min_bytes is DEPRECATED, Use min_len.")
errors += check_file_expecting_ok("commented_throw.cc")
errors += check_unfixable_error("repository_url.bzl",
"Only repository_locations.bzl may contains URL references")
errors += check_unfixable_error("repository_urls.bzl",
"Only repository_locations.bzl may contains URL references")
# The following files have errors that can be automatically fixed.
errors += check_and_fix_error("over_enthusiastic_spaces.cc",
"./over_enthusiastic_spaces.cc:3: over-enthusiastic spaces")
errors += check_and_fix_error("extra_enthusiastic_spaces.cc",
"./extra_enthusiastic_spaces.cc:3: over-enthusiastic spaces")
errors += check_and_fix_error("angle_bracket_include.cc",
"envoy includes should not have angle brackets")
errors += check_and_fix_error("proto_style.cc", "incorrect protobuf type reference")
errors += check_and_fix_error("long_line.cc", "clang-format check failed")
errors += check_and_fix_error("header_order.cc", "header_order.py check failed")
errors += check_and_fix_error("clang_format_on.cc",
"./clang_format_on.cc:7: over-enthusiastic spaces")
# Validate that a missing license is added.
errors += check_and_fix_error("license.BUILD", "envoy_build_fixer check failed")
# Validate that an incorrect license is replaced and reordered.
errors += check_and_fix_error("update_license.BUILD", "envoy_build_fixer check failed")
# Validate that envoy_package() is added where there is an envoy_* rule occurring.
errors += check_and_fix_error("add_envoy_package.BUILD", "envoy_build_fixer check failed")
# Validate that we don't add envoy_package() when no envoy_* rule.
errors += check_file_expecting_ok("skip_envoy_package.BUILD")
# Validate that we clean up gratuitous blank lines.
errors += check_and_fix_error("canonical_spacing.BUILD", "envoy_build_fixer check failed")
# Validate that unused loads are removed.
errors += check_and_fix_error("remove_unused_loads.BUILD", "envoy_build_fixer check failed")
# Validate that API proto package deps are computed automagically.
errors += check_and_fix_error("canonical_api_deps.BUILD",
"envoy_build_fixer check failed",
extra_input_files=[
"canonical_api_deps.cc", "canonical_api_deps.h",
"canonical_api_deps.other.cc"
])
errors += check_and_fix_error("bad_envoy_build_sys_ref.BUILD", "Superfluous '@envoy//' prefix")
errors += check_and_fix_error("proto_format.proto", "clang-format check failed")
errors += check_and_fix_error(
"cpp_std.cc",
"term absl::make_unique< should be replaced with standard library term std::make_unique<")
errors += check_and_fix_error("code_conventions.cc",
"term .Times(1); should be replaced with preferred term ;")
errors += check_file_expecting_ok("real_time_source_override.cc")
errors += check_file_expecting_ok("duration_value_zero.cc")
errors += check_file_expecting_ok("time_system_wait_for.cc")
errors += check_file_expecting_ok("clang_format_off.cc")
return errors
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='tester for check_format.py.')
parser.add_argument('--log', choices=['INFO', 'WARN', 'ERROR'], default='INFO')
args = parser.parse_args()
logging.basicConfig(format='%(message)s', level=args.log)
# Now create a temp directory to copy the input files, so we can fix them
# without actually fixing our testdata. This requires chdiring to the temp
# directory, so it's annoying to comingle check-tests and fix-tests.
with tempfile.TemporaryDirectory() as tmp:
os.chdir(tmp)
errors = run_checks()
if errors != 0:
logging.error("%d FAILURES" % errors)
exit(1)
logging.warning("PASS")
|